diff --git a/MicroBenchmarks/XRay/FDRMode/fdrmode-bench.cc b/MicroBenchmarks/XRay/FDRMode/fdrmode-bench.cc --- a/MicroBenchmarks/XRay/FDRMode/fdrmode-bench.cc +++ b/MicroBenchmarks/XRay/FDRMode/fdrmode-bench.cc @@ -69,14 +69,14 @@ [[clang::xray_never_instrument]] static void BM_XRayFDRMultiThreaded( benchmark::State& state) { - if (state.thread_index == 0) { + if (state.thread_index() == 0) { SetUpXRayFDRMultiThreaded(state); } for (auto _ : state) { val = EmptyFunction(); benchmark::DoNotOptimize(val); } - if (state.thread_index == 0) { + if (state.thread_index() == 0) { TearDownXRayFDRMultiThreaded(state); } } diff --git a/MicroBenchmarks/libs/benchmark/.clang-tidy b/MicroBenchmarks/libs/benchmark/.clang-tidy new file mode 100644 --- /dev/null +++ b/MicroBenchmarks/libs/benchmark/.clang-tidy @@ -0,0 +1,7 @@ +--- +Checks: 'clang-analyzer-*,readability-redundant-*,performance-*' +WarningsAsErrors: 'clang-analyzer-*,readability-redundant-*,performance-*' +HeaderFilterRegex: '.*' +AnalyzeTemporaryDtors: false +FormatStyle: none +User: user diff --git a/MicroBenchmarks/libs/benchmark/AUTHORS b/MicroBenchmarks/libs/benchmark/AUTHORS --- a/MicroBenchmarks/libs/benchmark/AUTHORS +++ b/MicroBenchmarks/libs/benchmark/AUTHORS @@ -21,6 +21,8 @@ Deniz Evrenci Dirac Research Dominik Czarnota +Dominik Korman +Donald Aingworth Eric Backus Eric Fiselier Eugene Zhuk @@ -50,7 +52,9 @@ Radoslav Yovchev Roman Lebedev Sayan Bhattacharjee +Shapr3D Shuo Chen +Staffan Tjernstrom Steinar H. Gunderson Stripe, Inc. Tobias Schmidt diff --git a/MicroBenchmarks/libs/benchmark/BUILD.bazel b/MicroBenchmarks/libs/benchmark/BUILD.bazel --- a/MicroBenchmarks/libs/benchmark/BUILD.bazel +++ b/MicroBenchmarks/libs/benchmark/BUILD.bazel @@ -1,7 +1,19 @@ -load("@rules_cc//cc:defs.bzl", "cc_library") - licenses(["notice"]) +load("//:config/generate_export_header.bzl", "generate_export_header") + +posix_copts = [ + "-fvisibility=hidden", + "-fvisibility-inlines-hidden", +] + +# Generate header to provide ABI export symbols +generate_export_header( + out = "include/benchmark/export.h", + lib = "benchmark", + static_define = "BENCHMARK_STATIC_DEFINE", +) + config_setting( name = "qnx", constraint_values = ["@platforms//os:qnx"], @@ -29,13 +41,24 @@ ], exclude = ["src/benchmark_main.cc"], ), - hdrs = ["include/benchmark/benchmark.h"], + hdrs = [ + "include/benchmark/benchmark.h", + "include/benchmark/export.h", # From generate_export_header + ], linkopts = select({ ":windows": ["-DEFAULTLIB:shlwapi.lib"], "//conditions:default": ["-pthread"], }), strip_include_prefix = "include", visibility = ["//visibility:public"], + copts = select({ + ":windows": [], + "//conditions:default": posix_copts, + }), + local_defines = select({ + ":windows": ["benchmark_EXPORTS"], + "//conditions:default": [], + }), ) cc_library( @@ -45,6 +68,10 @@ strip_include_prefix = "include", visibility = ["//visibility:public"], deps = [":benchmark"], + copts = select({ + ":windows": [], + "//conditions:default": posix_copts, + }), ) cc_library( diff --git a/MicroBenchmarks/libs/benchmark/CMakeLists.txt b/MicroBenchmarks/libs/benchmark/CMakeLists.txt --- a/MicroBenchmarks/libs/benchmark/CMakeLists.txt +++ b/MicroBenchmarks/libs/benchmark/CMakeLists.txt @@ -13,18 +13,31 @@ endif() endforeach() -project (benchmark VERSION 1.5.4 LANGUAGES CXX) +project (benchmark VERSION 1.6.1 LANGUAGES CXX) option(BENCHMARK_ENABLE_TESTING "Enable testing of the benchmark library." ON) option(BENCHMARK_ENABLE_EXCEPTIONS "Enable the use of exceptions in the benchmark library." 
ON) option(BENCHMARK_ENABLE_LTO "Enable link time optimisation of the benchmark library." OFF) option(BENCHMARK_USE_LIBCXX "Build and test using libc++ as the standard library." OFF) +option(BENCHMARK_ENABLE_WERROR "Build Release candidates with -Werror." ON) +option(BENCHMARK_FORCE_WERROR "Build Release candidates with -Werror regardless of compiler issues." OFF) + +if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "PGI") + # PGC++ maybe reporting false positives. + set(BENCHMARK_ENABLE_WERROR OFF) +endif() +if(BENCHMARK_FORCE_WERROR) + set(BENCHMARK_ENABLE_WERROR ON) +endif(BENCHMARK_FORCE_WERROR) + if(NOT MSVC) option(BENCHMARK_BUILD_32_BITS "Build a 32 bit version of the library." OFF) else() set(BENCHMARK_BUILD_32_BITS OFF CACHE BOOL "Build a 32 bit version of the library - unsupported when using MSVC)" FORCE) endif() option(BENCHMARK_ENABLE_INSTALL "Enable installation of benchmark. (Projects embedding benchmark may want to turn this OFF.)" ON) +option(BENCHMARK_ENABLE_DOXYGEN "Build documentation with Doxygen." OFF) +option(BENCHMARK_INSTALL_DOCS "Enable installation of documentation." ON) # Allow unmet dependencies to be met using CMake's ExternalProject mechanics, which # may require downloading the source code. @@ -33,10 +46,14 @@ # This option can be used to disable building and running unit tests which depend on gtest # in cases where it is not possible to build or find a valid version of gtest. option(BENCHMARK_ENABLE_GTEST_TESTS "Enable building the unit tests which depend on gtest" ON) +option(BENCHMARK_USE_BUNDLED_GTEST "Use bundled GoogleTest. If disabled, the find_package(GTest) will be used." ON) option(BENCHMARK_ENABLE_LIBPFM "Enable performance counters provided by libpfm" OFF) -set(CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS ON) +# Export only public symbols +set(CMAKE_CXX_VISIBILITY_PRESET hidden) +set(CMAKE_VISIBILITY_INLINES_HIDDEN ON) + if(MSVC) # As of CMake 3.18, CMAKE_SYSTEM_PROCESSOR is not set properly for MSVC and # cross-compilation (e.g. Host=x86_64, target=aarch64) requires using the @@ -109,9 +126,13 @@ string(SUBSTRING ${VERSION} 0 1 GENERIC_LIB_SOVERSION) # Import our CMake modules -include(CheckCXXCompilerFlag) include(AddCXXCompilerFlag) +include(CheckCXXCompilerFlag) +include(CheckLibraryExists) include(CXXFeatureCheck) +include(GenerateExportHeader) + +check_library_exists(rt shm_open "" HAVE_LIB_RT) if (BENCHMARK_BUILD_32_BITS) add_required_cxx_compiler_flag(-m32) @@ -160,9 +181,11 @@ add_cxx_compiler_flag(-Wall) add_cxx_compiler_flag(-Wextra) add_cxx_compiler_flag(-Wshadow) - add_cxx_compiler_flag(-Werror RELEASE) - add_cxx_compiler_flag(-Werror RELWITHDEBINFO) - add_cxx_compiler_flag(-Werror MINSIZEREL) + if(BENCHMARK_ENABLE_WERROR) + add_cxx_compiler_flag(-Werror RELEASE) + add_cxx_compiler_flag(-Werror RELWITHDEBINFO) + add_cxx_compiler_flag(-Werror MINSIZEREL) + endif() if (NOT BENCHMARK_ENABLE_TESTING) # Disable warning when compiling tests as gtest does not use 'override'. add_cxx_compiler_flag(-Wsuggest-override) @@ -181,9 +204,11 @@ add_cxx_compiler_flag(-wd1786) endif() # Disable deprecation warnings for release builds (when -Werror is enabled). 
- add_cxx_compiler_flag(-Wno-deprecated RELEASE) - add_cxx_compiler_flag(-Wno-deprecated RELWITHDEBINFO) - add_cxx_compiler_flag(-Wno-deprecated MINSIZEREL) + if(BENCHMARK_ENABLE_WERROR) + add_cxx_compiler_flag(-Wno-deprecated RELEASE) + add_cxx_compiler_flag(-Wno-deprecated RELWITHDEBINFO) + add_cxx_compiler_flag(-Wno-deprecated MINSIZEREL) + endif() if (NOT BENCHMARK_ENABLE_EXCEPTIONS) add_cxx_compiler_flag(-fno-exceptions) endif() @@ -307,7 +332,15 @@ if (BENCHMARK_ENABLE_GTEST_TESTS AND NOT (TARGET gtest AND TARGET gtest_main AND TARGET gmock AND TARGET gmock_main)) - include(GoogleTest) + if (BENCHMARK_USE_BUNDLED_GTEST) + include(GoogleTest) + else() + find_package(GTest CONFIG REQUIRED) + add_library(gtest ALIAS GTest::gtest) + add_library(gtest_main ALIAS GTest::gtest_main) + add_library(gmock ALIAS GTest::gmock) + add_library(gmock_main ALIAS GTest::gmock_main) + endif() endif() add_subdirectory(test) endif() diff --git a/MicroBenchmarks/libs/benchmark/CONTRIBUTORS b/MicroBenchmarks/libs/benchmark/CONTRIBUTORS --- a/MicroBenchmarks/libs/benchmark/CONTRIBUTORS +++ b/MicroBenchmarks/libs/benchmark/CONTRIBUTORS @@ -27,6 +27,7 @@ Alex Steele Andriy Berestovskyy Arne Beer +Bátor Tallér Billy Robert O'Neal III Chris Kennelly Christian Wassermann @@ -38,6 +39,8 @@ Deniz Evrenci Dominic Hamon Dominik Czarnota +Dominik Korman +Donald Aingworth Eric Backus Eric Fiselier Eugene Zhuk diff --git a/MicroBenchmarks/libs/benchmark/LICENSE b/MicroBenchmarks/libs/benchmark/LICENSE --- a/MicroBenchmarks/libs/benchmark/LICENSE +++ b/MicroBenchmarks/libs/benchmark/LICENSE @@ -200,3 +200,33 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + + + BSD 3-Clause License + +Copyright (c) [year], [fullname] + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
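The `thread_index` edit in the first hunk of this patch tracks an API change in benchmark 1.6: `State::thread_index` and `State::threads` became accessor functions rather than public data members (the pybind11 binding further down makes the matching move from `def_readonly` to `def_property_readonly`). A minimal sketch of the multithreaded setup/teardown pattern against the new accessors; the benchmark name and body are illustrative only, not taken from this patch:

```c++
#include <benchmark/benchmark.h>

static void BM_MultiThreaded(benchmark::State& state) {
  if (state.thread_index() == 0) {  // accessor call, no longer a data member
    // Global setup: runs once, on the first thread only.
  }
  for (auto _ : state) {
    // Work under test; DoNotOptimize keeps the loop body from being elided.
    benchmark::DoNotOptimize(state.iterations());
  }
  if (state.thread_index() == 0) {
    // Global teardown: runs once, on the first thread only.
  }
}
BENCHMARK(BM_MultiThreaded)->Threads(2);
BENCHMARK_MAIN();
```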
diff --git a/MicroBenchmarks/libs/benchmark/README.md b/MicroBenchmarks/libs/benchmark/README.md --- a/MicroBenchmarks/libs/benchmark/README.md +++ b/MicroBenchmarks/libs/benchmark/README.md @@ -27,14 +27,16 @@ BENCHMARK_MAIN(); ``` +## Getting Started + To get started, see [Requirements](#requirements) and [Installation](#installation). See [Usage](#usage) for a full example and the -[User Guide](#user-guide) for a more comprehensive feature overview. +[User Guide](docs/user_guide.md) for a more comprehensive feature overview. It may also help to read the [Google Test documentation](https://github.com/google/googletest/blob/master/docs/primer.md) as some of the structural aspects of the APIs are similar. -### Resources +## Resources [Discussion group](https://groups.google.com/d/forum/benchmark-discuss) @@ -57,27 +59,25 @@ * Visual Studio 14 2015 * Intel 2015 Update 1 -See [Platform-Specific Build Instructions](#platform-specific-build-instructions). +See [Platform-Specific Build Instructions](docs/platform_specific_build_instructions.md). ## Installation This describes the installation process using cmake. As pre-requisites, you'll need git and cmake installed. -_See [dependencies.md](dependencies.md) for more details regarding supported +_See [dependencies.md](docs/dependencies.md) for more details regarding supported versions of build tools._ ```bash # Check out the library. $ git clone https://github.com/google/benchmark.git -# Benchmark requires Google Test as a dependency. Add the source tree as a subdirectory. -$ git clone https://github.com/google/googletest.git benchmark/googletest # Go to the library root directory $ cd benchmark # Make a build directory to place the build output. $ cmake -E make_directory "build" -# Generate build system files with cmake. -$ cmake -E chdir "build" cmake -DCMAKE_BUILD_TYPE=Release ../ +# Generate build system files with cmake, and download any dependencies. +$ cmake -E chdir "build" cmake -DBENCHMARK_DOWNLOAD_DEPENDENCIES=on -DCMAKE_BUILD_TYPE=Release ../ # or, starting with CMake 3.13, use a simpler form: # cmake -DCMAKE_BUILD_TYPE=Release -S . -B "build" # Build the library. @@ -111,10 +111,10 @@ Note that Google Benchmark requires Google Test to build and run the tests. This dependency can be provided two ways: -* Checkout the Google Test sources into `benchmark/googletest` as above. +* Checkout the Google Test sources into `benchmark/googletest`. * Otherwise, if `-DBENCHMARK_DOWNLOAD_DEPENDENCIES=ON` is specified during - configuration, the library will automatically download and build any required - dependencies. + configuration as above, the library will automatically download and build + any required dependencies. If you do not wish to build and run the tests, add `-DBENCHMARK_ENABLE_GTEST_TESTS=OFF` to `CMAKE_ARGS`. @@ -193,7 +193,7 @@ `BENCHMARK_MAIN();` above to get the same behavior. The compiled executable will run all benchmarks by default. Pass the `--help` -flag for option information or see the guide below. +flag for option information or see the [User Guide](docs/user_guide.md). ### Usage with CMake @@ -214,1187 +214,3 @@ ```cmake target_link_libraries(MyTarget benchmark::benchmark) ``` - -## Platform Specific Build Instructions - -### Building with GCC - -When the library is built using GCC it is necessary to link with the pthread -library due to how GCC implements `std::thread`. Failing to link to pthread will -lead to runtime exceptions (unless you're using libc++), not linker errors. 
See
-[issue #67](https://github.com/google/benchmark/issues/67) for more details. You
-can link to pthread by adding `-pthread` to your linker command. Note, you can
-also use `-lpthread`, but there are potential issues with ordering of command
-line parameters if you use that.
-
-On QNX, the pthread library is part of libc and usually included automatically
-(see
-[`pthread_create()`](https://www.qnx.com/developers/docs/7.1/index.html#com.qnx.doc.neutrino.lib_ref/topic/p/pthread_create.html)).
-There's no separate pthread library to link.
-
-### Building with Visual Studio 2015 or 2017
-
-The `shlwapi` library (`-lshlwapi`) is required to support a call to `CPUInfo` which reads the registry. Either add `shlwapi.lib` under `[ Configuration Properties > Linker > Input ]`, or use the following:
-
-```
-// Alternatively, can add libraries using linker options.
-#ifdef _WIN32
-#pragma comment ( lib, "Shlwapi.lib" )
-#ifdef _DEBUG
-#pragma comment ( lib, "benchmarkd.lib" )
-#else
-#pragma comment ( lib, "benchmark.lib" )
-#endif
-#endif
-```
-
-Can also use the graphical version of CMake:
-* Open `CMake GUI`.
-* Under `Where to build the binaries`, same path as source plus `build`.
-* Under `CMAKE_INSTALL_PREFIX`, same path as source plus `install`.
-* Click `Configure`, `Generate`, `Open Project`.
-* If build fails, try deleting entire directory and starting again, or unticking options to build less.
-
-### Building with Intel 2015 Update 1 or Intel System Studio Update 4
-
-See instructions for building with Visual Studio. Once built, right click on the solution and change the build to Intel.
-
-### Building on Solaris
-
-If you're running benchmarks on solaris, you'll want the kstat library linked in
-too (`-lkstat`).
-
-## User Guide
-
-### Command Line
-
-[Output Formats](#output-formats)
-
-[Output Files](#output-files)
-
-[Running Benchmarks](#running-benchmarks)
-
-[Running a Subset of Benchmarks](#running-a-subset-of-benchmarks)
-
-[Result Comparison](#result-comparison)
-
-[Extra Context](#extra-context)
-
-### Library
-
-[Runtime and Reporting Considerations](#runtime-and-reporting-considerations)
-
-[Passing Arguments](#passing-arguments)
-
-[Custom Benchmark Name](#custom-benchmark-name)
-
-[Calculating Asymptotic Complexity](#asymptotic-complexity)
-
-[Templated Benchmarks](#templated-benchmarks)
-
-[Fixtures](#fixtures)
-
-[Custom Counters](#custom-counters)
-
-[Multithreaded Benchmarks](#multithreaded-benchmarks)
-
-[CPU Timers](#cpu-timers)
-
-[Manual Timing](#manual-timing)
-
-[Setting the Time Unit](#setting-the-time-unit)
-
-[Random Interleaving](docs/random_interleaving.md)
-
-[User-Requested Performance Counters](docs/perf_counters.md)
-
-[Preventing Optimization](#preventing-optimization)
-
-[Reporting Statistics](#reporting-statistics)
-
-[Custom Statistics](#custom-statistics)
-
-[Using RegisterBenchmark](#using-register-benchmark)
-
-[Exiting with an Error](#exiting-with-an-error)
-
-[A Faster KeepRunning Loop](#a-faster-keep-running-loop)
-
-[Disabling CPU Frequency Scaling](#disabling-cpu-frequency-scaling)
-
-### Output Formats
-
-The library supports multiple output formats. Use the
-`--benchmark_format=<console|json|csv>` flag (or set the
-`BENCHMARK_FORMAT=<console|json|csv>` environment variable) to set
-the format type. `console` is the default format.
-
-The Console format is intended to be a human readable format. By default
-the format generates color output. Context is output on stderr and the
-tabular data on stdout. Example tabular output looks like:
-
-```
-Benchmark                               Time(ns)    CPU(ns) Iterations
-----------------------------------------------------------------------
-BM_SetInsert/1024/1                        28928      29349      23853  133.097kB/s   33.2742k items/s
-BM_SetInsert/1024/8                        32065      32913      21375  949.487kB/s   237.372k items/s
-BM_SetInsert/1024/10                       33157      33648      21431  1.13369MB/s   290.225k items/s
-```
-
-The JSON format outputs human readable json split into two top level attributes.
-The `context` attribute contains information about the run in general, including
-information about the CPU and the date.
-The `benchmarks` attribute contains a list of every benchmark run. Example json
-output looks like:
-
-```json
-{
-  "context": {
-    "date": "2015/03/17-18:40:25",
-    "num_cpus": 40,
-    "mhz_per_cpu": 2801,
-    "cpu_scaling_enabled": false,
-    "build_type": "debug"
-  },
-  "benchmarks": [
-    {
-      "name": "BM_SetInsert/1024/1",
-      "iterations": 94877,
-      "real_time": 29275,
-      "cpu_time": 29836,
-      "bytes_per_second": 134066,
-      "items_per_second": 33516
-    },
-    {
-      "name": "BM_SetInsert/1024/8",
-      "iterations": 21609,
-      "real_time": 32317,
-      "cpu_time": 32429,
-      "bytes_per_second": 986770,
-      "items_per_second": 246693
-    },
-    {
-      "name": "BM_SetInsert/1024/10",
-      "iterations": 21393,
-      "real_time": 32724,
-      "cpu_time": 33355,
-      "bytes_per_second": 1199226,
-      "items_per_second": 299807
-    }
-  ]
-}
-```
-
-The CSV format outputs comma-separated values. The `context` is output on stderr
-and the CSV itself on stdout. Example CSV output looks like:
-
-```
-name,iterations,real_time,cpu_time,bytes_per_second,items_per_second,label
-"BM_SetInsert/1024/1",65465,17890.7,8407.45,475768,118942,
-"BM_SetInsert/1024/8",116606,18810.1,9766.64,3.27646e+06,819115,
-"BM_SetInsert/1024/10",106365,17238.4,8421.53,4.74973e+06,1.18743e+06,
-```
-
-### Output Files
-
-Write benchmark results to a file with the `--benchmark_out=<filename>` option
-(or set `BENCHMARK_OUT`). Specify the output format with
-`--benchmark_out_format={json|console|csv}` (or set
-`BENCHMARK_OUT_FORMAT={json|console|csv}`). Note that the 'csv' reporter is
-deprecated and the saved `.csv` file
-[is not parsable](https://github.com/google/benchmark/issues/794) by csv
-parsers.
-
-Specifying `--benchmark_out` does not suppress the console output.
-
-### Running Benchmarks
-
-Benchmarks are executed by running the produced binaries. Benchmarks binaries,
-by default, accept options that may be specified either through their command
-line interface or by setting environment variables before execution. For every
-`--option_flag=<value>` CLI switch, a corresponding environment variable
-`OPTION_FLAG=<value>` exist and is used as default if set (CLI switches always
- prevails). A complete list of CLI options is available running benchmarks
- with the `--help` switch.
-
-### Running a Subset of Benchmarks
-
-The `--benchmark_filter=<regex>` option (or `BENCHMARK_FILTER=<regex>`
-environment variable) can be used to only run the benchmarks that match
-the specified `<regex>`. For example:
-
-```bash
-$ ./run_benchmarks.x --benchmark_filter=BM_memcpy/32
-Run on (1 X 2300 MHz CPU )
-2016-06-25 19:34:24
-Benchmark              Time           CPU Iterations
-----------------------------------------------------
-BM_memcpy/32          11 ns         11 ns   79545455
-BM_memcpy/32k       2181 ns       2185 ns     324074
-BM_memcpy/32          12 ns         12 ns   54687500
-BM_memcpy/32k       1834 ns       1837 ns     357143
-```
-
-### Result comparison
-
-It is possible to compare the benchmarking results.
-See [Additional Tooling Documentation](docs/tools.md) - - - -### Extra Context - -Sometimes it's useful to add extra context to the content printed before the -results. By default this section includes information about the CPU on which -the benchmarks are running. If you do want to add more context, you can use -the `benchmark_context` command line flag: - -```bash -$ ./run_benchmarks --benchmark_context=pwd=`pwd` -Run on (1 x 2300 MHz CPU) -pwd: /home/user/benchmark/ -Benchmark Time CPU Iterations ----------------------------------------------------- -BM_memcpy/32 11 ns 11 ns 79545455 -BM_memcpy/32k 2181 ns 2185 ns 324074 -``` - -You can get the same effect with the API: - -```c++ - benchmark::AddCustomContext("foo", "bar"); -``` - -Note that attempts to add a second value with the same key will fail with an -error message. - - - -### Runtime and Reporting Considerations - -When the benchmark binary is executed, each benchmark function is run serially. -The number of iterations to run is determined dynamically by running the -benchmark a few times and measuring the time taken and ensuring that the -ultimate result will be statistically stable. As such, faster benchmark -functions will be run for more iterations than slower benchmark functions, and -the number of iterations is thus reported. - -In all cases, the number of iterations for which the benchmark is run is -governed by the amount of time the benchmark takes. Concretely, the number of -iterations is at least one, not more than 1e9, until CPU time is greater than -the minimum time, or the wallclock time is 5x minimum time. The minimum time is -set per benchmark by calling `MinTime` on the registered benchmark object. - -Average timings are then reported over the iterations run. If multiple -repetitions are requested using the `--benchmark_repetitions` command-line -option, or at registration time, the benchmark function will be run several -times and statistical results across these repetitions will also be reported. - -As well as the per-benchmark entries, a preamble in the report will include -information about the machine on which the benchmarks are run. - - - -### Passing Arguments - -Sometimes a family of benchmarks can be implemented with just one routine that -takes an extra argument to specify which one of the family of benchmarks to -run. For example, the following code defines a family of benchmarks for -measuring the speed of `memcpy()` calls of different lengths: - -```c++ -static void BM_memcpy(benchmark::State& state) { - char* src = new char[state.range(0)]; - char* dst = new char[state.range(0)]; - memset(src, 'x', state.range(0)); - for (auto _ : state) - memcpy(dst, src, state.range(0)); - state.SetBytesProcessed(int64_t(state.iterations()) * - int64_t(state.range(0))); - delete[] src; - delete[] dst; -} -BENCHMARK(BM_memcpy)->Arg(8)->Arg(64)->Arg(512)->Arg(1<<10)->Arg(8<<10); -``` - -The preceding code is quite repetitive, and can be replaced with the following -short-hand. The following invocation will pick a few appropriate arguments in -the specified range and will generate a benchmark for each such argument. - -```c++ -BENCHMARK(BM_memcpy)->Range(8, 8<<10); -``` - -By default the arguments in the range are generated in multiples of eight and -the command above selects [ 8, 64, 512, 4k, 8k ]. In the following code the -range multiplier is changed to multiples of two. 
-
-```c++
-BENCHMARK(BM_memcpy)->RangeMultiplier(2)->Range(8, 8<<10);
-```
-
-Now arguments generated are [ 8, 16, 32, 64, 128, 256, 512, 1024, 2k, 4k, 8k ].
-
-The preceding code shows a method of defining a sparse range. The following
-example shows a method of defining a dense range. It is then used to benchmark
-the performance of `std::vector` initialization for uniformly increasing sizes.
-
-```c++
-static void BM_DenseRange(benchmark::State& state) {
-  for(auto _ : state) {
-    std::vector<int> v(state.range(0), state.range(0));
-    benchmark::DoNotOptimize(v.data());
-    benchmark::ClobberMemory();
-  }
-}
-BENCHMARK(BM_DenseRange)->DenseRange(0, 1024, 128);
-```
-
-Now arguments generated are [ 0, 128, 256, 384, 512, 640, 768, 896, 1024 ].
-
-You might have a benchmark that depends on two or more inputs. For example, the
-following code defines a family of benchmarks for measuring the speed of set
-insertion.
-
-```c++
-static void BM_SetInsert(benchmark::State& state) {
-  std::set<int> data;
-  for (auto _ : state) {
-    state.PauseTiming();
-    data = ConstructRandomSet(state.range(0));
-    state.ResumeTiming();
-    for (int j = 0; j < state.range(1); ++j)
-      data.insert(RandomNumber());
-  }
-}
-BENCHMARK(BM_SetInsert)
-    ->Args({1<<10, 128})
-    ->Args({2<<10, 128})
-    ->Args({4<<10, 128})
-    ->Args({8<<10, 128})
-    ->Args({1<<10, 512})
-    ->Args({2<<10, 512})
-    ->Args({4<<10, 512})
-    ->Args({8<<10, 512});
-```
-
-The preceding code is quite repetitive, and can be replaced with the following
-short-hand. The following macro will pick a few appropriate arguments in the
-product of the two specified ranges and will generate a benchmark for each such
-pair.
-
-```c++
-BENCHMARK(BM_SetInsert)->Ranges({{1<<10, 8<<10}, {128, 512}});
-```
-
-Some benchmarks may require specific argument values that cannot be expressed
-with `Ranges`. In this case, `ArgsProduct` offers the ability to generate a
-benchmark input for each combination in the product of the supplied vectors.
-
-```c++
-BENCHMARK(BM_SetInsert)
-    ->ArgsProduct({{1<<10, 3<<10, 8<<10}, {20, 40, 60, 80}})
-// would generate the same benchmark arguments as
-BENCHMARK(BM_SetInsert)
-    ->Args({1<<10, 20})
-    ->Args({3<<10, 20})
-    ->Args({8<<10, 20})
-    ->Args({3<<10, 40})
-    ->Args({8<<10, 40})
-    ->Args({1<<10, 40})
-    ->Args({1<<10, 60})
-    ->Args({3<<10, 60})
-    ->Args({8<<10, 60})
-    ->Args({1<<10, 80})
-    ->Args({3<<10, 80})
-    ->Args({8<<10, 80});
-```
-
-For the most common scenarios, helper methods for creating a list of
-integers for a given sparse or dense range are provided.
-
-```c++
-BENCHMARK(BM_SetInsert)
-    ->ArgsProduct({
-      benchmark::CreateRange(8, 128, /*multi=*/2),
-      benchmark::CreateDenseRange(1, 4, /*step=*/1)
-    })
-// would generate the same benchmark arguments as
-BENCHMARK(BM_SetInsert)
-    ->ArgsProduct({
-      {8, 16, 32, 64, 128},
-      {1, 2, 3, 4}
-    });
-```
-
-For more complex patterns of inputs, passing a custom function to `Apply` allows
-programmatic specification of an arbitrary set of arguments on which to run the
-benchmark. The following example enumerates a dense range on one parameter,
-and a sparse range on the second.
-
-```c++
-static void CustomArguments(benchmark::internal::Benchmark* b) {
-  for (int i = 0; i <= 10; ++i)
-    for (int j = 32; j <= 1024*1024; j *= 8)
-      b->Args({i, j});
-}
-BENCHMARK(BM_SetInsert)->Apply(CustomArguments);
-```
-
-#### Passing Arbitrary Arguments to a Benchmark
-
-In C++11 it is possible to define a benchmark that takes an arbitrary number
-of extra arguments. The `BENCHMARK_CAPTURE(func, test_case_name, ...args)`
-macro creates a benchmark that invokes `func` with the `benchmark::State` as
-the first argument followed by the specified `args...`.
-The `test_case_name` is appended to the name of the benchmark and
-should describe the values passed.
-
-```c++
-template <class ...ExtraArgs>
-void BM_takes_args(benchmark::State& state, ExtraArgs&&... extra_args) {
-  [...]
-}
-// Registers a benchmark named "BM_takes_args/int_string_test" that passes
-// the specified values to `extra_args`.
-BENCHMARK_CAPTURE(BM_takes_args, int_string_test, 42, std::string("abc"));
-```
-
-Note that elements of `...args` may refer to global variables. Users should
-avoid modifying global state inside of a benchmark.
-
-### Calculating Asymptotic Complexity (Big O)
-
-Asymptotic complexity might be calculated for a family of benchmarks. The
-following code will calculate the coefficient for the high-order term in the
-running time and the normalized root-mean square error of string comparison.
-
-```c++
-static void BM_StringCompare(benchmark::State& state) {
-  std::string s1(state.range(0), '-');
-  std::string s2(state.range(0), '-');
-  for (auto _ : state) {
-    benchmark::DoNotOptimize(s1.compare(s2));
-  }
-  state.SetComplexityN(state.range(0));
-}
-BENCHMARK(BM_StringCompare)
-    ->RangeMultiplier(2)->Range(1<<10, 1<<18)->Complexity(benchmark::oN);
-```
-
-As shown in the following invocation, asymptotic complexity might also be
-calculated automatically.
-
-```c++
-BENCHMARK(BM_StringCompare)
-    ->RangeMultiplier(2)->Range(1<<10, 1<<18)->Complexity();
-```
-
-The following code will specify asymptotic complexity with a lambda function,
-that might be used to customize high-order term calculation.
-
-```c++
-BENCHMARK(BM_StringCompare)->RangeMultiplier(2)
-    ->Range(1<<10, 1<<18)->Complexity([](benchmark::IterationCount n)->double{return n; });
-```
-
-### Custom Benchmark Name
-
-You can change the benchmark's name as follows:
-
-```c++
-BENCHMARK(BM_memcpy)->Name("memcpy")->RangeMultiplier(2)->Range(8, 8<<10);
-```
-
-The invocation will execute the benchmark as before using `BM_memcpy` but changes
-the prefix in the report to `memcpy`.
-
-### Templated Benchmarks
-
-This example produces and consumes messages of size `sizeof(v)` `range_x`
-times. It also outputs throughput in the absence of multiprogramming.
-
-```c++
-template <class Q> void BM_Sequential(benchmark::State& state) {
-  Q q;
-  typename Q::value_type v;
-  for (auto _ : state) {
-    for (int i = state.range(0); i--; )
-      q.push(v);
-    for (int e = state.range(0); e--; )
-      q.Wait(&v);
-  }
-  // actually messages, not bytes:
-  state.SetBytesProcessed(
-      static_cast<int64_t>(state.iterations())*state.range(0));
-}
-BENCHMARK_TEMPLATE(BM_Sequential, WaitQueue<int>)->Range(1<<0, 1<<10);
-```
-
-Three macros are provided for adding benchmark templates.
-
-```c++
-#ifdef BENCHMARK_HAS_CXX11
-#define BENCHMARK_TEMPLATE(func, ...) // Takes any number of parameters.
-#else // C++ < C++11
-#define BENCHMARK_TEMPLATE(func, arg1)
-#endif
-#define BENCHMARK_TEMPLATE1(func, arg1)
-#define BENCHMARK_TEMPLATE2(func, arg1, arg2)
-```
-
-### Fixtures
-
-Fixture tests are created by first defining a type that derives from
-`::benchmark::Fixture` and then creating/registering the tests using the
-following macros:
-
-* `BENCHMARK_F(ClassName, Method)`
-* `BENCHMARK_DEFINE_F(ClassName, Method)`
-* `BENCHMARK_REGISTER_F(ClassName, Method)`
-
-For Example:
-
-```c++
-class MyFixture : public benchmark::Fixture {
-public:
-  void SetUp(const ::benchmark::State& state) {
-  }
-
-  void TearDown(const ::benchmark::State& state) {
-  }
-};
-
-BENCHMARK_F(MyFixture, FooTest)(benchmark::State& st) {
-  for (auto _ : st) {
-    ...
-  }
-}
-
-BENCHMARK_DEFINE_F(MyFixture, BarTest)(benchmark::State& st) {
-  for (auto _ : st) {
-    ...
-  }
-}
-/* BarTest is NOT registered */
-BENCHMARK_REGISTER_F(MyFixture, BarTest)->Threads(2);
-/* BarTest is now registered */
-```
-
-#### Templated Fixtures
-
-Also you can create templated fixture by using the following macros:
-
-* `BENCHMARK_TEMPLATE_F(ClassName, Method, ...)`
-* `BENCHMARK_TEMPLATE_DEFINE_F(ClassName, Method, ...)`
-
-For example:
-
-```c++
-template <typename T>
-class MyFixture : public benchmark::Fixture {};
-
-BENCHMARK_TEMPLATE_F(MyFixture, IntTest, int)(benchmark::State& st) {
-  for (auto _ : st) {
-    ...
-  }
-}
-
-BENCHMARK_TEMPLATE_DEFINE_F(MyFixture, DoubleTest, double)(benchmark::State& st) {
-  for (auto _ : st) {
-    ...
-  }
-}
-
-BENCHMARK_REGISTER_F(MyFixture, DoubleTest)->Threads(2);
-```
-
-### Custom Counters
-
-You can add your own counters with user-defined names. The example below
-will add columns "Foo", "Bar" and "Baz" in its output:
-
-```c++
-static void UserCountersExample1(benchmark::State& state) {
-  double numFoos = 0, numBars = 0, numBazs = 0;
-  for (auto _ : state) {
-    // ... count Foo,Bar,Baz events
-  }
-  state.counters["Foo"] = numFoos;
-  state.counters["Bar"] = numBars;
-  state.counters["Baz"] = numBazs;
-}
-```
-
-The `state.counters` object is a `std::map` with `std::string` keys
-and `Counter` values. The latter is a `double`-like class, via an implicit
-conversion to `double&`. Thus you can use all of the standard arithmetic
-assignment operators (`=,+=,-=,*=,/=`) to change the value of each counter.
-
-In multithreaded benchmarks, each counter is set on the calling thread only.
-When the benchmark finishes, the counters from each thread will be summed;
-the resulting sum is the value which will be shown for the benchmark.
-
-The `Counter` constructor accepts three parameters: the value as a `double`
-; a bit flag which allows you to show counters as rates, and/or as per-thread
-iteration, and/or as per-thread averages, and/or iteration invariants,
-and/or finally inverting the result; and a flag specifying the 'unit' - i.e.
-is 1k a 1000 (default, `benchmark::Counter::OneK::kIs1000`), or 1024
-(`benchmark::Counter::OneK::kIs1024`)?
-
-```c++
-  // sets a simple counter
-  state.counters["Foo"] = numFoos;
-
-  // Set the counter as a rate. It will be presented divided
-  // by the duration of the benchmark.
-  // Meaning: per one second, how many 'foo's are processed?
-  state.counters["FooRate"] = Counter(numFoos, benchmark::Counter::kIsRate);
-
-  // Set the counter as a rate. It will be presented divided
-  // by the duration of the benchmark, and the result inverted.
-  // Meaning: how many seconds it takes to process one 'foo'?
- state.counters["FooInvRate"] = Counter(numFoos, benchmark::Counter::kIsRate | benchmark::Counter::kInvert); - - // Set the counter as a thread-average quantity. It will - // be presented divided by the number of threads. - state.counters["FooAvg"] = Counter(numFoos, benchmark::Counter::kAvgThreads); - - // There's also a combined flag: - state.counters["FooAvgRate"] = Counter(numFoos,benchmark::Counter::kAvgThreadsRate); - - // This says that we process with the rate of state.range(0) bytes every iteration: - state.counters["BytesProcessed"] = Counter(state.range(0), benchmark::Counter::kIsIterationInvariantRate, benchmark::Counter::OneK::kIs1024); -``` - -When you're compiling in C++11 mode or later you can use `insert()` with -`std::initializer_list`: - -```c++ - // With C++11, this can be done: - state.counters.insert({{"Foo", numFoos}, {"Bar", numBars}, {"Baz", numBazs}}); - // ... instead of: - state.counters["Foo"] = numFoos; - state.counters["Bar"] = numBars; - state.counters["Baz"] = numBazs; -``` - -#### Counter Reporting - -When using the console reporter, by default, user counters are printed at -the end after the table, the same way as ``bytes_processed`` and -``items_processed``. This is best for cases in which there are few counters, -or where there are only a couple of lines per benchmark. Here's an example of -the default output: - -``` ------------------------------------------------------------------------------- -Benchmark Time CPU Iterations UserCounters... ------------------------------------------------------------------------------- -BM_UserCounter/threads:8 2248 ns 10277 ns 68808 Bar=16 Bat=40 Baz=24 Foo=8 -BM_UserCounter/threads:1 9797 ns 9788 ns 71523 Bar=2 Bat=5 Baz=3 Foo=1024m -BM_UserCounter/threads:2 4924 ns 9842 ns 71036 Bar=4 Bat=10 Baz=6 Foo=2 -BM_UserCounter/threads:4 2589 ns 10284 ns 68012 Bar=8 Bat=20 Baz=12 Foo=4 -BM_UserCounter/threads:8 2212 ns 10287 ns 68040 Bar=16 Bat=40 Baz=24 Foo=8 -BM_UserCounter/threads:16 1782 ns 10278 ns 68144 Bar=32 Bat=80 Baz=48 Foo=16 -BM_UserCounter/threads:32 1291 ns 10296 ns 68256 Bar=64 Bat=160 Baz=96 Foo=32 -BM_UserCounter/threads:4 2615 ns 10307 ns 68040 Bar=8 Bat=20 Baz=12 Foo=4 -BM_Factorial 26 ns 26 ns 26608979 40320 -BM_Factorial/real_time 26 ns 26 ns 26587936 40320 -BM_CalculatePiRange/1 16 ns 16 ns 45704255 0 -BM_CalculatePiRange/8 73 ns 73 ns 9520927 3.28374 -BM_CalculatePiRange/64 609 ns 609 ns 1140647 3.15746 -BM_CalculatePiRange/512 4900 ns 4901 ns 142696 3.14355 -``` - -If this doesn't suit you, you can print each counter as a table column by -passing the flag `--benchmark_counters_tabular=true` to the benchmark -application. This is best for cases in which there are a lot of counters, or -a lot of lines per individual benchmark. Note that this will trigger a -reprinting of the table header any time the counter set changes between -individual benchmarks. 
Here's an example of corresponding output when -`--benchmark_counters_tabular=true` is passed: - -``` ---------------------------------------------------------------------------------------- -Benchmark Time CPU Iterations Bar Bat Baz Foo ---------------------------------------------------------------------------------------- -BM_UserCounter/threads:8 2198 ns 9953 ns 70688 16 40 24 8 -BM_UserCounter/threads:1 9504 ns 9504 ns 73787 2 5 3 1 -BM_UserCounter/threads:2 4775 ns 9550 ns 72606 4 10 6 2 -BM_UserCounter/threads:4 2508 ns 9951 ns 70332 8 20 12 4 -BM_UserCounter/threads:8 2055 ns 9933 ns 70344 16 40 24 8 -BM_UserCounter/threads:16 1610 ns 9946 ns 70720 32 80 48 16 -BM_UserCounter/threads:32 1192 ns 9948 ns 70496 64 160 96 32 -BM_UserCounter/threads:4 2506 ns 9949 ns 70332 8 20 12 4 --------------------------------------------------------------- -Benchmark Time CPU Iterations --------------------------------------------------------------- -BM_Factorial 26 ns 26 ns 26392245 40320 -BM_Factorial/real_time 26 ns 26 ns 26494107 40320 -BM_CalculatePiRange/1 15 ns 15 ns 45571597 0 -BM_CalculatePiRange/8 74 ns 74 ns 9450212 3.28374 -BM_CalculatePiRange/64 595 ns 595 ns 1173901 3.15746 -BM_CalculatePiRange/512 4752 ns 4752 ns 147380 3.14355 -BM_CalculatePiRange/4k 37970 ns 37972 ns 18453 3.14184 -BM_CalculatePiRange/32k 303733 ns 303744 ns 2305 3.14162 -BM_CalculatePiRange/256k 2434095 ns 2434186 ns 288 3.1416 -BM_CalculatePiRange/1024k 9721140 ns 9721413 ns 71 3.14159 -BM_CalculatePi/threads:8 2255 ns 9943 ns 70936 -``` - -Note above the additional header printed when the benchmark changes from -``BM_UserCounter`` to ``BM_Factorial``. This is because ``BM_Factorial`` does -not have the same counter set as ``BM_UserCounter``. - - - -### Multithreaded Benchmarks - -In a multithreaded test (benchmark invoked by multiple threads simultaneously), -it is guaranteed that none of the threads will start until all have reached -the start of the benchmark loop, and all will have finished before any thread -exits the benchmark loop. (This behavior is also provided by the `KeepRunning()` -API) As such, any global setup or teardown can be wrapped in a check against the thread -index: - -```c++ -static void BM_MultiThreaded(benchmark::State& state) { - if (state.thread_index == 0) { - // Setup code here. - } - for (auto _ : state) { - // Run the test as normal. - } - if (state.thread_index == 0) { - // Teardown code here. - } -} -BENCHMARK(BM_MultiThreaded)->Threads(2); -``` - -If the benchmarked code itself uses threads and you want to compare it to -single-threaded code, you may want to use real-time ("wallclock") measurements -for latency comparisons: - -```c++ -BENCHMARK(BM_test)->Range(8, 8<<10)->UseRealTime(); -``` - -Without `UseRealTime`, CPU time is used by default. - - - -### CPU Timers - -By default, the CPU timer only measures the time spent by the main thread. -If the benchmark itself uses threads internally, this measurement may not -be what you are looking for. Instead, there is a way to measure the total -CPU usage of the process, by all the threads. - -```c++ -void callee(int i); - -static void MyMain(int size) { -#pragma omp parallel for - for(int i = 0; i < size; i++) - callee(i); -} - -static void BM_OpenMP(benchmark::State& state) { - for (auto _ : state) - MyMain(state.range(0)); -} - -// Measure the time spent by the main thread, use it to decide for how long to -// run the benchmark loop. 
Depending on the internal implementation detail may
-// measure to anywhere from near-zero (the overhead spent before/after work
-// handoff to worker thread[s]) to the whole single-thread time.
-BENCHMARK(BM_OpenMP)->Range(8, 8<<10);
-
-// Measure the user-visible time, the wall clock (literally, the time that
-// has passed on the clock on the wall), use it to decide for how long to
-// run the benchmark loop. This will always be meaningful, an will match the
-// time spent by the main thread in single-threaded case, in general decreasing
-// with the number of internal threads doing the work.
-BENCHMARK(BM_OpenMP)->Range(8, 8<<10)->UseRealTime();
-
-// Measure the total CPU consumption, use it to decide for how long to
-// run the benchmark loop. This will always measure to no less than the
-// time spent by the main thread in single-threaded case.
-BENCHMARK(BM_OpenMP)->Range(8, 8<<10)->MeasureProcessCPUTime();
-
-// A mixture of the last two. Measure the total CPU consumption, but use the
-// wall clock to decide for how long to run the benchmark loop.
-BENCHMARK(BM_OpenMP)->Range(8, 8<<10)->MeasureProcessCPUTime()->UseRealTime();
-```
-
-#### Controlling Timers
-
-Normally, the entire duration of the work loop (`for (auto _ : state) {}`)
-is measured. But sometimes, it is necessary to do some work inside of
-that loop, every iteration, but without counting that time to the benchmark time.
-That is possible, although it is not recommended, since it has high overhead.
-
-```c++
-static void BM_SetInsert_With_Timer_Control(benchmark::State& state) {
-  std::set<int> data;
-  for (auto _ : state) {
-    state.PauseTiming(); // Stop timers. They will not count until they are resumed.
-    data = ConstructRandomSet(state.range(0)); // Do something that should not be measured
-    state.ResumeTiming(); // And resume timers. They are now counting again.
-    // The rest will be measured.
-    for (int j = 0; j < state.range(1); ++j)
-      data.insert(RandomNumber());
-  }
-}
-BENCHMARK(BM_SetInsert_With_Timer_Control)->Ranges({{1<<10, 8<<10}, {128, 512}});
-```
-
-### Manual Timing
-
-For benchmarking something for which neither CPU time nor real-time are
-correct or accurate enough, completely manual timing is supported using
-the `UseManualTime` function.
-
-When `UseManualTime` is used, the benchmarked code must call
-`SetIterationTime` once per iteration of the benchmark loop to
-report the manually measured time.
-
-An example use case for this is benchmarking GPU execution (e.g. OpenCL
-or CUDA kernels, OpenGL or Vulkan or Direct3D draw calls), which cannot
-be accurately measured using CPU time or real-time. Instead, they can be
-measured accurately using a dedicated API, and these measurement results
-can be reported back with `SetIterationTime`.
-
-```c++
-static void BM_ManualTiming(benchmark::State& state) {
-  int microseconds = state.range(0);
-  std::chrono::duration<double, std::micro> sleep_duration {
-    static_cast<double>(microseconds)
-  };
-
-  for (auto _ : state) {
-    auto start = std::chrono::high_resolution_clock::now();
-    // Simulate some useful workload with a sleep
-    std::this_thread::sleep_for(sleep_duration);
-    auto end = std::chrono::high_resolution_clock::now();
-
-    auto elapsed_seconds =
-      std::chrono::duration_cast<std::chrono::duration<double>>(
-        end - start);
-
-    state.SetIterationTime(elapsed_seconds.count());
-  }
-}
-BENCHMARK(BM_ManualTiming)->Range(1, 1<<17)->UseManualTime();
-```
-
-### Setting the Time Unit
-
-If a benchmark runs a few milliseconds it may be hard to visually compare the
-measured times, since the output data is given in nanoseconds per default. In
-order to manually set the time unit, you can specify it manually:
-
-```c++
-BENCHMARK(BM_test)->Unit(benchmark::kMillisecond);
-```
-
-### Preventing Optimization
-
-To prevent a value or expression from being optimized away by the compiler
-the `benchmark::DoNotOptimize(...)` and `benchmark::ClobberMemory()`
-functions can be used.
-
-```c++
-static void BM_test(benchmark::State& state) {
-  for (auto _ : state) {
-    int x = 0;
-    for (int i=0; i < 64; ++i) {
-      benchmark::DoNotOptimize(x += i);
-    }
-  }
-}
-```
-
-`DoNotOptimize(<expr>)` forces the *result* of `<expr>` to be stored in either
-memory or a register. For GNU based compilers it acts as read/write barrier
-for global memory. More specifically it forces the compiler to flush pending
-writes to memory and reload any other values as necessary.
-
-Note that `DoNotOptimize(<expr>)` does not prevent optimizations on `<expr>`
-in any way. `<expr>` may even be removed entirely when the result is already
-known. For example:
-
-```c++
-  /* Example 1: `<expr>` is removed entirely. */
-  int foo(int x) { return x + 42; }
-  while (...) DoNotOptimize(foo(0)); // Optimized to DoNotOptimize(42);
-
-  /* Example 2: Result of '<expr>' is only reused */
-  int bar(int) __attribute__((const));
-  while (...) DoNotOptimize(bar(0)); // Optimized to:
-  // int __result__ = bar(0);
-  // while (...) DoNotOptimize(__result__);
-```
-
-The second tool for preventing optimizations is `ClobberMemory()`. In essence
-`ClobberMemory()` forces the compiler to perform all pending writes to global
-memory. Memory managed by block scope objects must be "escaped" using
-`DoNotOptimize(...)` before it can be clobbered. In the below example
-`ClobberMemory()` prevents the call to `v.push_back(42)` from being optimized
-away.
-
-```c++
-static void BM_vector_push_back(benchmark::State& state) {
-  for (auto _ : state) {
-    std::vector<int> v;
-    v.reserve(1);
-    benchmark::DoNotOptimize(v.data()); // Allow v.data() to be clobbered.
-    v.push_back(42);
-    benchmark::ClobberMemory(); // Force 42 to be written to memory.
-  }
-}
-```
-
-Note that `ClobberMemory()` is only available for GNU or MSVC based compilers.
-
-### Statistics: Reporting the Mean, Median and Standard Deviation of Repeated Benchmarks
-
-By default each benchmark is run once and that single result is reported.
-However benchmarks are often noisy and a single result may not be representative
-of the overall behavior. For this reason it's possible to repeatedly rerun the
-benchmark.
-
-The number of runs of each benchmark is specified globally by the
-`--benchmark_repetitions` flag or on a per benchmark basis by calling
-`Repetitions` on the registered benchmark object. When a benchmark is run more
-than once the mean, median and standard deviation of the runs will be reported.
-
-Additionally the `--benchmark_report_aggregates_only={true|false}`,
-`--benchmark_display_aggregates_only={true|false}` flags or
-`ReportAggregatesOnly(bool)`, `DisplayAggregatesOnly(bool)` functions can be
-used to change how repeated tests are reported. By default the result of each
-repeated run is reported. When `report aggregates only` option is `true`,
-only the aggregates (i.e. mean, median and standard deviation, maybe complexity
-measurements if they were requested) of the runs is reported, to both the
-reporters - standard output (console), and the file.
-However when only the `display aggregates only` option is `true`,
-only the aggregates are displayed in the standard output, while the file
-output still contains everything.
-Calling `ReportAggregatesOnly(bool)` / `DisplayAggregatesOnly(bool)` on a
-registered benchmark object overrides the value of the appropriate flag for that
-benchmark.
-
-### Custom Statistics
-
-While having mean, median and standard deviation is nice, this may not be
-enough for everyone. For example you may want to know what the largest
-observation is, e.g. because you have some real-time constraints. This is easy.
-The following code will specify a custom statistic to be calculated, defined
-by a lambda function.
-
-```c++
-void BM_spin_empty(benchmark::State& state) {
-  for (auto _ : state) {
-    for (int x = 0; x < state.range(0); ++x) {
-      benchmark::DoNotOptimize(x);
-    }
-  }
-}
-
-BENCHMARK(BM_spin_empty)
-  ->ComputeStatistics("max", [](const std::vector<double>& v) -> double {
-    return *(std::max_element(std::begin(v), std::end(v)));
-  })
-  ->Arg(512);
-```
-
-### Using RegisterBenchmark(name, fn, args...)
-
-The `RegisterBenchmark(name, func, args...)` function provides an alternative
-way to create and register benchmarks.
-`RegisterBenchmark(name, func, args...)` creates, registers, and returns a
-pointer to a new benchmark with the specified `name` that invokes
-`func(st, args...)` where `st` is a `benchmark::State` object.
-
-Unlike the `BENCHMARK` registration macros, which can only be used at the global
-scope, the `RegisterBenchmark` can be called anywhere. This allows for
-benchmark tests to be registered programmatically.
-
-Additionally `RegisterBenchmark` allows any callable object to be registered
-as a benchmark. Including capturing lambdas and function objects.
-
-For Example:
-```c++
-auto BM_test = [](benchmark::State& st, auto Inputs) { /* ... */ };
-
-int main(int argc, char** argv) {
-  for (auto& test_input : { /* ... */ })
-    benchmark::RegisterBenchmark(test_input.name(), BM_test, test_input);
-  benchmark::Initialize(&argc, argv);
-  benchmark::RunSpecifiedBenchmarks();
-  benchmark::Shutdown();
-}
-```
-
-### Exiting with an Error
-
-When errors caused by external influences, such as file I/O and network
-communication, occur within a benchmark the
-`State::SkipWithError(const char* msg)` function can be used to skip that run
-of benchmark and report the error. Note that only future iterations of the
-`KeepRunning()` are skipped. For the ranged-for version of the benchmark loop
-Users must explicitly exit the loop, otherwise all iterations will be performed.
-Users may explicitly return to exit the benchmark immediately.
-
-The `SkipWithError(...)` function may be used at any point within the benchmark,
-including before and after the benchmark loop.
Moreover, if `SkipWithError(...)` -has been used, it is not required to reach the benchmark loop and one may return -from the benchmark function early. - -For example: - -```c++ -static void BM_test(benchmark::State& state) { - auto resource = GetResource(); - if (!resource.good()) { - state.SkipWithError("Resource is not good!"); - // KeepRunning() loop will not be entered. - } - while (state.KeepRunning()) { - auto data = resource.read_data(); - if (!resource.good()) { - state.SkipWithError("Failed to read data!"); - break; // Needed to skip the rest of the iteration. - } - do_stuff(data); - } -} - -static void BM_test_ranged_fo(benchmark::State & state) { - auto resource = GetResource(); - if (!resource.good()) { - state.SkipWithError("Resource is not good!"); - return; // Early return is allowed when SkipWithError() has been used. - } - for (auto _ : state) { - auto data = resource.read_data(); - if (!resource.good()) { - state.SkipWithError("Failed to read data!"); - break; // REQUIRED to prevent all further iterations. - } - do_stuff(data); - } -} -``` - - -### A Faster KeepRunning Loop - -In C++11 mode, a ranged-based for loop should be used in preference to -the `KeepRunning` loop for running the benchmarks. For example: - -```c++ -static void BM_Fast(benchmark::State &state) { - for (auto _ : state) { - FastOperation(); - } -} -BENCHMARK(BM_Fast); -``` - -The reason the ranged-for loop is faster than using `KeepRunning`, is -because `KeepRunning` requires a memory load and store of the iteration count -ever iteration, whereas the ranged-for variant is able to keep the iteration count -in a register. - -For example, an empty inner loop of using the ranged-based for method looks like: - -```asm -# Loop Init - mov rbx, qword ptr [r14 + 104] - call benchmark::State::StartKeepRunning() - test rbx, rbx - je .LoopEnd -.LoopHeader: # =>This Inner Loop Header: Depth=1 - add rbx, -1 - jne .LoopHeader -.LoopEnd: -``` - -Compared to an empty `KeepRunning` loop, which looks like: - -```asm -.LoopHeader: # in Loop: Header=BB0_3 Depth=1 - cmp byte ptr [rbx], 1 - jne .LoopInit -.LoopBody: # =>This Inner Loop Header: Depth=1 - mov rax, qword ptr [rbx + 8] - lea rcx, [rax + 1] - mov qword ptr [rbx + 8], rcx - cmp rax, qword ptr [rbx + 104] - jb .LoopHeader - jmp .LoopEnd -.LoopInit: - mov rdi, rbx - call benchmark::State::StartKeepRunning() - jmp .LoopBody -.LoopEnd: -``` - -Unless C++03 compatibility is required, the ranged-for variant of writing -the benchmark loop should be preferred. - - - -### Disabling CPU Frequency Scaling - -If you see this error: - -``` -***WARNING*** CPU scaling is enabled, the benchmark real time measurements may be noisy and will incur extra overhead. 
-``` - -you might want to disable the CPU frequency scaling while running the benchmark: - -```bash -sudo cpupower frequency-set --governor performance -./mybench -sudo cpupower frequency-set --governor powersave -``` diff --git a/MicroBenchmarks/libs/benchmark/WORKSPACE b/MicroBenchmarks/libs/benchmark/WORKSPACE --- a/MicroBenchmarks/libs/benchmark/WORKSPACE +++ b/MicroBenchmarks/libs/benchmark/WORKSPACE @@ -1,13 +1,7 @@ workspace(name = "com_github_google_benchmark") load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") - -http_archive( - name = "rules_cc", - strip_prefix = "rules_cc-a508235df92e71d537fcbae0c7c952ea6957a912", - urls = ["https://github.com/bazelbuild/rules_cc/archive/a508235df92e71d537fcbae0c7c952ea6957a912.zip"], - sha256 = "d7dc12c1d5bc1a87474de8e3d17b7731a4dcebcfb8aa3990fe8ac7734ef12f2f", -) +load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository") http_archive( name = "com_google_absl", @@ -16,11 +10,10 @@ urls = ["https://github.com/abseil/abseil-cpp/archive/20200225.2.tar.gz"], ) -http_archive( +git_repository( name = "com_google_googletest", - strip_prefix = "googletest-3f0cf6b62ad1eb50d8736538363d3580dd640c3e", - urls = ["https://github.com/google/googletest/archive/3f0cf6b62ad1eb50d8736538363d3580dd640c3e.zip"], - sha256 = "8f827dd550db8b4fdf73904690df0be9fccc161017c9038a724bc9a0617a1bc8", + remote = "https://github.com/google/googletest.git", + tag = "release-1.11.0", ) http_archive( diff --git a/MicroBenchmarks/libs/benchmark/bindings/python/google_benchmark/__init__.py b/MicroBenchmarks/libs/benchmark/bindings/python/google_benchmark/__init__.py --- a/MicroBenchmarks/libs/benchmark/bindings/python/google_benchmark/__init__.py +++ b/MicroBenchmarks/libs/benchmark/bindings/python/google_benchmark/__init__.py @@ -66,7 +66,7 @@ "oLambda", ] -__version__ = "0.2.0" +__version__ = "1.6.1" class __OptionMaker: diff --git a/MicroBenchmarks/libs/benchmark/bindings/python/google_benchmark/benchmark.cc b/MicroBenchmarks/libs/benchmark/bindings/python/google_benchmark/benchmark.cc --- a/MicroBenchmarks/libs/benchmark/bindings/python/google_benchmark/benchmark.cc +++ b/MicroBenchmarks/libs/benchmark/bindings/python/google_benchmark/benchmark.cc @@ -1,5 +1,7 @@ // Benchmark for Python. 
+#include "benchmark/benchmark.h"
+
 #include <map>
 #include <string>
 #include <vector>
@@ -9,8 +11,6 @@
 #include "pybind11/stl.h"
 #include "pybind11/stl_bind.h"
 
-#include "benchmark/benchmark.h"
-
 PYBIND11_MAKE_OPAQUE(benchmark::UserCounters);
 
 namespace {
@@ -165,12 +165,12 @@
                     &State::SetComplexityN)
       .def_property("items_processed", &State::items_processed,
                     &State::SetItemsProcessed)
-      .def("set_label", (void (State::*)(const char*)) & State::SetLabel)
+      .def("set_label", (void(State::*)(const char*)) & State::SetLabel)
       .def("range", &State::range, py::arg("pos") = 0)
       .def_property_readonly("iterations", &State::iterations)
       .def_readwrite("counters", &State::counters)
-      .def_readonly("thread_index", &State::thread_index)
-      .def_readonly("threads", &State::threads);
+      .def_property_readonly("thread_index", &State::thread_index)
+      .def_property_readonly("threads", &State::threads);
 
   m.def("Initialize", Initialize);
   m.def("RegisterBenchmark", RegisterBenchmark,
diff --git a/MicroBenchmarks/libs/benchmark/cmake/Config.cmake.in b/MicroBenchmarks/libs/benchmark/cmake/Config.cmake.in
--- a/MicroBenchmarks/libs/benchmark/cmake/Config.cmake.in
+++ b/MicroBenchmarks/libs/benchmark/cmake/Config.cmake.in
@@ -1 +1,7 @@
+@PACKAGE_INIT@
+
+include (CMakeFindDependencyMacro)
+
+find_dependency (Threads)
+
 include("${CMAKE_CURRENT_LIST_DIR}/@targets_export_name@.cmake")
diff --git a/MicroBenchmarks/libs/benchmark/cmake/GoogleTest.cmake b/MicroBenchmarks/libs/benchmark/cmake/GoogleTest.cmake
--- a/MicroBenchmarks/libs/benchmark/cmake/GoogleTest.cmake
+++ b/MicroBenchmarks/libs/benchmark/cmake/GoogleTest.cmake
@@ -29,13 +29,24 @@
 include(${GOOGLETEST_PREFIX}/googletest-paths.cmake)
 
+# googletest doesn't seem to want to stay build warning clean so let's not hurt ourselves.
+if (MSVC)
+  add_compile_options(/wd4244 /wd4722)
+else()
+  add_compile_options(-w)
+endif()
+
 # Add googletest directly to our build. This defines
 # the gtest and gtest_main targets.
 add_subdirectory(${GOOGLETEST_SOURCE_DIR}
                  ${GOOGLETEST_BINARY_DIR}
                  EXCLUDE_FROM_ALL)
 
-set_target_properties(gtest PROPERTIES INTERFACE_SYSTEM_INCLUDE_DIRECTORIES $<TARGET_PROPERTY:gtest,INTERFACE_INCLUDE_DIRECTORIES>)
-set_target_properties(gtest_main PROPERTIES INTERFACE_SYSTEM_INCLUDE_DIRECTORIES $<TARGET_PROPERTY:gtest_main,INTERFACE_INCLUDE_DIRECTORIES>)
-set_target_properties(gmock PROPERTIES INTERFACE_SYSTEM_INCLUDE_DIRECTORIES $<TARGET_PROPERTY:gmock,INTERFACE_INCLUDE_DIRECTORIES>)
-set_target_properties(gmock_main PROPERTIES INTERFACE_SYSTEM_INCLUDE_DIRECTORIES $<TARGET_PROPERTY:gmock_main,INTERFACE_INCLUDE_DIRECTORIES>)
+if(NOT DEFINED GTEST_COMPILE_COMMANDS)
+  set(GTEST_COMPILE_COMMANDS ON)
+endif()
+
+set_target_properties(gtest PROPERTIES INTERFACE_SYSTEM_INCLUDE_DIRECTORIES $<TARGET_PROPERTY:gtest,INTERFACE_INCLUDE_DIRECTORIES> EXPORT_COMPILE_COMMANDS ${GTEST_COMPILE_COMMANDS})
+set_target_properties(gtest_main PROPERTIES INTERFACE_SYSTEM_INCLUDE_DIRECTORIES $<TARGET_PROPERTY:gtest_main,INTERFACE_INCLUDE_DIRECTORIES> EXPORT_COMPILE_COMMANDS ${GTEST_COMPILE_COMMANDS})
+set_target_properties(gmock PROPERTIES INTERFACE_SYSTEM_INCLUDE_DIRECTORIES $<TARGET_PROPERTY:gmock,INTERFACE_INCLUDE_DIRECTORIES> EXPORT_COMPILE_COMMANDS ${GTEST_COMPILE_COMMANDS})
+set_target_properties(gmock_main PROPERTIES INTERFACE_SYSTEM_INCLUDE_DIRECTORIES $<TARGET_PROPERTY:gmock_main,INTERFACE_INCLUDE_DIRECTORIES> EXPORT_COMPILE_COMMANDS ${GTEST_COMPILE_COMMANDS})
diff --git a/MicroBenchmarks/libs/benchmark/cmake/GoogleTest.cmake.in b/MicroBenchmarks/libs/benchmark/cmake/GoogleTest.cmake.in
--- a/MicroBenchmarks/libs/benchmark/cmake/GoogleTest.cmake.in
+++ b/MicroBenchmarks/libs/benchmark/cmake/GoogleTest.cmake.in
@@ -31,13 +31,14 @@
   )
 else()
   if(NOT ALLOW_DOWNLOADING_GOOGLETEST)
-    message(SEND_ERROR "Did not find Google Test sources! Either pass correct path in GOOGLETEST_PATH, or enable BENCHMARK_DOWNLOAD_DEPENDENCIES, or disable BENCHMARK_ENABLE_GTEST_TESTS / BENCHMARK_ENABLE_TESTING.")
+    message(SEND_ERROR "Did not find Google Test sources! Either pass correct path in GOOGLETEST_PATH, or enable BENCHMARK_DOWNLOAD_DEPENDENCIES, or disable BENCHMARK_USE_BUNDLED_GTEST, or disable BENCHMARK_ENABLE_GTEST_TESTS / BENCHMARK_ENABLE_TESTING.")
+    return()
   else()
     message(WARNING "Did not find Google Test sources! Fetching from web...")
     ExternalProject_Add(
       googletest
       GIT_REPOSITORY https://github.com/google/googletest.git
-      GIT_TAG master
+      GIT_TAG "release-1.11.0"
       PREFIX "${CMAKE_BINARY_DIR}"
       STAMP_DIR "${CMAKE_BINARY_DIR}/stamp"
       DOWNLOAD_DIR "${CMAKE_BINARY_DIR}/download"
diff --git a/MicroBenchmarks/libs/benchmark/cmake/Modules/FindPFM.cmake b/MicroBenchmarks/libs/benchmark/cmake/Modules/FindPFM.cmake
--- a/MicroBenchmarks/libs/benchmark/cmake/Modules/FindPFM.cmake
+++ b/MicroBenchmarks/libs/benchmark/cmake/Modules/FindPFM.cmake
@@ -3,8 +3,14 @@
 # Set BENCHMARK_ENABLE_LIBPFM to 0 to disable, regardless of libpfm presence.
 include(CheckIncludeFile)
 include(CheckLibraryExists)
+include(FeatureSummary)
 enable_language(C)
 
+set_package_properties(PFM PROPERTIES
+  URL http://perfmon2.sourceforge.net/
+  DESCRIPTION "a helper library to develop monitoring tools"
+  PURPOSE "Used to program specific performance monitoring events")
+
 check_library_exists(libpfm.a pfm_initialize "" HAVE_LIBPFM_INITIALIZE)
 if(HAVE_LIBPFM_INITIALIZE)
   check_include_file(perfmon/perf_event.h HAVE_PERFMON_PERF_EVENT_H)
@@ -13,6 +19,7 @@
   if(HAVE_PERFMON_PERF_EVENT_H AND HAVE_PERFMON_PFMLIB_H AND HAVE_PERFMON_PFMLIB_PERF_EVENT_H)
     message("Using Perf Counters.")
     set(HAVE_LIBPFM 1)
+    set(PFM_FOUND 1)
   endif()
 else()
   message("Perf Counters support requested, but was unable to find libpfm.")
diff --git a/MicroBenchmarks/libs/benchmark/config/generate_export_header.bzl b/MicroBenchmarks/libs/benchmark/config/generate_export_header.bzl
new file mode 100644
--- /dev/null
+++ b/MicroBenchmarks/libs/benchmark/config/generate_export_header.bzl
@@ -0,0 +1,166 @@
+#
+# Original file is located at:
+# https://github.com/RobotLocomotion/drake/blob/bad032aeb09b13c7f8c87ed64b624c8d1e9adb30/tools/workspace/generate_export_header.bzl
+#
+# All components of Drake are licensed under the BSD 3-Clause License
+# shown below. Where noted in the source code, some portions may
+# be subject to other permissive, non-viral licenses.
+#
+# Copyright 2012-2016 Robot Locomotion Group @ CSAIL
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer. Redistributions
+# in binary form must reproduce the above copyright notice, this list of
+# conditions and the following disclaimer in the documentation and/or
+# other materials provided with the distribution. Neither the name of
+# the Massachusetts Institute of Technology nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT +# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# -*- python -*- + +# Defines the implementation actions to generate_export_header. +def _generate_export_header_impl(ctx): + windows_constraint = ctx.attr._windows_constraint[platform_common.ConstraintValueInfo] + output = ctx.outputs.out + + if ctx.target_platform_has_constraint(windows_constraint): + export_attr = "__declspec(dllexport)" + import_attr = "__declspec(dllimport)" + no_export_attr = "" + deprecated_attr = "__declspec(deprecated)" + else: + export_attr = "__attribute__((visibility(\"default\")))" + import_attr = "__attribute__((visibility(\"default\")))" + no_export_attr = "__attribute__((visibility(\"hidden\")))" + deprecated_attr = "__attribute__((__deprecated__))" + + content = [ + "#ifndef %s_H" % ctx.attr.export_macro_name, + "#define %s_H" % ctx.attr.export_macro_name, + "", + "#ifdef %s" % ctx.attr.static_define, + "# define %s" % ctx.attr.export_macro_name, + "# define %s" % ctx.attr.no_export_macro_name, + "#else", + "# ifndef %s" % ctx.attr.export_macro_name, + "# ifdef %s" % ctx.attr.export_import_condition, + "# define %s %s" % (ctx.attr.export_macro_name, export_attr), + "# else", + "# define %s %s" % (ctx.attr.export_macro_name, import_attr), + "# endif", + "# endif", + "# ifndef %s" % ctx.attr.no_export_macro_name, + "# define %s %s" % (ctx.attr.no_export_macro_name, no_export_attr), + "# endif", + "#endif", + "", + "#ifndef %s" % ctx.attr.deprecated_macro_name, + "# define %s %s" % (ctx.attr.deprecated_macro_name, deprecated_attr), + "#endif", + "", + "#ifndef %s" % ctx.attr.export_deprecated_macro_name, + "# define %s %s %s" % (ctx.attr.export_deprecated_macro_name, ctx.attr.export_macro_name, ctx.attr.deprecated_macro_name), # noqa + "#endif", + "", + "#ifndef %s" % ctx.attr.no_export_deprecated_macro_name, + "# define %s %s %s" % (ctx.attr.no_export_deprecated_macro_name, ctx.attr.no_export_macro_name, ctx.attr.deprecated_macro_name), # noqa + "#endif", + "", + "#endif", + ] + + ctx.actions.write(output = output, content = "\n".join(content) + "\n") + +# Defines the rule to generate_export_header. +_generate_export_header_gen = rule( + attrs = { + "out": attr.output(mandatory = True), + "export_import_condition": attr.string(), + "export_macro_name": attr.string(), + "deprecated_macro_name": attr.string(), + "export_deprecated_macro_name": attr.string(), + "no_export_macro_name": attr.string(), + "no_export_deprecated_macro_name": attr.string(), + "static_define": attr.string(), + "_windows_constraint": attr.label(default = "@platforms//os:windows"), + }, + output_to_genfiles = True, + implementation = _generate_export_header_impl, +) + +def generate_export_header( + lib = None, + name = None, + out = None, + export_import_condition = None, + export_macro_name = None, + deprecated_macro_name = None, + export_deprecated_macro_name = None, + no_export_macro_name = None, + no_export_deprecated_macro_name = None, + static_define = None, + **kwargs): + """Creates a rule to generate an export header for a named library. 
This + is an incomplete implementation of CMake's generate_export_header. (In + particular, it assumes a platform that uses + __attribute__((visibility("default"))) to decorate exports.) + + By default, the rule will have a mangled name related to the library name, + and will produce "_export.h". + + The CMake documentation of the generate_export_header macro is: + https://cmake.org/cmake/help/latest/module/GenerateExportHeader.html + + """ + + if name == None: + name = "__%s_export_h" % lib + if out == None: + out = "%s_export.h" % lib + if export_import_condition == None: + # CMake does not uppercase the _EXPORTS define. + export_import_condition = "%s_EXPORTS" % lib + if export_macro_name == None: + export_macro_name = "%s_EXPORT" % lib.upper() + if deprecated_macro_name == None: + deprecated_macro_name = "%s_DEPRECATED" % lib.upper() + if export_deprecated_macro_name == None: + export_deprecated_macro_name = "%s_DEPRECATED_EXPORT" % lib.upper() + if no_export_macro_name == None: + no_export_macro_name = "%s_NO_EXPORT" % lib.upper() + if no_export_deprecated_macro_name == None: + no_export_deprecated_macro_name = \ + "%s_DEPRECATED_NO_EXPORT" % lib.upper() + if static_define == None: + static_define = "%s_STATIC_DEFINE" % lib.upper() + + _generate_export_header_gen( + name = name, + out = out, + export_import_condition = export_import_condition, + export_macro_name = export_macro_name, + deprecated_macro_name = deprecated_macro_name, + export_deprecated_macro_name = export_deprecated_macro_name, + no_export_macro_name = no_export_macro_name, + no_export_deprecated_macro_name = no_export_deprecated_macro_name, + static_define = static_define, + **kwargs + ) diff --git a/MicroBenchmarks/libs/benchmark/docs/_config.yml b/MicroBenchmarks/libs/benchmark/docs/_config.yml --- a/MicroBenchmarks/libs/benchmark/docs/_config.yml +++ b/MicroBenchmarks/libs/benchmark/docs/_config.yml @@ -1 +1 @@ -theme: jekyll-theme-hacker \ No newline at end of file +theme: jekyll-theme-minimal \ No newline at end of file diff --git a/MicroBenchmarks/libs/benchmark/dependencies.md b/MicroBenchmarks/libs/benchmark/docs/dependencies.md rename from MicroBenchmarks/libs/benchmark/dependencies.md rename to MicroBenchmarks/libs/benchmark/docs/dependencies.md --- a/MicroBenchmarks/libs/benchmark/dependencies.md +++ b/MicroBenchmarks/libs/benchmark/docs/dependencies.md @@ -3,16 +3,17 @@ To ensure the broadest compatibility when building the benchmark library, but still allow forward progress, we require any build tooling to be available for: -* Debian stable AND -* The last two Ubuntu LTS releases AND +* Debian stable _and_ +* The last two Ubuntu LTS releases Currently, this means using build tool versions that are available for Ubuntu -16.04 (Xenial), Ubuntu 18.04 (Bionic), and Debian stretch. +18.04 (Bionic Beaver), Ubuntu 20.04 (Focal Fossa), and Debian 11 (bullseye). -_Note, [travis](.travis.yml) runs under Ubuntu 14.04 (Trusty) for linux builds._ +_Note, CI also runs ubuntu-16.04 and ubuntu-14.04 to ensure best effort support +for older versions._ ## cmake The current supported version is cmake 3.5.1 as of 2018-06-06. 
-_Note, this version is also available for Ubuntu 14.04, the previous Ubuntu LTS +_Note, this version is also available for Ubuntu 14.04, an older Ubuntu LTS release, as `cmake3`._ diff --git a/MicroBenchmarks/libs/benchmark/docs/index.md b/MicroBenchmarks/libs/benchmark/docs/index.md new file mode 100644 --- /dev/null +++ b/MicroBenchmarks/libs/benchmark/docs/index.md @@ -0,0 +1,10 @@ +# Benchmark + +* [Assembly Tests](AssemblyTests.md) +* [Dependencies](dependencies.md) +* [Perf Counters](perf_counters.md) +* [Platform Specific Build Instructions](platform_specific_build_instructions.md) +* [Random Interleaving](random_interleaving.md) +* [Releasing](releasing.md) +* [Tools](tools.md) +* [User Guide](user_guide.md) \ No newline at end of file diff --git a/MicroBenchmarks/libs/benchmark/docs/platform_specific_build_instructions.md b/MicroBenchmarks/libs/benchmark/docs/platform_specific_build_instructions.md new file mode 100644 --- /dev/null +++ b/MicroBenchmarks/libs/benchmark/docs/platform_specific_build_instructions.md @@ -0,0 +1,48 @@ +# Platform Specific Build Instructions + +## Building with GCC + +When the library is built using GCC it is necessary to link with the pthread +library due to how GCC implements `std::thread`. Failing to link to pthread will +lead to runtime exceptions (unless you're using libc++), not linker errors. See +[issue #67](https://github.com/google/benchmark/issues/67) for more details. You +can link to pthread by adding `-pthread` to your linker command. Note, you can +also use `-lpthread`, but there are potential issues with ordering of command +line parameters if you use that. + +On QNX, the pthread library is part of libc and usually included automatically +(see +[`pthread_create()`](https://www.qnx.com/developers/docs/7.1/index.html#com.qnx.doc.neutrino.lib_ref/topic/p/pthread_create.html)). +There's no separate pthread library to link. + +## Building with Visual Studio 2015 or 2017 + +The `shlwapi` library (`-lshlwapi`) is required to support a call to `CPUInfo` which reads the registry. Either add `shlwapi.lib` under `[ Configuration Properties > Linker > Input ]`, or use the following: + +``` +// Alternatively, can add libraries using linker options. +#ifdef _WIN32 +#pragma comment ( lib, "Shlwapi.lib" ) +#ifdef _DEBUG +#pragma comment ( lib, "benchmarkd.lib" ) +#else +#pragma comment ( lib, "benchmark.lib" ) +#endif +#endif +``` + +Can also use the graphical version of CMake: +* Open `CMake GUI`. +* Under `Where to build the binaries`, same path as source plus `build`. +* Under `CMAKE_INSTALL_PREFIX`, same path as source plus `install`. +* Click `Configure`, `Generate`, `Open Project`. +* If build fails, try deleting entire directory and starting again, or unticking options to build less. + +## Building with Intel 2015 Update 1 or Intel System Studio Update 4 + +See instructions for building with Visual Studio. Once built, right click on the solution and change the build to Intel. + +## Building on Solaris + +If you're running benchmarks on solaris, you'll want the kstat library linked in +too (`-lkstat`). \ No newline at end of file diff --git a/MicroBenchmarks/libs/benchmark/docs/releasing.md b/MicroBenchmarks/libs/benchmark/docs/releasing.md --- a/MicroBenchmarks/libs/benchmark/docs/releasing.md +++ b/MicroBenchmarks/libs/benchmark/docs/releasing.md @@ -8,10 +8,23 @@ * `git log $(git describe --abbrev=0 --tags)..HEAD` gives you the list of commits between the last annotated tag and HEAD * Pick the most interesting. 
-* Create one last commit that updates the version saved in `CMakeLists.txt` to the release version you're creating. (This version will be used if benchmark is installed from the archive you'll be creating in the next step.) +* Create one last commit that updates the version saved in `CMakeLists.txt` and the + `__version__` variable in `bindings/python/google_benchmark/__init__.py`to the release + version you're creating. (This version will be used if benchmark is installed from the + archive you'll be creating in the next step.) ``` -project (benchmark VERSION 1.5.3 LANGUAGES CXX) +project (benchmark VERSION 1.6.0 LANGUAGES CXX) +``` + +```python +# bindings/python/google_benchmark/__init__.py + +# ... + +__version__ = "1.6.0" # <-- change this to the release version you are creating + +# ... ``` * Create a release through github's interface @@ -19,4 +32,4 @@ * Update this to an annotated tag: * `git pull --tags` * `git tag -a -f ` - * `git push --force origin` + * `git push --force --tags origin` diff --git a/MicroBenchmarks/libs/benchmark/README.md b/MicroBenchmarks/libs/benchmark/docs/user_guide.md copy from MicroBenchmarks/libs/benchmark/README.md copy to MicroBenchmarks/libs/benchmark/docs/user_guide.md --- a/MicroBenchmarks/libs/benchmark/README.md +++ b/MicroBenchmarks/libs/benchmark/docs/user_guide.md @@ -1,272 +1,6 @@ -# Benchmark +# User Guide -[![build-and-test](https://github.com/google/benchmark/workflows/build-and-test/badge.svg)](https://github.com/google/benchmark/actions?query=workflow%3Abuild-and-test) -[![bazel](https://github.com/google/benchmark/actions/workflows/bazel.yml/badge.svg)](https://github.com/google/benchmark/actions/workflows/bazel.yml) -[![pylint](https://github.com/google/benchmark/workflows/pylint/badge.svg)](https://github.com/google/benchmark/actions?query=workflow%3Apylint) -[![test-bindings](https://github.com/google/benchmark/workflows/test-bindings/badge.svg)](https://github.com/google/benchmark/actions?query=workflow%3Atest-bindings) - -[![Build Status](https://travis-ci.org/google/benchmark.svg?branch=master)](https://travis-ci.org/google/benchmark) -[![Coverage Status](https://coveralls.io/repos/google/benchmark/badge.svg)](https://coveralls.io/r/google/benchmark) - - -A library to benchmark code snippets, similar to unit tests. Example: - -```c++ -#include - -static void BM_SomeFunction(benchmark::State& state) { - // Perform setup here - for (auto _ : state) { - // This code gets timed - SomeFunction(); - } -} -// Register the function as a benchmark -BENCHMARK(BM_SomeFunction); -// Run the benchmark -BENCHMARK_MAIN(); -``` - -To get started, see [Requirements](#requirements) and -[Installation](#installation). See [Usage](#usage) for a full example and the -[User Guide](#user-guide) for a more comprehensive feature overview. - -It may also help to read the [Google Test documentation](https://github.com/google/googletest/blob/master/docs/primer.md) -as some of the structural aspects of the APIs are similar. - -### Resources - -[Discussion group](https://groups.google.com/d/forum/benchmark-discuss) - -IRC channels: -* [libera](https://libera.chat) #benchmark - -[Additional Tooling Documentation](docs/tools.md) - -[Assembly Testing Documentation](docs/AssemblyTests.md) - -## Requirements - -The library can be used with C++03. However, it requires C++11 to build, -including compiler and standard library support. 
- -The following minimum versions are required to build the library: - -* GCC 4.8 -* Clang 3.4 -* Visual Studio 14 2015 -* Intel 2015 Update 1 - -See [Platform-Specific Build Instructions](#platform-specific-build-instructions). - -## Installation - -This describes the installation process using cmake. As pre-requisites, you'll -need git and cmake installed. - -_See [dependencies.md](dependencies.md) for more details regarding supported -versions of build tools._ - -```bash -# Check out the library. -$ git clone https://github.com/google/benchmark.git -# Benchmark requires Google Test as a dependency. Add the source tree as a subdirectory. -$ git clone https://github.com/google/googletest.git benchmark/googletest -# Go to the library root directory -$ cd benchmark -# Make a build directory to place the build output. -$ cmake -E make_directory "build" -# Generate build system files with cmake. -$ cmake -E chdir "build" cmake -DCMAKE_BUILD_TYPE=Release ../ -# or, starting with CMake 3.13, use a simpler form: -# cmake -DCMAKE_BUILD_TYPE=Release -S . -B "build" -# Build the library. -$ cmake --build "build" --config Release -``` -This builds the `benchmark` and `benchmark_main` libraries and tests. -On a unix system, the build directory should now look something like this: - -``` -/benchmark - /build - /src - /libbenchmark.a - /libbenchmark_main.a - /test - ... -``` - -Next, you can run the tests to check the build. - -```bash -$ cmake -E chdir "build" ctest --build-config Release -``` - -If you want to install the library globally, also run: - -``` -sudo cmake --build "build" --config Release --target install -``` - -Note that Google Benchmark requires Google Test to build and run the tests. This -dependency can be provided two ways: - -* Checkout the Google Test sources into `benchmark/googletest` as above. -* Otherwise, if `-DBENCHMARK_DOWNLOAD_DEPENDENCIES=ON` is specified during - configuration, the library will automatically download and build any required - dependencies. - -If you do not wish to build and run the tests, add `-DBENCHMARK_ENABLE_GTEST_TESTS=OFF` -to `CMAKE_ARGS`. - -### Debug vs Release - -By default, benchmark builds as a debug library. You will see a warning in the -output when this is the case. To build it as a release library instead, add -`-DCMAKE_BUILD_TYPE=Release` when generating the build system files, as shown -above. The use of `--config Release` in build commands is needed to properly -support multi-configuration tools (like Visual Studio for example) and can be -skipped for other build systems (like Makefile). - -To enable link-time optimisation, also add `-DBENCHMARK_ENABLE_LTO=true` when -generating the build system files. - -If you are using gcc, you might need to set `GCC_AR` and `GCC_RANLIB` cmake -cache variables, if autodetection fails. - -If you are using clang, you may need to set `LLVMAR_EXECUTABLE`, -`LLVMNM_EXECUTABLE` and `LLVMRANLIB_EXECUTABLE` cmake cache variables. - -### Stable and Experimental Library Versions - -The main branch contains the latest stable version of the benchmarking library; -the API of which can be considered largely stable, with source breaking changes -being made only upon the release of a new major version. - -Newer, experimental, features are implemented and tested on the -[`v2` branch](https://github.com/google/benchmark/tree/v2). Users who wish -to use, test, and provide feedback on the new features are encouraged to try -this branch. 
However, this branch provides no stability guarantees and reserves -the right to change and break the API at any time. - -## Usage - -### Basic usage - -Define a function that executes the code to measure, register it as a benchmark -function using the `BENCHMARK` macro, and ensure an appropriate `main` function -is available: - -```c++ -#include - -static void BM_StringCreation(benchmark::State& state) { - for (auto _ : state) - std::string empty_string; -} -// Register the function as a benchmark -BENCHMARK(BM_StringCreation); - -// Define another benchmark -static void BM_StringCopy(benchmark::State& state) { - std::string x = "hello"; - for (auto _ : state) - std::string copy(x); -} -BENCHMARK(BM_StringCopy); - -BENCHMARK_MAIN(); -``` - -To run the benchmark, compile and link against the `benchmark` library -(libbenchmark.a/.so). If you followed the build steps above, this library will -be under the build directory you created. - -```bash -# Example on linux after running the build steps above. Assumes the -# `benchmark` and `build` directories are under the current directory. -$ g++ mybenchmark.cc -std=c++11 -isystem benchmark/include \ - -Lbenchmark/build/src -lbenchmark -lpthread -o mybenchmark -``` - -Alternatively, link against the `benchmark_main` library and remove -`BENCHMARK_MAIN();` above to get the same behavior. - -The compiled executable will run all benchmarks by default. Pass the `--help` -flag for option information or see the guide below. - -### Usage with CMake - -If using CMake, it is recommended to link against the project-provided -`benchmark::benchmark` and `benchmark::benchmark_main` targets using -`target_link_libraries`. -It is possible to use ```find_package``` to import an installed version of the -library. -```cmake -find_package(benchmark REQUIRED) -``` -Alternatively, ```add_subdirectory``` will incorporate the library directly in -to one's CMake project. -```cmake -add_subdirectory(benchmark) -``` -Either way, link to the library as follows. -```cmake -target_link_libraries(MyTarget benchmark::benchmark) -``` - -## Platform Specific Build Instructions - -### Building with GCC - -When the library is built using GCC it is necessary to link with the pthread -library due to how GCC implements `std::thread`. Failing to link to pthread will -lead to runtime exceptions (unless you're using libc++), not linker errors. See -[issue #67](https://github.com/google/benchmark/issues/67) for more details. You -can link to pthread by adding `-pthread` to your linker command. Note, you can -also use `-lpthread`, but there are potential issues with ordering of command -line parameters if you use that. - -On QNX, the pthread library is part of libc and usually included automatically -(see -[`pthread_create()`](https://www.qnx.com/developers/docs/7.1/index.html#com.qnx.doc.neutrino.lib_ref/topic/p/pthread_create.html)). -There's no separate pthread library to link. - -### Building with Visual Studio 2015 or 2017 - -The `shlwapi` library (`-lshlwapi`) is required to support a call to `CPUInfo` which reads the registry. Either add `shlwapi.lib` under `[ Configuration Properties > Linker > Input ]`, or use the following: - -``` -// Alternatively, can add libraries using linker options. -#ifdef _WIN32 -#pragma comment ( lib, "Shlwapi.lib" ) -#ifdef _DEBUG -#pragma comment ( lib, "benchmarkd.lib" ) -#else -#pragma comment ( lib, "benchmark.lib" ) -#endif -#endif -``` - -Can also use the graphical version of CMake: -* Open `CMake GUI`. 
-* Under `Where to build the binaries`, same path as source plus `build`. -* Under `CMAKE_INSTALL_PREFIX`, same path as source plus `install`. -* Click `Configure`, `Generate`, `Open Project`. -* If build fails, try deleting entire directory and starting again, or unticking options to build less. - -### Building with Intel 2015 Update 1 or Intel System Studio Update 4 - -See instructions for building with Visual Studio. Once built, right click on the solution and change the build to Intel. - -### Building on Solaris - -If you're running benchmarks on solaris, you'll want the kstat library linked in -too (`-lkstat`). - -## User Guide - -### Command Line +## Command Line [Output Formats](#output-formats) @@ -280,10 +14,12 @@ [Extra Context](#extra-context) -### Library +## Library [Runtime and Reporting Considerations](#runtime-and-reporting-considerations) +[Setup/Teardown](#setupteardown) + [Passing Arguments](#passing-arguments) [Custom Benchmark Name](#custom-benchmark-name) @@ -304,9 +40,9 @@ [Setting the Time Unit](#setting-the-time-unit) -[Random Interleaving](docs/random_interleaving.md) +[Random Interleaving](random_interleaving.md) -[User-Requested Performance Counters](docs/perf_counters.md) +[User-Requested Performance Counters](perf_counters.md) [Preventing Optimization](#preventing-optimization) @@ -314,6 +50,8 @@ [Custom Statistics](#custom-statistics) +[Memory Usage](#memory-usage) + [Using RegisterBenchmark](#using-register-benchmark) [Exiting with an Error](#exiting-with-an-error) @@ -325,7 +63,7 @@ -### Output Formats +## Output Formats The library supports multiple output formats. Use the `--benchmark_format=` flag (or set the @@ -400,21 +138,21 @@ -### Output Files +## Output Files Write benchmark results to a file with the `--benchmark_out=` option (or set `BENCHMARK_OUT`). Specify the output format with `--benchmark_out_format={json|console|csv}` (or set `BENCHMARK_OUT_FORMAT={json|console|csv}`). Note that the 'csv' reporter is -deprecated and the saved `.csv` file -[is not parsable](https://github.com/google/benchmark/issues/794) by csv +deprecated and the saved `.csv` file +[is not parsable](https://github.com/google/benchmark/issues/794) by csv parsers. Specifying `--benchmark_out` does not suppress the console output. -### Running Benchmarks +## Running Benchmarks Benchmarks are executed by running the produced binaries. Benchmarks binaries, by default, accept options that may be specified either through their command @@ -426,7 +164,7 @@ -### Running a Subset of Benchmarks +## Running a Subset of Benchmarks The `--benchmark_filter=` option (or `BENCHMARK_FILTER=` environment variable) can be used to only run the benchmarks that match @@ -446,14 +184,14 @@ -### Result comparison +## Result comparison It is possible to compare the benchmarking results. -See [Additional Tooling Documentation](docs/tools.md) +See [Additional Tooling Documentation](tools.md) -### Extra Context +## Extra Context Sometimes it's useful to add extra context to the content printed before the results. By default this section includes information about the CPU on which @@ -481,7 +219,7 @@ -### Runtime and Reporting Considerations +## Runtime and Reporting Considerations When the benchmark binary is executed, each benchmark function is run serially. The number of iterations to run is determined dynamically by running the @@ -504,9 +242,41 @@ As well as the per-benchmark entries, a preamble in the report will include information about the machine on which the benchmarks are run. 
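The iteration count chosen by the dynamic sizing described above can be
constrained per benchmark. A minimal sketch, assuming the standard
`MinTime`/`Iterations` registration options and a hypothetical `BM_Example`
function:

```c++
#include <benchmark/benchmark.h>

static void BM_Example(benchmark::State& state) {
  for (auto _ : state) {
    benchmark::DoNotOptimize(state.iterations());
  }
}

// Keep iterating until at least 2 seconds of measurements are collected,
// instead of the default minimum time.
BENCHMARK(BM_Example)->MinTime(2.0);

// Or pin the count and bypass the dynamic choice entirely.
BENCHMARK(BM_Example)->Iterations(1000);

BENCHMARK_MAIN();
```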
+
+
+## Setup/Teardown
+
+Global setup/teardown specific to each benchmark can be done by
+passing a callback to `Setup`/`Teardown`.
+
+The setup/teardown callbacks will be invoked once for each benchmark.
+If the benchmark is multi-threaded (will run in k threads), they will be invoked
+exactly once before each run with k threads.
+If the benchmark uses different size groups of threads, the above will be true
+for each size group.
+
+E.g.,
+
+```c++
+static void DoSetup(const benchmark::State& state) {
+}
+
+static void DoTeardown(const benchmark::State& state) {
+}
+
+static void BM_func(benchmark::State& state) {...}
+
+BENCHMARK(BM_func)->Arg(1)->Arg(3)->Threads(16)->Threads(32)->Setup(DoSetup)->Teardown(DoTeardown);
+```
+
+In this example, `DoSetup` and `DoTeardown` will be invoked 4 times each,
+specifically, once for each member of this family:
+ - BM_func_Arg_1_Threads_16, BM_func_Arg_1_Threads_32
+ - BM_func_Arg_3_Threads_16, BM_func_Arg_3_Threads_32
+
-### Passing Arguments
+## Passing Arguments
 
 Sometimes a family of benchmarks can be implemented with just one routine that
 takes an extra argument to specify which one of the family of benchmarks to
@@ -594,14 +364,17 @@
 product of the two specified ranges and will generate a benchmark for each such
 pair.
 
+{% raw %}
 ```c++
 BENCHMARK(BM_SetInsert)->Ranges({{1<<10, 8<<10}, {128, 512}});
 ```
+{% endraw %}
 
 Some benchmarks may require specific argument values that cannot be expressed
 with `Ranges`. In this case, `ArgsProduct` offers the ability to generate a
 benchmark input for each combination in the product of the supplied vectors.
 
+{% raw %}
 ```c++
 BENCHMARK(BM_SetInsert)
     ->ArgsProduct({{1<<10, 3<<10, 8<<10}, {20, 40, 60, 80}})
@@ -620,6 +393,7 @@
     ->Args({3<<10, 80})
     ->Args({8<<10, 80});
 ```
+{% endraw %}
 
 For the most common scenarios, helper methods for creating a list
 of integers for a given sparse or dense range are provided.
@@ -652,7 +426,7 @@
 BENCHMARK(BM_SetInsert)->Apply(CustomArguments);
 ```
 
-#### Passing Arbitrary Arguments to a Benchmark
+### Passing Arbitrary Arguments to a Benchmark
 
 In C++11 it is possible to define a benchmark that takes an arbitrary number
 of extra arguments. The `BENCHMARK_CAPTURE(func, test_case_name, ...args)`
@@ -662,13 +436,22 @@
 should describe the values passed.
 
 ```c++
-template <class ...ExtraArgs>
-void BM_takes_args(benchmark::State& state, ExtraArgs&&... extra_args) {
-  [...]
+template <class ...Args>
+void BM_takes_args(benchmark::State& state, Args&&... args) {
+  auto args_tuple = std::make_tuple(std::move(args)...);
+  for (auto _ : state) {
+    std::cout << std::get<0>(args_tuple) << ": " << std::get<1>(args_tuple)
+              << '\n';
+    [...]
+  }
 }
 // Registers a benchmark named "BM_takes_args/int_string_test" that passes
-// the specified values to `extra_args`.
+// the specified values to `args`.
 BENCHMARK_CAPTURE(BM_takes_args, int_string_test, 42, std::string("abc"));
+
+// Registers a second benchmark "BM_takes_args/int_test" that passes
+// the specified values to `args`.
+BENCHMARK_CAPTURE(BM_takes_args, int_test, 42, 43);
 ```
 
 Note that elements of `...args` may refer to global variables. Users should
@@ -676,7 +459,7 @@
 
-### Calculating Asymptotic Complexity (Big O)
+## Calculating Asymptotic Complexity (Big O)
 
 Asymptotic complexity might be calculated for a family of benchmarks. The
 following code will calculate the coefficient for the high-order term in the
@@ -713,7 +496,7 @@
 
-### Custom Benchmark Name
+## Custom Benchmark Name
 
 You can change the benchmark's name as follows:
 
@@ -726,7 +509,7 @@
 
-### Templated Benchmarks
+## Templated Benchmarks
 
 This example produces and consumes messages of size `sizeof(v)` `range_x`
 times. It also outputs throughput in the absence of multiprogramming.
 
@@ -745,14 +528,19 @@
   state.SetBytesProcessed(
       static_cast<int64_t>(state.iterations())*state.range(0));
 }
+// C++03
 BENCHMARK_TEMPLATE(BM_Sequential, WaitQueue<int>)->Range(1<<0, 1<<10);
+
+// C++11 or newer, you can use the BENCHMARK macro with template parameters:
+BENCHMARK(BM_Sequential<WaitQueue<int>>)->Range(1<<0, 1<<10);
+
 ```
 
 Three macros are provided for adding benchmark templates.
 
 ```c++
 #ifdef BENCHMARK_HAS_CXX11
-#define BENCHMARK_TEMPLATE(func, ...) // Takes any number of parameters.
+#define BENCHMARK(func<...>) // Takes any number of parameters.
 #else // C++ < C++11
 #define BENCHMARK_TEMPLATE(func, arg1)
 #endif
@@ -762,7 +550,7 @@
 
-### Fixtures
+## Fixtures
 
 Fixture tests are created by first defining a type that derives from
 `::benchmark::Fixture` and then creating/registering the tests using the
@@ -800,7 +588,7 @@
 /* BarTest is now registered */
 ```
 
-#### Templated Fixtures
+### Templated Fixtures
 
 Also you can create templated fixture by using the following macros:
 
@@ -830,7 +618,7 @@
 
-### Custom Counters
+## Custom Counters
 
 You can add your own counters with user-defined names. The example below
 will add columns "Foo", "Bar" and "Baz" in its output:
 
@@ -891,6 +679,7 @@
 When you're compiling in C++11 mode or later you can use `insert()` with
 `std::initializer_list`:
 
+{% raw %}
 ```c++
   // With C++11, this can be done:
   state.counters.insert({{"Foo", numFoos}, {"Bar", numBars}, {"Baz", numBazs}});
@@ -899,8 +688,9 @@
   state.counters["Bar"] = numBars;
   state.counters["Baz"] = numBazs;
 ```
+{% endraw %}
 
-#### Counter Reporting
+### Counter Reporting
 
 When using the console reporter, by default, user counters are printed at
 the end after the table, the same way as ``bytes_processed`` and
@@ -970,7 +760,7 @@
 
-### Multithreaded Benchmarks
+## Multithreaded Benchmarks
 
 In a multithreaded test (benchmark invoked by multiple threads simultaneously),
 it is guaranteed that none of the threads will start until all have reached
@@ -981,19 +771,29 @@
 
 ```c++
 static void BM_MultiThreaded(benchmark::State& state) {
-  if (state.thread_index == 0) {
+  if (state.thread_index() == 0) {
     // Setup code here.
   }
   for (auto _ : state) {
     // Run the test as normal.
   }
-  if (state.thread_index == 0) {
+  if (state.thread_index() == 0) {
     // Teardown code here.
   }
 }
 BENCHMARK(BM_MultiThreaded)->Threads(2);
 ```
 
+To run the benchmark across a range of thread counts, instead of `Threads`, use
+`ThreadRange`. This takes two parameters (`min_threads` and `max_threads`) and
+runs the benchmark once for each thread count in the inclusive range, doubling
+each time. For example:
+
+```c++
+BENCHMARK(BM_MultiThreaded)->ThreadRange(1, 8);
+```
+
+will run `BM_MultiThreaded` with thread counts 1, 2, 4, and 8.
+
 If the benchmarked code itself uses threads and you want to compare it to
 single-threaded code, you may want to use real-time ("wallclock") measurements
 for latency comparisons:
@@ -1006,7 +806,7 @@
 
-### CPU Timers
+## CPU Timers
 
 By default, the CPU timer only measures the time spent by the main thread.
 If the benchmark itself uses threads internally, this measurement may not
@@ -1050,13 +850,14 @@
 BENCHMARK(BM_OpenMP)->Range(8, 8<<10)->MeasureProcessCPUTime()->UseRealTime();
 ```
 
-#### Controlling Timers
+### Controlling Timers
 
 Normally, the entire duration of the work loop (`for (auto _ : state) {}`) is
 measured. But sometimes, it is necessary to do some work inside of that loop,
 every iteration, but without counting that time to the benchmark time. That is
 possible, although it is not recommended, since it has high overhead.
 
+{% raw %}
 ```c++
 static void BM_SetInsert_With_Timer_Control(benchmark::State& state) {
   std::set<int> data;
@@ -1071,10 +872,11 @@
 }
 BENCHMARK(BM_SetInsert_With_Timer_Control)->Ranges({{1<<10, 8<<10}, {128, 512}});
 ```
+{% endraw %}
 
-### Manual Timing
+## Manual Timing
 
 For benchmarking something for which neither CPU time nor real-time are
 correct or accurate enough, completely manual timing is supported using
@@ -1115,7 +917,7 @@
 
-### Setting the Time Unit
+## Setting the Time Unit
 
 If a benchmark runs a few milliseconds it may be hard to visually compare the
 measured times, since the output data is given in nanoseconds by default. In
@@ -1125,9 +927,13 @@
 BENCHMARK(BM_test)->Unit(benchmark::kMillisecond);
 ```
 
+Additionally the default time unit can be set globally with the
+`--benchmark_time_unit={ns|us|ms|s}` command line argument. The argument only
+affects benchmarks where the time unit is not set explicitly.
+
 
-### Preventing Optimization
+## Preventing Optimization
 
 To prevent a value or expression from being optimized away by the compiler
 the `benchmark::DoNotOptimize(...)` and `benchmark::ClobberMemory()`
@@ -1188,7 +994,7 @@
 
-### Statistics: Reporting the Mean, Median and Standard Deviation of Repeated Benchmarks
+## Statistics: Reporting the Mean, Median and Standard Deviation / Coefficient of variation of Repeated Benchmarks
 
 By default each benchmark is run once and that single result is reported.
 However benchmarks are often noisy and a single result may not be representative
@@ -1198,16 +1004,17 @@
 The number of runs of each benchmark is specified globally by the
 `--benchmark_repetitions` flag or on a per benchmark basis by calling
 `Repetitions` on the registered benchmark object. When a benchmark is run more
-than once the mean, median and standard deviation of the runs will be reported.
+than once the mean, median, standard deviation and coefficient of variation
+of the runs will be reported.
 
 Additionally the `--benchmark_report_aggregates_only={true|false}`,
 `--benchmark_display_aggregates_only={true|false}` flags or
 `ReportAggregatesOnly(bool)`, `DisplayAggregatesOnly(bool)` functions can be
 used to change how repeated tests are reported. By default the result of each
 repeated run is reported. When `report aggregates only` option is `true`,
-only the aggregates (i.e. mean, median and standard deviation, maybe complexity
-measurements if they were requested) of the runs is reported, to both the
-reporters - standard output (console), and the file.
+only the aggregates (i.e. mean, median, standard deviation and coefficient
+of variation, maybe complexity measurements if they were requested) of the runs
+is reported, to both the reporters - standard output (console), and the file.
 However when only the `display aggregates only` option is `true`,
 only the aggregates are displayed in the standard output, while the file output
 still contains everything.
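The repetition and aggregate options described in this hunk can also be set at
registration time. A minimal sketch, assuming a hypothetical `BM_Noisy`
benchmark:

```c++
#include <benchmark/benchmark.h>

static void BM_Noisy(benchmark::State& state) {
  for (auto _ : state) {
    benchmark::DoNotOptimize(state.iterations());
  }
}

// Equivalent to passing --benchmark_repetitions=10 and
// --benchmark_report_aggregates_only=true on the command line: ten runs,
// with only mean/median/stddev/cv reported.
BENCHMARK(BM_Noisy)->Repetitions(10)->ReportAggregatesOnly(true);

BENCHMARK_MAIN();
```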
@@ -1217,13 +1024,12 @@
 
-### Custom Statistics
+## Custom Statistics
 
-While having mean, median and standard deviation is nice, this may not be
-enough for everyone. For example you may want to know what the largest
-observation is, e.g. because you have some real-time constraints. This is easy.
-The following code will specify a custom statistic to be calculated, defined
-by a lambda function.
+While having these aggregates is nice, this may not be enough for everyone.
+For example you may want to know what the largest observation is, e.g. because
+you have some real-time constraints. This is easy. The following code will
+specify a custom statistic to be calculated, defined by a lambda function.
 
 ```c++
 void BM_spin_empty(benchmark::State& state) {
@@ -1241,9 +1047,43 @@
   ->Arg(512);
 ```
 
+While usually the statistics produce values in time units,
+you can also produce percentages:
+
+```c++
+void BM_spin_empty(benchmark::State& state) {
+  for (auto _ : state) {
+    for (int x = 0; x < state.range(0); ++x) {
+      benchmark::DoNotOptimize(x);
+    }
+  }
+}
+
+BENCHMARK(BM_spin_empty)
+  ->ComputeStatistics("ratio", [](const std::vector<double>& v) -> double {
+    return v.front() / v.back();
+  }, benchmark::StatisticUnit::kPercentage)
+  ->Arg(512);
+```
+
+
+## Memory Usage
+
+It's often useful to also track memory usage for benchmarks, alongside CPU
+performance. For this reason, benchmark offers the `RegisterMemoryManager`
+method that allows a custom `MemoryManager` to be injected.
+
+If set, the `MemoryManager::Start` and `MemoryManager::Stop` methods will be
+called at the start and end of benchmark runs to allow user code to fill out
+a report on the number of allocations, bytes used, etc.
+
+This data will then be reported alongside other performance data, currently
+only when using JSON output.
+
 
-### Using RegisterBenchmark(name, fn, args...)
+## Using RegisterBenchmark(name, fn, args...)
 
 The `RegisterBenchmark(name, func, args...)` function provides an alternative
 way to create and register benchmarks.
@@ -1273,7 +1113,7 @@
 
-### Exiting with an Error
+## Exiting with an Error
 
 When errors caused by external influences, such as file I/O and network
 communication, occur within a benchmark the
@@ -1325,7 +1165,7 @@
 ```
 
-### A Faster KeepRunning Loop
+## A Faster KeepRunning Loop
 
 In C++11 mode, a range-based for loop should be used in preference to
 the `KeepRunning` loop for running the benchmarks. For example:
@@ -1383,7 +1223,7 @@
 
-### Disabling CPU Frequency Scaling
+## Disabling CPU Frequency Scaling
 
 If you see this error:
 
@@ -1391,10 +1231,36 @@
 ***WARNING*** CPU scaling is enabled, the benchmark
 real time measurements may be noisy and will incur extra overhead.
 ```
 
-you might want to disable the CPU frequency scaling while running the benchmark:
+you might want to disable the CPU frequency scaling while running the
+benchmark. Exactly how to do this depends on the Linux distribution,
+desktop environment, and installed programs. Specific details are a moving
+target, so we will not attempt to exhaustively document them here.
+
+One simple option is to use the `cpupower` program to change the
+performance governor to "performance". This tool is maintained along with
+the Linux kernel and provided by your distribution.
+ +It must be run as root, like this: ```bash sudo cpupower frequency-set --governor performance -./mybench -sudo cpupower frequency-set --governor powersave ``` + +After this you can verify that all CPUs are using the performance governor +by running this command: + +```bash +cpupower frequency-info -o proc +``` + +The benchmarks you subsequently run will have less variance. + +Note that changing the governor in this way will not persist across +reboots. To set the governor back, run the first command again with the +governor your system usually runs with, which varies. + +If you find yourself doing this often, there are probably better options +than running the commands above. Some approaches allow you to do this +without root access, or by using a GUI, etc. The Arch Wiki [Cpu frequency +scaling](https://wiki.archlinux.org/title/CPU_frequency_scaling) page is a +good place to start looking for options. diff --git a/MicroBenchmarks/libs/benchmark/googletest/BUILD.bazel b/MicroBenchmarks/libs/benchmark/googletest/BUILD.bazel --- a/MicroBenchmarks/libs/benchmark/googletest/BUILD.bazel +++ b/MicroBenchmarks/libs/benchmark/googletest/BUILD.bazel @@ -30,8 +30,6 @@ # # Bazel Build for Google C++ Testing Framework(Google Test) -load("@rules_cc//cc:defs.bzl", "cc_library", "cc_test") - package(default_visibility = ["//visibility:public"]) licenses(["notice"]) @@ -48,6 +46,16 @@ constraint_values = ["@platforms//os:windows"], ) +config_setting( + name = "freebsd", + constraint_values = ["@platforms//os:freebsd"], +) + +config_setting( + name = "openbsd", + constraint_values = ["@platforms//os:openbsd"], +) + config_setting( name = "msvc_compiler", flag_values = { @@ -110,8 +118,10 @@ "googletest/include", ], linkopts = select({ - ":qnx": [], + ":qnx": ["-lregex"], ":windows": [], + ":freebsd": ["-lm", "-pthread"], + ":openbsd": ["-lm", "-pthread"], "//conditions:default": ["-pthread"], }), deps = select({ diff --git a/MicroBenchmarks/libs/benchmark/googletest/CMakeLists.txt b/MicroBenchmarks/libs/benchmark/googletest/CMakeLists.txt --- a/MicroBenchmarks/libs/benchmark/googletest/CMakeLists.txt +++ b/MicroBenchmarks/libs/benchmark/googletest/CMakeLists.txt @@ -1,19 +1,21 @@ # Note: CMake support is community-based. The maintainers do not use CMake # internally. -cmake_minimum_required(VERSION 2.8.12) +cmake_minimum_required(VERSION 3.5) if (POLICY CMP0048) cmake_policy(SET CMP0048 NEW) endif (POLICY CMP0048) +if (POLICY CMP0077) + cmake_policy(SET CMP0077 NEW) +endif (POLICY CMP0077) + project(googletest-distribution) set(GOOGLETEST_VERSION 1.11.0) -if (CMAKE_VERSION VERSION_GREATER "3.0.2") - if(NOT CYGWIN AND NOT MSYS AND NOT ${CMAKE_SYSTEM_NAME} STREQUAL QNX) - set(CMAKE_CXX_EXTENSIONS OFF) - endif() +if(NOT CYGWIN AND NOT MSYS AND NOT ${CMAKE_SYSTEM_NAME} STREQUAL QNX) + set(CMAKE_CXX_EXTENSIONS OFF) endif() enable_testing() diff --git a/MicroBenchmarks/libs/benchmark/googletest/CONTRIBUTING.md b/MicroBenchmarks/libs/benchmark/googletest/CONTRIBUTING.md --- a/MicroBenchmarks/libs/benchmark/googletest/CONTRIBUTING.md +++ b/MicroBenchmarks/libs/benchmark/googletest/CONTRIBUTING.md @@ -36,7 +36,8 @@ This ensures that work isn't being duplicated and communicating your plan early also generally leads to better patches. 4. If your proposed change is accepted, and you haven't already done so, sign a - Contributor License Agreement (see details above). + Contributor License Agreement + ([see details above](#contributor-license-agreements)). 5. Fork the desired repo, develop and test your code changes. 
6. Ensure that your code adheres to the existing style in the sample to which you are contributing. diff --git a/MicroBenchmarks/libs/benchmark/googletest/README.md b/MicroBenchmarks/libs/benchmark/googletest/README.md --- a/MicroBenchmarks/libs/benchmark/googletest/README.md +++ b/MicroBenchmarks/libs/benchmark/googletest/README.md @@ -6,7 +6,7 @@ GoogleTest now follows the [Abseil Live at Head philosophy](https://abseil.io/about/philosophy#upgrade-support). -We recommend using the latest commit in the `master` branch in your projects. +We recommend using the latest commit in the `main` branch in your projects. #### Documentation Updates diff --git a/MicroBenchmarks/libs/benchmark/googletest/WORKSPACE b/MicroBenchmarks/libs/benchmark/googletest/WORKSPACE --- a/MicroBenchmarks/libs/benchmark/googletest/WORKSPACE +++ b/MicroBenchmarks/libs/benchmark/googletest/WORKSPACE @@ -4,21 +4,14 @@ http_archive( name = "com_google_absl", - urls = ["https://github.com/abseil/abseil-cpp/archive/7971fb358ae376e016d2d4fc9327aad95659b25e.zip"], # 2021-05-20T02:59:16Z - strip_prefix = "abseil-cpp-7971fb358ae376e016d2d4fc9327aad95659b25e", sha256 = "aeba534f7307e36fe084b452299e49b97420667a8d28102cf9a0daeed340b859", + strip_prefix = "abseil-cpp-7971fb358ae376e016d2d4fc9327aad95659b25e", + urls = ["https://github.com/abseil/abseil-cpp/archive/7971fb358ae376e016d2d4fc9327aad95659b25e.zip"], # 2021-05-20T02:59:16Z ) http_archive( - name = "rules_cc", - urls = ["https://github.com/bazelbuild/rules_cc/archive/68cb652a71e7e7e2858c50593e5a9e3b94e5b9a9.zip"], # 2021-05-14T14:51:14Z - strip_prefix = "rules_cc-68cb652a71e7e7e2858c50593e5a9e3b94e5b9a9", - sha256 = "1e19e9a3bc3d4ee91d7fcad00653485ee6c798efbbf9588d40b34cbfbded143d", -) - -http_archive( - name = "rules_python", - urls = ["https://github.com/bazelbuild/rules_python/archive/ed6cc8f2c3692a6a7f013ff8bc185ba77eb9b4d2.zip"], # 2021-05-17T00:24:16Z - strip_prefix = "rules_python-ed6cc8f2c3692a6a7f013ff8bc185ba77eb9b4d2", - sha256 = "98b3c592faea9636ac8444bfd9de7f3fb4c60590932d6e6ac5946e3f8dbd5ff6", + name = "rules_python", + sha256 = "98b3c592faea9636ac8444bfd9de7f3fb4c60590932d6e6ac5946e3f8dbd5ff6", + strip_prefix = "rules_python-ed6cc8f2c3692a6a7f013ff8bc185ba77eb9b4d2", + urls = ["https://github.com/bazelbuild/rules_python/archive/ed6cc8f2c3692a6a7f013ff8bc185ba77eb9b4d2.zip"], # 2021-05-17T00:24:16Z ) diff --git a/MicroBenchmarks/libs/benchmark/googletest/ci/linux-presubmit.sh b/MicroBenchmarks/libs/benchmark/googletest/ci/linux-presubmit.sh --- a/MicroBenchmarks/libs/benchmark/googletest/ci/linux-presubmit.sh +++ b/MicroBenchmarks/libs/benchmark/googletest/ci/linux-presubmit.sh @@ -31,8 +31,8 @@ set -euox pipefail -readonly LINUX_LATEST_CONTAINER="gcr.io/google.com/absl-177019/linux_hybrid-latest:20210525" -readonly LINUX_GCC_FLOOR_CONTAINER="gcr.io/google.com/absl-177019/linux_gcc-floor:20201015" +readonly LINUX_LATEST_CONTAINER="gcr.io/google.com/absl-177019/linux_hybrid-latest:20220217" +readonly LINUX_GCC_FLOOR_CONTAINER="gcr.io/google.com/absl-177019/linux_gcc-floor:20210617" if [[ -z ${GTEST_ROOT:-} ]]; then GTEST_ROOT="$(realpath $(dirname ${0})/..)" diff --git a/MicroBenchmarks/libs/benchmark/googletest/docs/advanced.md b/MicroBenchmarks/libs/benchmark/googletest/docs/advanced.md --- a/MicroBenchmarks/libs/benchmark/googletest/docs/advanced.md +++ b/MicroBenchmarks/libs/benchmark/googletest/docs/advanced.md @@ -157,8 +157,11 @@ example: ```c++ -EXPECT_PRED_FORMAT2(testing::FloatLE, val1, val2); -EXPECT_PRED_FORMAT2(testing::DoubleLE, val1, val2); 
+using ::testing::FloatLE; +using ::testing::DoubleLE; +... +EXPECT_PRED_FORMAT2(FloatLE, val1, val2); +EXPECT_PRED_FORMAT2(DoubleLE, val1, val2); ``` The above code verifies that `val1` is less than, or approximately equal to, @@ -202,10 +205,9 @@ to assert that types `T1` and `T2` are the same. The function does nothing if the assertion is satisfied. If the types are different, the function call will -fail to compile, the compiler error message will say that -`T1 and T2 are not the same type` and most likely (depending on the compiler) -show you the actual values of `T1` and `T2`. This is mainly useful inside -template code. +fail to compile, the compiler error message will say that `T1 and T2 are not the +same type` and most likely (depending on the compiler) show you the actual +values of `T1` and `T2`. This is mainly useful inside template code. **Caveat**: When used inside a member function of a class template or a function template, `StaticAssertTypeEq()` is effective only if the function is @@ -383,10 +385,10 @@ ## Death Tests In many applications, there are assertions that can cause application failure if -a condition is not met. These sanity checks, which ensure that the program is in -a known good state, are there to fail at the earliest possible time after some -program state is corrupted. If the assertion checks the wrong condition, then -the program may proceed in an erroneous state, which could lead to memory +a condition is not met. These consistency checks, which ensure that the program +is in a known good state, are there to fail at the earliest possible time after +some program state is corrupted. If the assertion checks the wrong condition, +then the program may proceed in an erroneous state, which could lead to memory corruption, security holes, or worse. Hence it is vitally important to test that such assertion statements work as expected. @@ -558,7 +560,7 @@ particular style of death tests by setting the flag programmatically: ```c++ -testing::FLAGS_gtest_death_test_style="threadsafe" +GTEST_FLAG_SET(death_test_style, "threadsafe") ``` You can do this in `main()` to set the style for all death tests in the binary, @@ -568,12 +570,12 @@ ```c++ int main(int argc, char** argv) { testing::InitGoogleTest(&argc, argv); - testing::FLAGS_gtest_death_test_style = "fast"; + GTEST_FLAG_SET(death_test_style, "fast"); return RUN_ALL_TESTS(); } TEST(MyDeathTest, TestOne) { - testing::FLAGS_gtest_death_test_style = "threadsafe"; + GTEST_FLAG_SET(death_test_style, "threadsafe"); // This test is run in the "threadsafe" style: ASSERT_DEATH(ThisShouldDie(), ""); } @@ -610,15 +612,14 @@ test, thread problems such as deadlock are still possible in the presence of handlers registered with `pthread_atfork(3)`. - ## Using Assertions in Sub-routines {: .callout .note} Note: If you want to put a series of test assertions in a subroutine to check for a complex condition, consider using -[a custom GMock matcher](gmock_cook_book.md#NewMatchers) -instead. This lets you provide a more readable error message in case of failure -and avoid all of the issues described below. +[a custom GMock matcher](gmock_cook_book.md#NewMatchers) instead. This lets you +provide a more readable error message in case of failure and avoid all of the +issues described below. 
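For the custom-matcher route recommended in the note above, a minimal sketch
using gMock's `MATCHER` macro (`IsEven` is an invented example, not part of
the library):

```c++
#include "gmock/gmock.h"
#include "gtest/gtest.h"

// A reusable predicate with a readable description; on failure the output
// names the matcher instead of pointing into a helper subroutine.
MATCHER(IsEven, "is an even number") { return (arg % 2) == 0; }

TEST(CustomMatcherExample, ChecksValue) {
  EXPECT_THAT(2 * 5, IsEven());
}
```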
### Adding Traces to Assertions @@ -631,6 +632,7 @@ ```c++ SCOPED_TRACE(message); ``` + ```c++ ScopedTrace trace("file_path", line_number, message); ``` @@ -888,6 +890,12 @@ of any shared resource, or, if they do modify the state, they must restore the state to its original value before passing control to the next test. +Note that `SetUpTestSuite()` may be called multiple times for a test fixture +class that has derived classes, so you should not expect code in the function +body to be run only once. Also, derived classes still have access to shared +resources defined as static members, so careful consideration is needed when +managing shared resources to avoid memory leaks. + Here's an example of per-test-suite set-up and tear-down: ```c++ @@ -897,7 +905,10 @@ // Called before the first test in this test suite. // Can be omitted if not needed. static void SetUpTestSuite() { - shared_resource_ = new ...; + // Avoid reallocating static objects if called in subclasses of FooTest. + if (shared_resource_ == nullptr) { + shared_resource_ = new ...; + } } // Per-test-suite tear-down. @@ -1481,8 +1492,8 @@ the exception and assert on it. But googletest doesn't use exceptions, so how do we test that a piece of code generates an expected failure? -`"gtest/gtest-spi.h"` contains some constructs to do this. After #including this header, -you can use +`"gtest/gtest-spi.h"` contains some constructs to do this. +After #including this header, you can use ```c++ EXPECT_FATAL_FAILURE(statement, substring); @@ -1586,12 +1597,14 @@ } ... int main(int argc, char** argv) { + testing::InitGoogleTest(&argc, argv); std::vector values_to_test = LoadValuesFromConfig(); RegisterMyTests(values_to_test); ... return RUN_ALL_TESTS(); } ``` + ## Getting the Current Test's Name Sometimes a function may need to know the name of the currently running test. @@ -1816,8 +1829,7 @@ cases (e.g. iterative test development & execution) it may be desirable stop test execution upon first failure (trading improved latency for completeness). If `GTEST_FAIL_FAST` environment variable or `--gtest_fail_fast` flag is set, -the test runner will stop execution as soon as the first test failure is -found. +the test runner will stop execution as soon as the first test failure is found. #### Temporarily Disabling Tests @@ -1911,6 +1923,58 @@ If you combine this with `--gtest_repeat=N`, googletest will pick a different random seed and re-shuffle the tests in each iteration. +### Distributing Test Functions to Multiple Machines + +If you have more than one machine you can use to run a test program, you might +want to run the test functions in parallel and get the result faster. We call +this technique *sharding*, where each machine is called a *shard*. + +GoogleTest is compatible with test sharding. To take advantage of this feature, +your test runner (not part of GoogleTest) needs to do the following: + +1. Allocate a number of machines (shards) to run the tests. +1. On each shard, set the `GTEST_TOTAL_SHARDS` environment variable to the total + number of shards. It must be the same for all shards. +1. On each shard, set the `GTEST_SHARD_INDEX` environment variable to the index + of the shard. Different shards must be assigned different indices, which + must be in the range `[0, GTEST_TOTAL_SHARDS - 1]`. +1. Run the same test program on all shards. When GoogleTest sees the above two + environment variables, it will select a subset of the test functions to run. + Across all shards, each test function in the program will be run exactly + once. 
+1. Wait for all shards to finish, then collect and report the results. + +Your project may have tests that were written without GoogleTest and thus don't +understand this protocol. In order for your test runner to figure out which test +supports sharding, it can set the environment variable `GTEST_SHARD_STATUS_FILE` +to a non-existent file path. If a test program supports sharding, it will create +this file to acknowledge that fact; otherwise it will not create it. The actual +contents of the file are not important at this time, although we may put some +useful information in it in the future. + +Here's an example to make it clear. Suppose you have a test program `foo_test` +that contains the following 5 test functions: + +``` +TEST(A, V) +TEST(A, W) +TEST(B, X) +TEST(B, Y) +TEST(B, Z) +``` + +Suppose you have 3 machines at your disposal. To run the test functions in +parallel, you would set `GTEST_TOTAL_SHARDS` to 3 on all machines, and set +`GTEST_SHARD_INDEX` to 0, 1, and 2 on the machines respectively. Then you would +run the same `foo_test` on each machine. + +GoogleTest reserves the right to change how the work is distributed across the +shards, but here's one possible scenario: + +* Machine #0 runs `A.V` and `B.X`. +* Machine #1 runs `A.W` and `B.Y`. +* Machine #2 runs `B.Z`. + ### Controlling Test Output #### Colored Terminal Output @@ -1965,8 +2029,6 @@ the test program with `--gtest_print_utf8=0` or set the `GTEST_PRINT_UTF8` environment variable to `0`. - - #### Generating an XML Report googletest can emit a detailed XML report to a file in addition to its normal @@ -2253,12 +2315,11 @@ #### Detecting Test Premature Exit -Google Test implements the _premature-exit-file_ protocol for test runners -to catch any kind of unexpected exits of test programs. Upon start, -Google Test creates the file which will be automatically deleted after -all work has been finished. Then, the test runner can check if this file -exists. In case the file remains undeleted, the inspected test has exited -prematurely. +Google Test implements the _premature-exit-file_ protocol for test runners to +catch any kind of unexpected exits of test programs. Upon start, Google Test +creates the file which will be automatically deleted after all work has been +finished. Then, the test runner can check if this file exists. In case the file +remains undeleted, the inspected test has exited prematurely. This feature is enabled only if the `TEST_PREMATURE_EXIT_FILE` environment variable has been set. diff --git a/MicroBenchmarks/libs/benchmark/googletest/docs/faq.md b/MicroBenchmarks/libs/benchmark/googletest/docs/faq.md --- a/MicroBenchmarks/libs/benchmark/googletest/docs/faq.md +++ b/MicroBenchmarks/libs/benchmark/googletest/docs/faq.md @@ -328,7 +328,7 @@ * C++ does not allow virtual function calls in constructors and destructors. You can call a method declared as virtual, but it will not use dynamic - dispatch, it will use the definition from the class the constructor of which + dispatch. It will use the definition from the class the constructor of which is currently executing. This is because calling a virtual method before the derived class constructor has a chance to run is very dangerous - the virtual method might operate on uninitialized data. Therefore, if you need @@ -410,7 +410,6 @@ Similarly, sometimes people spell `SetUpTestSuite()` as `SetupTestSuite()` and wonder why it's never called. 
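To make the spelling pitfall mentioned above concrete, a minimal sketch of a
fixture that names the per-suite hooks correctly (`FooTest` is a placeholder):

```c++
#include "gtest/gtest.h"

class FooTest : public testing::Test {
 protected:
  // Note the capital "U": SetUpTestSuite, not SetupTestSuite. A misspelled
  // hook compiles but is silently never invoked.
  static void SetUpTestSuite() { /* one-time setup for the suite */ }
  static void TearDownTestSuite() { /* one-time teardown for the suite */ }
  void SetUp() override { /* runs before each test */ }
};

TEST_F(FooTest, UsesSharedSetUp) { SUCCEED(); }
```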
-
 ## I have several test suites which share the same test fixture logic, do I have to define a new test fixture class for each of them? This seems pretty tedious.
 
 You don't have to. Instead of
diff --git a/MicroBenchmarks/libs/benchmark/googletest/docs/gmock_cook_book.md b/MicroBenchmarks/libs/benchmark/googletest/docs/gmock_cook_book.md
--- a/MicroBenchmarks/libs/benchmark/googletest/docs/gmock_cook_book.md
+++ b/MicroBenchmarks/libs/benchmark/googletest/docs/gmock_cook_book.md
@@ -1084,7 +1084,7 @@
 ```
 
 says that `Blah` will be called with arguments `x`, `y`, and `z` where `x < y <
-z`. Note that in this example, it wasn't necessary specify the positional
+z`. Note that in this example, it wasn't necessary to specify the positional
 matchers.
 
 As a convenience and example, gMock provides some matchers for 2-tuples,
@@ -1452,7 +1452,7 @@
 object dies, the implementation object will be deleted.
 
 Therefore, if you have some complex matcher that you want to use again and
-again, there is no need to build it everytime. Just assign it to a matcher
+again, there is no need to build it every time. Just assign it to a matcher
 variable and use that variable repeatedly! For example,
 
 ```cpp
@@ -1754,7 +1754,7 @@
        | A ---|
        |
-       +---> C ---> D
+      +---> C ---> D
 ```
 
 This means that A must occur before B and C, and C must occur before D. There's
@@ -1980,6 +1980,7 @@
 ```cpp
 using ::testing::_;
+using ::testing::DoAll;
 using ::testing::Return;
 using ::testing::SetArgPointee;
@@ -2033,10 +2034,7 @@
 }
 ...
   MockRolodex rolodex;
-  vector<string> names;
-  names.push_back("George");
-  names.push_back("John");
-  names.push_back("Thomas");
+  vector<string> names = {"George", "John", "Thomas"};
   EXPECT_CALL(rolodex, GetNames(_))
       .WillOnce(SetArrayArgument<0>(names.begin(), names.end()));
@@ -2604,7 +2602,7 @@
 the implementation object will be deleted.
 
 If you have some complex action that you want to use again and again, you may
-not have to build it from scratch everytime. If the action doesn't have an
+not have to build it from scratch every time. If the action doesn't have an
 internal state (i.e. if it always does the same thing no matter how many times
 it has been called), you can assign it to an action variable and use that
 variable repeatedly. For example:
@@ -4191,7 +4189,7 @@
 What matters is that it must have a `Perform()` method template. This method
 template takes the mock function's arguments as a tuple in a **single**
 argument, and returns the result of the action. It can be either `const` or not,
-but must be invokable with exactly one template argument, which is the result
+but must be invocable with exactly one template argument, which is the result
 type. In other words, you must be able to call `Perform<R>(args)` where `R` is
 the mock function's return type and `args` is its arguments in a tuple.
diff --git a/MicroBenchmarks/libs/benchmark/googletest/docs/gmock_faq.md b/MicroBenchmarks/libs/benchmark/googletest/docs/gmock_faq.md
--- a/MicroBenchmarks/libs/benchmark/googletest/docs/gmock_faq.md
+++ b/MicroBenchmarks/libs/benchmark/googletest/docs/gmock_faq.md
@@ -369,8 +369,8 @@
 different types (e.g. if you are defining `Return(*value*)`),
 `MakePolymorphicAction()` is easiest. Sometimes you want precise control on what
 types of functions the action can be used in, and implementing `ActionInterface`
-is the way to go here. See the implementation of `Return()` in
-`testing/base/public/gmock-actions.h` for an example.
+is the way to go here. See the implementation of `Return()` in `gmock-actions.h`
+for an example.
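To make the `MakePolymorphicAction()` route concrete, here is a sketch along the lines of the cookbook's `ReturnSecondArgument()` example; only the `Perform()` method template is required of the implementation class:

```c++
#include <tuple>

#include "gmock/gmock.h"

class ReturnSecondArgumentAction {
 public:
  // gMock instantiates this with the mock function's result type and its
  // arguments packed into a tuple.
  template <typename Result, typename ArgumentTuple>
  Result Perform(const ArgumentTuple& args) const {
    return std::get<1>(args);  // return the second argument
  }
};

inline testing::PolymorphicAction<ReturnSecondArgumentAction>
ReturnSecondArgument() {
  return testing::MakePolymorphicAction(ReturnSecondArgumentAction());
}
```

With this in place, an expectation such as `.WillOnce(ReturnSecondArgument())` returns the call's second argument, whatever the mocked signature is.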
### I use SetArgPointee() in WillOnce(), but gcc complains about "conflicting return type specified". What does it mean? diff --git a/MicroBenchmarks/libs/benchmark/googletest/docs/gmock_for_dummies.md b/MicroBenchmarks/libs/benchmark/googletest/docs/gmock_for_dummies.md --- a/MicroBenchmarks/libs/benchmark/googletest/docs/gmock_for_dummies.md +++ b/MicroBenchmarks/libs/benchmark/googletest/docs/gmock_for_dummies.md @@ -190,12 +190,12 @@ `Foo` changes it, your test could break. (You can't really expect `Foo`'s maintainer to fix every test that uses `Foo`, can you?) -So, the rule of thumb is: if you need to mock `Foo` and it's owned by others, -define the mock class in `Foo`'s package (better, in a `testing` sub-package -such that you can clearly separate production code and testing utilities), put -it in a `.h` and a `cc_library`. Then everyone can reference them from their -tests. If `Foo` ever changes, there is only one copy of `MockFoo` to change, and -only tests that depend on the changed methods need to be fixed. +Generally, you should not define mock classes you don't own. If you must mock +such a class owned by others, define the mock class in `Foo`'s Bazel package +(usually the same directory or a `testing` sub-directory), and put it in a `.h` +and a `cc_library` with `testonly=True`. Then everyone can reference them from +their tests. If `Foo` ever changes, there is only one copy of `MockFoo` to +change, and only tests that depend on the changed methods need to be fixed. Another way to do it: you can introduce a thin layer `FooAdaptor` on top of `Foo` and code to this new interface. Since you own `FooAdaptor`, you can absorb diff --git a/MicroBenchmarks/libs/benchmark/googletest/docs/primer.md b/MicroBenchmarks/libs/benchmark/googletest/docs/primer.md --- a/MicroBenchmarks/libs/benchmark/googletest/docs/primer.md +++ b/MicroBenchmarks/libs/benchmark/googletest/docs/primer.md @@ -162,9 +162,9 @@ `TEST()` arguments go from general to specific. The *first* argument is the name of the test suite, and the *second* argument is the test's name within the test -suite. Both names must be valid C++ identifiers, and they should not contain -any underscores (`_`). A test's *full name* consists of its containing test suite and -its individual name. Tests from different test suites can have the same +suite. Both names must be valid C++ identifiers, and they should not contain any +underscores (`_`). A test's *full name* consists of its containing test suite +and its individual name. Tests from different test suites can have the same individual name. For example, let's take a simple integer function: @@ -245,8 +245,8 @@ declaration`". For each test defined with `TEST_F()`, googletest will create a *fresh* test -fixture at runtime, immediately initialize it via `SetUp()`, run the test, -clean up by calling `TearDown()`, and then delete the test fixture. Note that +fixture at runtime, immediately initialize it via `SetUp()`, run the test, clean +up by calling `TearDown()`, and then delete the test fixture. Note that different tests in the same test suite have different test fixture objects, and googletest always deletes a test fixture before it creates the next one. googletest does **not** reuse the same test fixture for multiple tests. Any @@ -342,8 +342,8 @@ After defining your tests, you can run them with `RUN_ALL_TESTS()`, which returns `0` if all the tests are successful, or `1` otherwise. 
Note that -`RUN_ALL_TESTS()` runs *all tests* in your link unit--they can be from -different test suites, or even different source files. +`RUN_ALL_TESTS()` runs *all tests* in your link unit--they can be from different +test suites, or even different source files. When invoked, the `RUN_ALL_TESTS()` macro: @@ -456,8 +456,8 @@ The `::testing::InitGoogleTest()` function parses the command line for googletest flags, and removes all recognized flags. This allows the user to -control a test program's behavior via various flags, which we'll cover in -the [AdvancedGuide](advanced.md). You **must** call this function before calling +control a test program's behavior via various flags, which we'll cover in the +[AdvancedGuide](advanced.md). You **must** call this function before calling `RUN_ALL_TESTS()`, or the flags won't be properly initialized. On Windows, `InitGoogleTest()` also works with wide strings, so it can be used diff --git a/MicroBenchmarks/libs/benchmark/googletest/docs/quickstart-bazel.md b/MicroBenchmarks/libs/benchmark/googletest/docs/quickstart-bazel.md --- a/MicroBenchmarks/libs/benchmark/googletest/docs/quickstart-bazel.md +++ b/MicroBenchmarks/libs/benchmark/googletest/docs/quickstart-bazel.md @@ -17,7 +17,7 @@ compatible with GoogleTest. If you don't already have Bazel installed, see the -[Bazel installation guide](https://docs.bazel.build/versions/master/install.html). +[Bazel installation guide](https://docs.bazel.build/versions/main/install.html). {: .callout .note} Note: The terminal commands in this tutorial show a Unix shell prompt, but the @@ -26,7 +26,7 @@ ## Set up a Bazel workspace A -[Bazel workspace](https://docs.bazel.build/versions/master/build-ref.html#workspace) +[Bazel workspace](https://docs.bazel.build/versions/main/build-ref.html#workspace) is a directory on your filesystem that you use to manage source files for the software you want to build. Each workspace directory has a text file named `WORKSPACE` which may be empty, or may contain references to external @@ -40,9 +40,9 @@ Next, you’ll create the `WORKSPACE` file to specify dependencies. A common and recommended way to depend on GoogleTest is to use a -[Bazel external dependency](https://docs.bazel.build/versions/master/external.html) +[Bazel external dependency](https://docs.bazel.build/versions/main/external.html) via the -[`http_archive` rule](https://docs.bazel.build/versions/master/repo/http.html#http_archive). +[`http_archive` rule](https://docs.bazel.build/versions/main/repo/http.html#http_archive). To do this, in the root directory of your workspace (`my_workspace/`), create a file named `WORKSPACE` with the following contents: @@ -62,18 +62,6 @@ GoogleTest version to use; we recommend updating the hash often to point to the latest version. -Bazel also needs a dependency on the -[`rules_cc` repository](https://github.com/bazelbuild/rules_cc) to build C++ -code, so add the following to the `WORKSPACE` file: - -``` -http_archive( - name = "rules_cc", - urls = ["https://github.com/bazelbuild/rules_cc/archive/40548a2974f1aea06215272d9c2b47a14a24e556.zip"], - strip_prefix = "rules_cc-40548a2974f1aea06215272d9c2b47a14a24e556", -) -``` - Now you're ready to build C++ code that uses GoogleTest. ## Create and run a binary @@ -104,8 +92,6 @@ following contents: ``` -load("@rules_cc//cc:defs.bzl", "cc_test") - cc_test( name = "hello_test", size = "small", @@ -118,7 +104,7 @@ GoogleTest (`//:gtest_main`) using the prefix you specified in the `WORKSPACE` file (`@com_google_googletest`). 
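For reference, the `hello_test.cc` that this `BUILD` target compiles is the quickstart's minimal test file, roughly:

```cpp
#include <gtest/gtest.h>

// Demonstrate some basic assertions.
TEST(HelloTest, BasicAssertions) {
  // Expect two strings not to be equal.
  EXPECT_STRNE("hello", "world");
  // Expect equality.
  EXPECT_EQ(7 * 6, 42);
}
```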
For more information about Bazel `BUILD` files, see the -[Bazel C++ Tutorial](https://docs.bazel.build/versions/master/tutorial/cpp.html). +[Bazel C++ Tutorial](https://docs.bazel.build/versions/main/tutorial/cpp.html). Now you can build and run your test: diff --git a/MicroBenchmarks/libs/benchmark/googletest/docs/reference/matchers.md b/MicroBenchmarks/libs/benchmark/googletest/docs/reference/matchers.md --- a/MicroBenchmarks/libs/benchmark/googletest/docs/reference/matchers.md +++ b/MicroBenchmarks/libs/benchmark/googletest/docs/reference/matchers.md @@ -88,16 +88,17 @@ | Matcher | Description | | :---------------------- | :------------------------------------------------- | -| `ContainsRegex(string)` | `argument` matches the given regular expression. | -| `EndsWith(suffix)` | `argument` ends with string `suffix`. | -| `HasSubstr(string)` | `argument` contains `string` as a sub-string. | -| `IsEmpty()` | `argument` is an empty string. | -| `MatchesRegex(string)` | `argument` matches the given regular expression with the match starting at the first character and ending at the last character. | -| `StartsWith(prefix)` | `argument` starts with string `prefix`. | -| `StrCaseEq(string)` | `argument` is equal to `string`, ignoring case. | -| `StrCaseNe(string)` | `argument` is not equal to `string`, ignoring case. | -| `StrEq(string)` | `argument` is equal to `string`. | -| `StrNe(string)` | `argument` is not equal to `string`. | +| `ContainsRegex(string)` | `argument` matches the given regular expression. | +| `EndsWith(suffix)` | `argument` ends with string `suffix`. | +| `HasSubstr(string)` | `argument` contains `string` as a sub-string. | +| `IsEmpty()` | `argument` is an empty string. | +| `MatchesRegex(string)` | `argument` matches the given regular expression with the match starting at the first character and ending at the last character. | +| `StartsWith(prefix)` | `argument` starts with string `prefix`. | +| `StrCaseEq(string)` | `argument` is equal to `string`, ignoring case. | +| `StrCaseNe(string)` | `argument` is not equal to `string`, ignoring case. | +| `StrEq(string)` | `argument` is equal to `string`. | +| `StrNe(string)` | `argument` is not equal to `string`. | +| `WhenBase64Unescaped(m)` | `argument` is a base-64 escaped string whose unescaped string matches `m`. | `ContainsRegex()` and `MatchesRegex()` take ownership of the `RE` object. They use the regular expression syntax defined @@ -193,6 +194,7 @@ | Matcher | Description | | :--------------- | :------------------------------------------------ | | `ResultOf(f, m)` | `f(argument)` matches matcher `m`, where `f` is a function or functor. | +| `ResultOf(result_description, f, m)` | The same as the two-parameter version, but provides a better error message. ## Pointer Matchers @@ -237,7 +239,7 @@ | `AnyOf(m1, m2, ..., mn)` | `argument` matches at least one of the matchers `m1` to `mn`. | | `AnyOfArray({m0, m1, ..., mn})`, `AnyOfArray(a_container)`, `AnyOfArray(begin, end)`, `AnyOfArray(array)`, or `AnyOfArray(array, count)` | The same as `AnyOf()` except that the matchers come from an initializer list, STL-style container, iterator range, or C-style array. | | `Not(m)` | `argument` doesn't match matcher `m`. 
|
-| `Conditional(cond, m1, m2)` | Matches matcher `m1` if `cond` evalutes to true, else matches `m2`.|
+| `Conditional(cond, m1, m2)` | Matches matcher `m1` if `cond` evaluates to true, else matches `m2`.|
 
 ## Adapters for Matchers
diff --git a/MicroBenchmarks/libs/benchmark/googletest/googlemock/CMakeLists.txt b/MicroBenchmarks/libs/benchmark/googletest/googlemock/CMakeLists.txt
--- a/MicroBenchmarks/libs/benchmark/googletest/googlemock/CMakeLists.txt
+++ b/MicroBenchmarks/libs/benchmark/googletest/googlemock/CMakeLists.txt
@@ -36,13 +36,9 @@
 # as ${gmock_SOURCE_DIR} and to the root binary directory as
 # ${gmock_BINARY_DIR}.
 # Language "C" is required for find_package(Threads).
-if (CMAKE_VERSION VERSION_LESS 3.0)
-  project(gmock CXX C)
-else()
-  cmake_policy(SET CMP0048 NEW)
-  project(gmock VERSION ${GOOGLETEST_VERSION} LANGUAGES CXX C)
-endif()
-cmake_minimum_required(VERSION 2.8.12)
+cmake_minimum_required(VERSION 3.5)
+cmake_policy(SET CMP0048 NEW)
+project(gmock VERSION ${GOOGLETEST_VERSION} LANGUAGES CXX C)
 
 if (COMMAND set_up_hermetic_build)
   set_up_hermetic_build()
@@ -109,11 +105,12 @@
 # to the targets for when we are part of a parent build (ie being pulled
 # in via add_subdirectory() rather than being a standalone build).
 if (DEFINED CMAKE_VERSION AND NOT "${CMAKE_VERSION}" VERSION_LESS "2.8.11")
+  string(REPLACE ";" "$<SEMICOLON>" dirs "${gmock_build_include_dirs}")
   target_include_directories(gmock SYSTEM INTERFACE
-    "$<BUILD_INTERFACE:${gmock_build_include_dirs}>"
+    "$<BUILD_INTERFACE:${dirs}>"
     "$<INSTALL_INTERFACE:$<INSTALL_PREFIX>/${CMAKE_INSTALL_INCLUDEDIR}>")
   target_include_directories(gmock_main SYSTEM INTERFACE
-    "$<BUILD_INTERFACE:${gmock_build_include_dirs}>"
+    "$<BUILD_INTERFACE:${dirs}>"
     "$<INSTALL_INTERFACE:$<INSTALL_PREFIX>/${CMAKE_INSTALL_INCLUDEDIR}>")
 endif()
diff --git a/MicroBenchmarks/libs/benchmark/googletest/googlemock/README.md b/MicroBenchmarks/libs/benchmark/googletest/googlemock/README.md
--- a/MicroBenchmarks/libs/benchmark/googletest/googlemock/README.md
+++ b/MicroBenchmarks/libs/benchmark/googletest/googlemock/README.md
@@ -35,10 +35,6 @@
 *   [gMock Cookbook](https://google.github.io/googletest/gmock_cook_book.html)
 *   [gMock Cheat Sheet](https://google.github.io/googletest/gmock_cheat_sheet.html)
 
-Please note that code under scripts/generator/ is from the
-[cppclean project](http://code.google.com/p/cppclean/) and under the Apache
-License, which is different from GoogleMock's license.
-
 GoogleMock is a part of
 [GoogleTest C++ testing framework](http://github.com/google/googletest/) and a
 subject to the same requirements.
diff --git a/MicroBenchmarks/libs/benchmark/googletest/googlemock/include/gmock/gmock-actions.h b/MicroBenchmarks/libs/benchmark/googletest/googlemock/include/gmock/gmock-actions.h
--- a/MicroBenchmarks/libs/benchmark/googletest/googlemock/include/gmock/gmock-actions.h
+++ b/MicroBenchmarks/libs/benchmark/googletest/googlemock/include/gmock/gmock-actions.h
@@ -27,7 +27,6 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-
 // Google Mock - a framework for writing C++ mock classes.
 //
 // The ACTION* family of macros can be used in a namespace scope to
@@ -125,7 +124,8 @@
 // To learn more about using these macros, please search for 'ACTION' on
 // https://github.com/google/googletest/blob/master/docs/gmock_cook_book.md
 
-// GOOGLETEST_CM0002 DO NOT DELETE
+// IWYU pragma: private, include "gmock/gmock.h"
+// IWYU pragma: friend gmock/.*
 
 #ifndef GOOGLEMOCK_INCLUDE_GMOCK_GMOCK_ACTIONS_H_
 #define GOOGLEMOCK_INCLUDE_GMOCK_GMOCK_ACTIONS_H_
@@ -1079,9 +1079,9 @@
 template <size_t k>
 struct ReturnArgAction {
   template <typename... Args>
-  auto operator()(const Args&... args) const ->
-      typename std::tuple_element<k, std::tuple<Args...>>::type {
-    return std::get<k>(std::tie(args...));
+  auto operator()(Args&&... args) const -> decltype(std::get<k>(
+      std::forward_as_tuple(std::forward<Args>(args)...))) {
+    return std::get<k>(std::forward_as_tuple(std::forward<Args>(args)...));
   }
 };
@@ -1610,6 +1610,9 @@
     std::shared_ptr<const gmock_Impl> impl_;  \
   };  \
   template <GMOCK_ACTION_TYPENAME_PARAMS_(params)>  \
+  inline full_name<GMOCK_ACTION_TYPE_PARAMS_(params)> name(  \
+      GMOCK_ACTION_TYPE_GVALUE_PARAMS_(params)) GTEST_MUST_USE_RESULT_;  \
+  template <GMOCK_ACTION_TYPENAME_PARAMS_(params)>  \
   inline full_name<GMOCK_ACTION_TYPE_PARAMS_(params)> name(  \
       GMOCK_ACTION_TYPE_GVALUE_PARAMS_(params)) {  \
     return full_name<GMOCK_ACTION_TYPE_PARAMS_(params)>(  \
diff --git a/MicroBenchmarks/libs/benchmark/googletest/googlemock/include/gmock/gmock-cardinalities.h b/MicroBenchmarks/libs/benchmark/googletest/googlemock/include/gmock/gmock-cardinalities.h
--- a/MicroBenchmarks/libs/benchmark/googletest/googlemock/include/gmock/gmock-cardinalities.h
+++ b/MicroBenchmarks/libs/benchmark/googletest/googlemock/include/gmock/gmock-cardinalities.h
@@ -27,14 +27,14 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-
 // Google Mock - a framework for writing C++ mock classes.
 //
 // This file implements some commonly used cardinalities. More
 // cardinalities can be defined by the user implementing the
 // CardinalityInterface interface if necessary.
 
-// GOOGLETEST_CM0002 DO NOT DELETE
+// IWYU pragma: private, include "gmock/gmock.h"
+// IWYU pragma: friend gmock/.*
 
 #ifndef GOOGLEMOCK_INCLUDE_GMOCK_GMOCK_CARDINALITIES_H_
 #define GOOGLEMOCK_INCLUDE_GMOCK_GMOCK_CARDINALITIES_H_
diff --git a/MicroBenchmarks/libs/benchmark/googletest/googlemock/include/gmock/gmock-function-mocker.h b/MicroBenchmarks/libs/benchmark/googletest/googlemock/include/gmock/gmock-function-mocker.h
--- a/MicroBenchmarks/libs/benchmark/googletest/googlemock/include/gmock/gmock-function-mocker.h
+++ b/MicroBenchmarks/libs/benchmark/googletest/googlemock/include/gmock/gmock-function-mocker.h
@@ -31,7 +31,8 @@
 //
 // This file implements MOCK_METHOD.
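The spec-validation changes below police the optional fourth argument of `MOCK_METHOD`. As a sketch of the call sites being protected (class names hypothetical), each element of that argument must be one of the modifier tokens accepted by the new `ValidateSpec()`: `const`, `override`, `final`, `noexcept`, `noexcept(...)`, `ref(&)`, `ref(&&)`, or `Calltype(...)`:

```c++
#include "gmock/gmock.h"

class Turtle {
 public:
  virtual ~Turtle() = default;
  virtual void PenDown() = 0;
  virtual int GetX() const = 0;
};

class MockTurtle : public Turtle {
 public:
  // A misspelled or comma-less token in the fourth argument is what the
  // improved static_assert diagnostics are meant to catch.
  MOCK_METHOD(void, PenDown, (), (override));
  MOCK_METHOD(int, GetX, (), (const, override));
};
```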
-// GOOGLETEST_CM0002 DO NOT DELETE
+// IWYU pragma: private, include "gmock/gmock.h"
+// IWYU pragma: friend gmock/.*
 
 #ifndef GOOGLEMOCK_INCLUDE_GMOCK_INTERNAL_GMOCK_FUNCTION_MOCKER_H_  // NOLINT
 #define GOOGLEMOCK_INCLUDE_GMOCK_INTERNAL_GMOCK_FUNCTION_MOCKER_H_  // NOLINT
@@ -64,6 +65,39 @@
   }
 };
 
+constexpr bool PrefixOf(const char* a, const char* b) {
+  return *a == 0 || (*a == *b && internal::PrefixOf(a + 1, b + 1));
+}
+
+template <size_t N, size_t M>
+constexpr bool StartsWith(const char (&prefix)[N], const char (&str)[M]) {
+  return N <= M && internal::PrefixOf(prefix, str);
+}
+
+template <size_t N, size_t M>
+constexpr bool EndsWith(const char (&suffix)[N], const char (&str)[M]) {
+  return N <= M && internal::PrefixOf(suffix, str + M - N);
+}
+
+template <size_t N, size_t M>
+constexpr bool Equals(const char (&a)[N], const char (&b)[M]) {
+  return N == M && internal::PrefixOf(a, b);
+}
+
+template <size_t N>
+constexpr bool ValidateSpec(const char (&spec)[N]) {
+  return internal::Equals("const", spec) ||
+         internal::Equals("override", spec) ||
+         internal::Equals("final", spec) ||
+         internal::Equals("noexcept", spec) ||
+         (internal::StartsWith("noexcept(", spec) &&
+          internal::EndsWith(")", spec)) ||
+         internal::Equals("ref(&)", spec) ||
+         internal::Equals("ref(&&)", spec) ||
+         (internal::StartsWith("Calltype(", spec) &&
+          internal::EndsWith(")", spec));
+}
+
 }  // namespace internal
 
 // The style guide prohibits "using" statements in a namespace scope
@@ -86,17 +120,18 @@
 #define GMOCK_INTERNAL_MOCK_METHOD_ARG_3(_Ret, _MethodName, _Args) \
   GMOCK_INTERNAL_MOCK_METHOD_ARG_4(_Ret, _MethodName, _Args, ())
 
-#define GMOCK_INTERNAL_MOCK_METHOD_ARG_4(_Ret, _MethodName, _Args, _Spec)  \
-  GMOCK_INTERNAL_ASSERT_PARENTHESIS(_Args);  \
-  GMOCK_INTERNAL_ASSERT_PARENTHESIS(_Spec);  \
-  GMOCK_INTERNAL_ASSERT_VALID_SIGNATURE(  \
-      GMOCK_PP_NARG0 _Args, GMOCK_INTERNAL_SIGNATURE(_Ret, _Args));  \
-  GMOCK_INTERNAL_ASSERT_VALID_SPEC(_Spec)  \
-  GMOCK_INTERNAL_MOCK_METHOD_IMPL(  \
-      GMOCK_PP_NARG0 _Args, _MethodName, GMOCK_INTERNAL_HAS_CONST(_Spec),  \
-      GMOCK_INTERNAL_HAS_OVERRIDE(_Spec), GMOCK_INTERNAL_HAS_FINAL(_Spec),  \
-      GMOCK_INTERNAL_GET_NOEXCEPT_SPEC(_Spec),  \
-      GMOCK_INTERNAL_GET_CALLTYPE(_Spec), GMOCK_INTERNAL_GET_REF_SPEC(_Spec),  \
+#define GMOCK_INTERNAL_MOCK_METHOD_ARG_4(_Ret, _MethodName, _Args, _Spec)  \
+  GMOCK_INTERNAL_ASSERT_PARENTHESIS(_Args);  \
+  GMOCK_INTERNAL_ASSERT_PARENTHESIS(_Spec);  \
+  GMOCK_INTERNAL_ASSERT_VALID_SIGNATURE(  \
+      GMOCK_PP_NARG0 _Args, GMOCK_INTERNAL_SIGNATURE(_Ret, _Args));  \
+  GMOCK_INTERNAL_ASSERT_VALID_SPEC(_Spec)  \
+  GMOCK_INTERNAL_MOCK_METHOD_IMPL(  \
+      GMOCK_PP_NARG0 _Args, _MethodName, GMOCK_INTERNAL_HAS_CONST(_Spec),  \
+      GMOCK_INTERNAL_HAS_OVERRIDE(_Spec), GMOCK_INTERNAL_HAS_FINAL(_Spec),  \
+      GMOCK_INTERNAL_GET_NOEXCEPT_SPEC(_Spec),  \
+      GMOCK_INTERNAL_GET_CALLTYPE_SPEC(_Spec),  \
+      GMOCK_INTERNAL_GET_REF_SPEC(_Spec),  \
       (GMOCK_INTERNAL_SIGNATURE(_Ret, _Args)))
 
 #define GMOCK_INTERNAL_MOCK_METHOD_ARG_5(...) \
@@ -170,7 +205,7 @@
 #define GMOCK_INTERNAL_EXPAND(...) __VA_ARGS__
 
-// Five Valid modifiers.
+// Valid modifiers.
#define GMOCK_INTERNAL_HAS_CONST(_Tuple) \ GMOCK_PP_HAS_COMMA(GMOCK_PP_FOR_EACH(GMOCK_INTERNAL_DETECT_CONST, ~, _Tuple)) @@ -189,6 +224,14 @@ GMOCK_PP_HAS_COMMA(GMOCK_INTERNAL_DETECT_NOEXCEPT(_i, _, _elem)), \ _elem, ) +#define GMOCK_INTERNAL_GET_CALLTYPE_SPEC(_Tuple) \ + GMOCK_PP_FOR_EACH(GMOCK_INTERNAL_CALLTYPE_SPEC_IF_CALLTYPE, ~, _Tuple) + +#define GMOCK_INTERNAL_CALLTYPE_SPEC_IF_CALLTYPE(_i, _, _elem) \ + GMOCK_PP_IF( \ + GMOCK_PP_HAS_COMMA(GMOCK_INTERNAL_DETECT_CALLTYPE(_i, _, _elem)), \ + GMOCK_PP_CAT(GMOCK_INTERNAL_UNPACK_, _elem), ) + #define GMOCK_INTERNAL_GET_REF_SPEC(_Tuple) \ GMOCK_PP_FOR_EACH(GMOCK_INTERNAL_REF_SPEC_IF_REF, ~, _Tuple) @@ -196,19 +239,25 @@ GMOCK_PP_IF(GMOCK_PP_HAS_COMMA(GMOCK_INTERNAL_DETECT_REF(_i, _, _elem)), \ GMOCK_PP_CAT(GMOCK_INTERNAL_UNPACK_, _elem), ) -#define GMOCK_INTERNAL_GET_CALLTYPE(_Tuple) \ - GMOCK_PP_FOR_EACH(GMOCK_INTERNAL_GET_CALLTYPE_IMPL, ~, _Tuple) - -#define GMOCK_INTERNAL_ASSERT_VALID_SPEC_ELEMENT(_i, _, _elem) \ - static_assert( \ - (GMOCK_PP_HAS_COMMA(GMOCK_INTERNAL_DETECT_CONST(_i, _, _elem)) + \ - GMOCK_PP_HAS_COMMA(GMOCK_INTERNAL_DETECT_OVERRIDE(_i, _, _elem)) + \ - GMOCK_PP_HAS_COMMA(GMOCK_INTERNAL_DETECT_FINAL(_i, _, _elem)) + \ - GMOCK_PP_HAS_COMMA(GMOCK_INTERNAL_DETECT_NOEXCEPT(_i, _, _elem)) + \ - GMOCK_PP_HAS_COMMA(GMOCK_INTERNAL_DETECT_REF(_i, _, _elem)) + \ - GMOCK_INTERNAL_IS_CALLTYPE(_elem)) == 1, \ - GMOCK_PP_STRINGIZE( \ +#ifdef GMOCK_INTERNAL_STRICT_SPEC_ASSERT +#define GMOCK_INTERNAL_ASSERT_VALID_SPEC_ELEMENT(_i, _, _elem) \ + static_assert( \ + ::testing::internal::ValidateSpec(GMOCK_PP_STRINGIZE(_elem)), \ + "Token \'" GMOCK_PP_STRINGIZE( \ + _elem) "\' cannot be recognized as a valid specification " \ + "modifier. Is a ',' missing?"); +#else +#define GMOCK_INTERNAL_ASSERT_VALID_SPEC_ELEMENT(_i, _, _elem) \ + static_assert( \ + (GMOCK_PP_HAS_COMMA(GMOCK_INTERNAL_DETECT_CONST(_i, _, _elem)) + \ + GMOCK_PP_HAS_COMMA(GMOCK_INTERNAL_DETECT_OVERRIDE(_i, _, _elem)) + \ + GMOCK_PP_HAS_COMMA(GMOCK_INTERNAL_DETECT_FINAL(_i, _, _elem)) + \ + GMOCK_PP_HAS_COMMA(GMOCK_INTERNAL_DETECT_NOEXCEPT(_i, _, _elem)) + \ + GMOCK_PP_HAS_COMMA(GMOCK_INTERNAL_DETECT_REF(_i, _, _elem)) + \ + GMOCK_PP_HAS_COMMA(GMOCK_INTERNAL_DETECT_CALLTYPE(_i, _, _elem))) == 1, \ + GMOCK_PP_STRINGIZE( \ _elem) " cannot be recognized as a valid specification modifier."); +#endif // GMOCK_INTERNAL_STRICT_SPEC_ASSERT // Modifiers implementation. #define GMOCK_INTERNAL_DETECT_CONST(_i, _, _elem) \ @@ -238,26 +287,12 @@ #define GMOCK_INTERNAL_UNPACK_ref(x) x -#define GMOCK_INTERNAL_GET_CALLTYPE_IMPL(_i, _, _elem) \ - GMOCK_PP_IF(GMOCK_INTERNAL_IS_CALLTYPE(_elem), \ - GMOCK_INTERNAL_GET_VALUE_CALLTYPE, GMOCK_PP_EMPTY) \ - (_elem) +#define GMOCK_INTERNAL_DETECT_CALLTYPE(_i, _, _elem) \ + GMOCK_PP_CAT(GMOCK_INTERNAL_DETECT_CALLTYPE_I_, _elem) + +#define GMOCK_INTERNAL_DETECT_CALLTYPE_I_Calltype , -// TODO(iserna): GMOCK_INTERNAL_IS_CALLTYPE and -// GMOCK_INTERNAL_GET_VALUE_CALLTYPE needed more expansions to work on windows -// maybe they can be simplified somehow. 
-#define GMOCK_INTERNAL_IS_CALLTYPE(_arg) \
-  GMOCK_INTERNAL_IS_CALLTYPE_I( \
-      GMOCK_PP_CAT(GMOCK_INTERNAL_IS_CALLTYPE_HELPER_, _arg))
-#define GMOCK_INTERNAL_IS_CALLTYPE_I(_arg) GMOCK_PP_IS_ENCLOSED_PARENS(_arg)
-
-#define GMOCK_INTERNAL_GET_VALUE_CALLTYPE(_arg) \
-  GMOCK_INTERNAL_GET_VALUE_CALLTYPE_I( \
-      GMOCK_PP_CAT(GMOCK_INTERNAL_IS_CALLTYPE_HELPER_, _arg))
-#define GMOCK_INTERNAL_GET_VALUE_CALLTYPE_I(_arg) \
-  GMOCK_PP_IDENTITY _arg
-
-#define GMOCK_INTERNAL_IS_CALLTYPE_HELPER_Calltype
+#define GMOCK_INTERNAL_UNPACK_Calltype(...) __VA_ARGS__
 
 // Note: The use of `identity_t` here allows _Ret to represent return types that
 // would normally need to be specified in a different way. For example, a method
diff --git a/MicroBenchmarks/libs/benchmark/googletest/googlemock/include/gmock/gmock-matchers.h b/MicroBenchmarks/libs/benchmark/googletest/googlemock/include/gmock/gmock-matchers.h
--- a/MicroBenchmarks/libs/benchmark/googletest/googlemock/include/gmock/gmock-matchers.h
+++ b/MicroBenchmarks/libs/benchmark/googletest/googlemock/include/gmock/gmock-matchers.h
@@ -27,7 +27,6 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-
 // Google Mock - a framework for writing C++ mock classes.
 //
 // The MATCHER* family of macros can be used in a namespace scope to
@@ -250,7 +249,8 @@
 // See googletest/include/gtest/gtest-matchers.h for the definition of class
 // Matcher, class MatcherInterface, and others.
 
-// GOOGLETEST_CM0002 DO NOT DELETE
+// IWYU pragma: private, include "gmock/gmock.h"
+// IWYU pragma: friend gmock/.*
 
 #ifndef GOOGLEMOCK_INCLUDE_GMOCK_GMOCK_MATCHERS_H_
 #define GOOGLEMOCK_INCLUDE_GMOCK_GMOCK_MATCHERS_H_
@@ -396,7 +396,7 @@
 // is already a Matcher.  This only compiles when type T can be
 // statically converted to type U.
 template <typename T, typename U>
-class MatcherCastImpl<T, Matcher<U> > {
+class MatcherCastImpl<T, Matcher<U>> {
  public:
   static Matcher<T> Cast(const Matcher<U>& source_matcher) {
     return Matcher<T>(new Impl(source_matcher));
@@ -450,7 +450,7 @@
 // This even more specialized version is used for efficiently casting
 // a matcher to its own type.
 template <typename T>
-class MatcherCastImpl<T, Matcher<T> > {
+class MatcherCastImpl<T, Matcher<T>> {
  public:
   static Matcher<T> Cast(const Matcher<T>& matcher) { return matcher; }
 };
@@ -544,7 +544,7 @@
   constexpr bool kUIsOther = GMOCK_KIND_OF_(RawU) == internal::kOther;
   GTEST_COMPILE_ASSERT_(
       kTIsOther || kUIsOther ||
-      (internal::LosslessArithmeticConvertible<RawT, RawU>::value),
+          (internal::LosslessArithmeticConvertible<RawT, RawU>::value),
       conversion_of_arithmetic_types_must_be_lossless);
   return MatcherCast<T>(matcher);
 }
@@ -689,8 +689,7 @@
 // is no failure, nothing will be streamed to os.
template void ExplainMatchFailureTupleTo(const MatcherTuple& matchers, - const ValueTuple& values, - ::std::ostream* os) { + const ValueTuple& values, ::std::ostream* os) { TuplePrefix::value>::ExplainMatchFailuresTo( matchers, values, os); } @@ -714,14 +713,14 @@ private: template struct IterateOverTuple { - OutIter operator() (Func f, const Tup& t, OutIter out) const { + OutIter operator()(Func f, const Tup& t, OutIter out) const { *out++ = f(::std::get(t)); return IterateOverTuple()(f, t, out); } }; template struct IterateOverTuple { - OutIter operator() (Func /* f */, const Tup& /* t */, OutIter out) const { + OutIter operator()(Func /* f */, const Tup& /* t */, OutIter out) const { return out; } }; @@ -767,9 +766,7 @@ } void DescribeTo(::std::ostream* os) const { *os << "is NULL"; } - void DescribeNegationTo(::std::ostream* os) const { - *os << "isn't NULL"; - } + void DescribeNegationTo(::std::ostream* os) const { *os << "isn't NULL"; } }; // Implements the polymorphic NotNull() matcher, which matches any raw or smart @@ -783,9 +780,7 @@ } void DescribeTo(::std::ostream* os) const { *os << "isn't NULL"; } - void DescribeNegationTo(::std::ostream* os) const { - *os << "is NULL"; - } + void DescribeNegationTo(::std::ostream* os) const { *os << "is NULL"; } }; // Ref(variable) matches any argument that is a reference to @@ -871,8 +866,7 @@ // String comparison for narrow or wide strings that can have embedded NUL // characters. template -bool CaseInsensitiveStringEquals(const StringType& s1, - const StringType& s2) { +bool CaseInsensitiveStringEquals(const StringType& s1, const StringType& s2) { // Are the heads equal? if (!CaseInsensitiveCStringEquals(s1.c_str(), s2.c_str())) { return false; @@ -933,8 +927,8 @@ bool MatchAndExplain(const MatcheeStringType& s, MatchResultListener* /* listener */) const { const StringType s2(s); - const bool eq = case_sensitive_ ? s2 == string_ : - CaseInsensitiveStringEquals(s2, string_); + const bool eq = case_sensitive_ ? s2 == string_ + : CaseInsensitiveStringEquals(s2, string_); return expect_eq_ == eq; } @@ -1021,8 +1015,7 @@ template class StartsWithMatcher { public: - explicit StartsWithMatcher(const StringType& prefix) : prefix_(prefix) { - } + explicit StartsWithMatcher(const StringType& prefix) : prefix_(prefix) {} #if GTEST_INTERNAL_HAS_STRING_VIEW bool MatchAndExplain(const internal::StringView& s, @@ -1053,7 +1046,7 @@ MatchResultListener* /* listener */) const { const StringType& s2(s); return s2.length() >= prefix_.length() && - s2.substr(0, prefix_.length()) == prefix_; + s2.substr(0, prefix_.length()) == prefix_; } void DescribeTo(::std::ostream* os) const { @@ -1107,7 +1100,7 @@ MatchResultListener* /* listener */) const { const StringType& s2(s); return s2.length() >= suffix_.length() && - s2.substr(s2.length() - suffix_.length()) == suffix_; + s2.substr(s2.length() - suffix_.length()) == suffix_; } void DescribeTo(::std::ostream* os) const { @@ -1124,6 +1117,45 @@ const StringType suffix_; }; +// Implements the polymorphic WhenBase64Unescaped(matcher) matcher, which can be +// used as a Matcher as long as T can be converted to a string. +class WhenBase64UnescapedMatcher { + public: + using is_gtest_matcher = void; + + explicit WhenBase64UnescapedMatcher( + const Matcher& internal_matcher) + : internal_matcher_(internal_matcher) {} + + // Matches anything that can convert to std::string. 
+ template + bool MatchAndExplain(const MatcheeStringType& s, + MatchResultListener* listener) const { + const std::string s2(s); // NOLINT (needed for working with string_view). + std::string unescaped; + if (!internal::Base64Unescape(s2, &unescaped)) { + if (listener != nullptr) { + *listener << "is not a valid base64 escaped string"; + } + return false; + } + return MatchPrintAndExplain(unescaped, internal_matcher_, listener); + } + + void DescribeTo(::std::ostream* os) const { + *os << "matches after Base64Unescape "; + internal_matcher_.DescribeTo(os); + } + + void DescribeNegationTo(::std::ostream* os) const { + *os << "does not match after Base64Unescape "; + internal_matcher_.DescribeTo(os); + } + + private: + const Matcher internal_matcher_; +}; + // Implements a matcher that compares the two fields of a 2-tuple // using one of the ==, <=, <, etc, operators. The two fields being // compared don't have to have the same type. @@ -1197,8 +1229,7 @@ template class NotMatcherImpl : public MatcherInterface { public: - explicit NotMatcherImpl(const Matcher& matcher) - : matcher_(matcher) {} + explicit NotMatcherImpl(const Matcher& matcher) : matcher_(matcher) {} bool MatchAndExplain(const T& x, MatchResultListener* listener) const override { @@ -1242,7 +1273,7 @@ template class AllOfMatcherImpl : public MatcherInterface { public: - explicit AllOfMatcherImpl(std::vector > matchers) + explicit AllOfMatcherImpl(std::vector> matchers) : matchers_(std::move(matchers)) {} void DescribeTo(::std::ostream* os) const override { @@ -1293,7 +1324,7 @@ } private: - const std::vector > matchers_; + const std::vector> matchers_; }; // VariadicMatcher is used for the variadic implementation of @@ -1316,14 +1347,14 @@ // all of the provided matchers (Matcher1, Matcher2, ...) can match. template operator Matcher() const { - std::vector > values; + std::vector> values; CreateVariadicMatcher(&values, std::integral_constant()); return Matcher(new CombiningMatcher(std::move(values))); } private: template - void CreateVariadicMatcher(std::vector >* values, + void CreateVariadicMatcher(std::vector>* values, std::integral_constant) const { values->push_back(SafeMatcherCast(std::get(matchers_))); CreateVariadicMatcher(values, std::integral_constant()); @@ -1331,7 +1362,7 @@ template void CreateVariadicMatcher( - std::vector >*, + std::vector>*, std::integral_constant) const {} std::tuple matchers_; @@ -1347,7 +1378,7 @@ template class AnyOfMatcherImpl : public MatcherInterface { public: - explicit AnyOfMatcherImpl(std::vector > matchers) + explicit AnyOfMatcherImpl(std::vector> matchers) : matchers_(std::move(matchers)) {} void DescribeTo(::std::ostream* os) const override { @@ -1398,7 +1429,7 @@ } private: - const std::vector > matchers_; + const std::vector> matchers_; }; // AnyOfMatcher is used for the variadic implementation of AnyOf(m_1, m_2, ...). @@ -1425,8 +1456,6 @@ bool condition_; MatcherTrue matcher_true_; MatcherFalse matcher_false_; - - GTEST_DISALLOW_ASSIGN_(ConditionalMatcher); }; // Wrapper for implementation of Any/AllOfArray(). @@ -1478,8 +1507,7 @@ // We cannot write 'return !!predicate_(x);' as that doesn't work // when predicate_(x) returns a class convertible to bool but // having no operator!(). - if (predicate_(x)) - return true; + if (predicate_(x)) return true; *listener << "didn't satisfy the given predicate"; return false; } @@ -1587,8 +1615,8 @@ // used for implementing ASSERT_THAT() and EXPECT_THAT(). // Implementation detail: 'matcher' is received by-value to force decaying. 
template -inline PredicateFormatterFromMatcher -MakePredicateFormatterFromMatcher(M matcher) { +inline PredicateFormatterFromMatcher MakePredicateFormatterFromMatcher( + M matcher) { return PredicateFormatterFromMatcher(std::move(matcher)); } @@ -1603,9 +1631,7 @@ } void DescribeTo(::std::ostream* os) const { *os << "is NaN"; } - void DescribeNegationTo(::std::ostream* os) const { - *os << "isn't NaN"; - } + void DescribeNegationTo(::std::ostream* os) const { *os << "isn't NaN"; } }; // Implements the polymorphic floating point equality matcher, which matches @@ -1621,9 +1647,8 @@ // equality comparisons between NANs will always return false. We specify a // negative max_abs_error_ term to indicate that ULP-based approximation will // be used for comparison. - FloatingEqMatcher(FloatType expected, bool nan_eq_nan) : - expected_(expected), nan_eq_nan_(nan_eq_nan), max_abs_error_(-1) { - } + FloatingEqMatcher(FloatType expected, bool nan_eq_nan) + : expected_(expected), nan_eq_nan_(nan_eq_nan), max_abs_error_(-1) {} // Constructor that supports a user-specified max_abs_error that will be used // for comparison instead of ULP-based approximation. The max absolute @@ -1685,8 +1710,8 @@ // os->precision() returns the previously set precision, which we // store to restore the ostream to its original configuration // after outputting. - const ::std::streamsize old_precision = os->precision( - ::std::numeric_limits::digits10 + 2); + const ::std::streamsize old_precision = + os->precision(::std::numeric_limits::digits10 + 2); if (FloatingPoint(expected_).is_nan()) { if (nan_eq_nan_) { *os << "is NaN"; @@ -1704,8 +1729,8 @@ void DescribeNegationTo(::std::ostream* os) const override { // As before, get original precision. - const ::std::streamsize old_precision = os->precision( - ::std::numeric_limits::digits10 + 2); + const ::std::streamsize old_precision = + os->precision(::std::numeric_limits::digits10 + 2); if (FloatingPoint(expected_).is_nan()) { if (nan_eq_nan_) { *os << "isn't NaN"; @@ -1723,9 +1748,7 @@ } private: - bool HasMaxAbsError() const { - return max_abs_error_ >= 0; - } + bool HasMaxAbsError() const { return max_abs_error_ >= 0; } const FloatType expected_; const bool nan_eq_nan_; @@ -1797,9 +1820,8 @@ template class Impl : public MatcherInterface { public: - Impl(FloatType max_abs_error, bool nan_eq_nan) : - max_abs_error_(max_abs_error), - nan_eq_nan_(nan_eq_nan) {} + Impl(FloatType max_abs_error, bool nan_eq_nan) + : max_abs_error_(max_abs_error), nan_eq_nan_(nan_eq_nan) {} bool MatchAndExplain(Tuple args, MatchResultListener* listener) const override { @@ -1975,9 +1997,7 @@ protected: const Matcher matcher_; - static std::string GetToName() { - return GetTypeName(); - } + static std::string GetToName() { return GetTypeName(); } private: static void GetCastTypeDescription(::std::ostream* os) { @@ -2114,7 +2134,7 @@ } template - bool MatchAndExplain(const T&value, MatchResultListener* listener) const { + bool MatchAndExplain(const T& value, MatchResultListener* listener) const { return MatchAndExplainImpl( typename std::is_pointer::type>::type(), value, listener); @@ -2166,16 +2186,16 @@ // Specialization for function pointers. 
template -struct CallableTraits { +struct CallableTraits { typedef ResType ResultType; - typedef ResType(*StorageType)(ArgType); + typedef ResType (*StorageType)(ArgType); - static void CheckIsValid(ResType(*f)(ArgType)) { + static void CheckIsValid(ResType (*f)(ArgType)) { GTEST_CHECK_(f != nullptr) << "NULL function pointer is passed into ResultOf()."; } template - static ResType Invoke(ResType(*f)(ArgType), T arg) { + static ResType Invoke(ResType (*f)(ArgType), T arg) { return (*f)(arg); } }; @@ -2186,13 +2206,21 @@ class ResultOfMatcher { public: ResultOfMatcher(Callable callable, InnerMatcher matcher) - : callable_(std::move(callable)), matcher_(std::move(matcher)) { + : ResultOfMatcher(/*result_description=*/"", std::move(callable), + std::move(matcher)) {} + + ResultOfMatcher(const std::string& result_description, Callable callable, + InnerMatcher matcher) + : result_description_(result_description), + callable_(std::move(callable)), + matcher_(std::move(matcher)) { CallableTraits::CheckIsValid(callable_); } template operator Matcher() const { - return Matcher(new Impl(callable_, matcher_)); + return Matcher( + new Impl(result_description_, callable_, matcher_)); } private: @@ -2205,16 +2233,27 @@ public: template - Impl(const CallableStorageType& callable, const M& matcher) - : callable_(callable), matcher_(MatcherCast(matcher)) {} + Impl(const std::string& result_description, + const CallableStorageType& callable, const M& matcher) + : result_description_(result_description), + callable_(callable), + matcher_(MatcherCast(matcher)) {} void DescribeTo(::std::ostream* os) const override { - *os << "is mapped by the given callable to a value that "; + if (result_description_.empty()) { + *os << "is mapped by the given callable to a value that "; + } else { + *os << "whose " << result_description_ << " "; + } matcher_.DescribeTo(os); } void DescribeNegationTo(::std::ostream* os) const override { - *os << "is mapped by the given callable to a value that "; + if (result_description_.empty()) { + *os << "is mapped by the given callable to a value that "; + } else { + *os << "whose " << result_description_ << " "; + } matcher_.DescribeNegationTo(os); } @@ -2230,6 +2269,7 @@ } private: + const std::string result_description_; // Functors often define operator() as non-const method even though // they are actually stateless. But we need to use them even when // 'this' is a const pointer. It's the user's responsibility not to @@ -2239,6 +2279,7 @@ const Matcher matcher_; }; // class Impl + const std::string result_description_; const CallableStorageType callable_; const InnerMatcher matcher_; }; @@ -2248,8 +2289,7 @@ class SizeIsMatcher { public: explicit SizeIsMatcher(const SizeMatcher& size_matcher) - : size_matcher_(size_matcher) { - } + : size_matcher_(size_matcher) {} template operator Matcher() const { @@ -2277,8 +2317,8 @@ SizeType size = container.size(); StringMatchResultListener size_listener; const bool result = size_matcher_.MatchAndExplain(size, &size_listener); - *listener - << "whose size " << size << (result ? " matches" : " doesn't match"); + *listener << "whose size " << size + << (result ? 
" matches" : " doesn't match"); PrintIfNotEmpty(size_listener.str(), listener->stream()); return result; } @@ -2307,8 +2347,9 @@ template class Impl : public MatcherInterface { public: - typedef internal::StlContainerView< - GTEST_REMOVE_REFERENCE_AND_CONST_(Container)> ContainerView; + typedef internal::StlContainerView + ContainerView; typedef typename std::iterator_traits< typename ContainerView::type::const_iterator>::difference_type DistanceType; @@ -2388,18 +2429,15 @@ typedef internal::StlContainerView< typename std::remove_const::type> LhsView; - typedef typename LhsView::type LhsStlContainer; StlContainerReference lhs_stl_container = LhsView::ConstReference(lhs); - if (lhs_stl_container == expected_) - return true; + if (lhs_stl_container == expected_) return true; ::std::ostream* const os = listener->stream(); if (os != nullptr) { // Something is different. Check for extra values first. bool printed_header = false; - for (typename LhsStlContainer::const_iterator it = - lhs_stl_container.begin(); - it != lhs_stl_container.end(); ++it) { + for (auto it = lhs_stl_container.begin(); it != lhs_stl_container.end(); + ++it) { if (internal::ArrayAwareFind(expected_.begin(), expected_.end(), *it) == expected_.end()) { if (printed_header) { @@ -2414,11 +2452,10 @@ // Now check for missing values. bool printed_header2 = false; - for (typename StlContainer::const_iterator it = expected_.begin(); - it != expected_.end(); ++it) { - if (internal::ArrayAwareFind( - lhs_stl_container.begin(), lhs_stl_container.end(), *it) == - lhs_stl_container.end()) { + for (auto it = expected_.begin(); it != expected_.end(); ++it) { + if (internal::ArrayAwareFind(lhs_stl_container.begin(), + lhs_stl_container.end(), + *it) == lhs_stl_container.end()) { if (printed_header2) { *os << ", "; } else { @@ -2441,7 +2478,9 @@ // A comparator functor that uses the < operator to compare two values. struct LessComparator { template - bool operator()(const T& lhs, const U& rhs) const { return lhs < rhs; } + bool operator()(const T& lhs, const U& rhs) const { + return lhs < rhs; + } }; // Implements WhenSortedBy(comparator, container_matcher). @@ -2460,14 +2499,16 @@ template class Impl : public MatcherInterface { public: - typedef internal::StlContainerView< - GTEST_REMOVE_REFERENCE_AND_CONST_(LhsContainer)> LhsView; + typedef internal::StlContainerView + LhsView; typedef typename LhsView::type LhsStlContainer; typedef typename LhsView::const_reference LhsStlContainerReference; // Transforms std::pair into std::pair // so that we can match associative containers. 
- typedef typename RemoveConstFromKey< - typename LhsStlContainer::value_type>::type LhsValue; + typedef + typename RemoveConstFromKey::type + LhsValue; Impl(const Comparator& comparator, const ContainerMatcher& matcher) : comparator_(comparator), matcher_(matcher) {} @@ -2487,8 +2528,8 @@ LhsStlContainerReference lhs_stl_container = LhsView::ConstReference(lhs); ::std::vector sorted_container(lhs_stl_container.begin(), lhs_stl_container.end()); - ::std::sort( - sorted_container.begin(), sorted_container.end(), comparator_); + ::std::sort(sorted_container.begin(), sorted_container.end(), + comparator_); if (!listener->IsInterested()) { // If the listener is not interested, we do not need to @@ -2501,8 +2542,8 @@ *listener << " when sorted"; StringMatchResultListener inner_listener; - const bool match = matcher_.MatchAndExplain(sorted_container, - &inner_listener); + const bool match = + matcher_.MatchAndExplain(sorted_container, &inner_listener); PrintIfNotEmpty(inner_listener.str(), listener->stream()); return match; } @@ -2557,8 +2598,9 @@ template class Impl : public MatcherInterface { public: - typedef internal::StlContainerView< - GTEST_REMOVE_REFERENCE_AND_CONST_(LhsContainer)> LhsView; + typedef internal::StlContainerView + LhsView; typedef typename LhsView::type LhsStlContainer; typedef typename LhsView::const_reference LhsStlContainerReference; typedef typename LhsStlContainer::value_type LhsValue; @@ -2598,8 +2640,8 @@ return false; } - typename LhsStlContainer::const_iterator left = lhs_stl_container.begin(); - typename RhsStlContainer::const_iterator right = rhs_.begin(); + auto left = lhs_stl_container.begin(); + auto right = rhs_.begin(); for (size_t i = 0; i != actual_size; ++i, ++left, ++right) { if (listener->IsInterested()) { StringMatchResultListener inner_listener; @@ -2652,18 +2694,17 @@ template explicit QuantifierMatcherImpl(InnerMatcher inner_matcher) : inner_matcher_( - testing::SafeMatcherCast(inner_matcher)) {} + testing::SafeMatcherCast(inner_matcher)) {} // Checks whether: // * All elements in the container match, if all_elements_should_match. // * Any element in the container matches, if !all_elements_should_match. - bool MatchAndExplainImpl(bool all_elements_should_match, - Container container, + bool MatchAndExplainImpl(bool all_elements_should_match, Container container, MatchResultListener* listener) const { StlContainerReference stl_container = View::ConstReference(container); size_t i = 0; - for (typename StlContainer::const_iterator it = stl_container.begin(); - it != stl_container.end(); ++it, ++i) { + for (auto it = stl_container.begin(); it != stl_container.end(); + ++it, ++i) { StringMatchResultListener inner_listener; const bool matches = inner_matcher_.MatchAndExplain(*it, &inner_listener); @@ -2906,8 +2947,7 @@ template explicit KeyMatcherImpl(InnerMatcher inner_matcher) : inner_matcher_( - testing::SafeMatcherCast(inner_matcher)) { - } + testing::SafeMatcherCast(inner_matcher)) {} // Returns true if and only if 'key_value.first' (the key) matches the inner // matcher. @@ -3012,8 +3052,7 @@ : first_matcher_( testing::SafeMatcherCast(first_matcher)), second_matcher_( - testing::SafeMatcherCast(second_matcher)) { - } + testing::SafeMatcherCast(second_matcher)) {} // Describes what this matcher does. 
void DescribeTo(::std::ostream* os) const override { @@ -3091,7 +3130,7 @@ : first_matcher_(first_matcher), second_matcher_(second_matcher) {} template - operator Matcher () const { + operator Matcher() const { return Matcher( new PairMatcherImpl(first_matcher_, second_matcher_)); } @@ -3363,7 +3402,7 @@ // explanations[i] is the explanation of the element at index i. ::std::vector explanations(count()); StlContainerReference stl_container = View::ConstReference(container); - typename StlContainer::const_iterator it = stl_container.begin(); + auto it = stl_container.begin(); size_t exam_pos = 0; bool mismatch_found = false; // Have we found a mismatched element yet? @@ -3440,7 +3479,7 @@ size_t count() const { return matchers_.size(); } - ::std::vector > matchers_; + ::std::vector> matchers_; }; // Connectivity matrix of (elements X matchers), in element-major order. @@ -3452,8 +3491,7 @@ MatchMatrix(size_t num_elements, size_t num_matchers) : num_elements_(num_elements), num_matchers_(num_matchers), - matched_(num_elements_* num_matchers_, 0) { - } + matched_(num_elements_ * num_matchers_, 0) {} size_t LhsSize() const { return num_elements_; } size_t RhsSize() const { return num_matchers_; } @@ -3492,8 +3530,7 @@ // Returns a maximum bipartite matching for the specified graph 'g'. // The matching is represented as a vector of {element, matcher} pairs. -GTEST_API_ ElementMatcherPairs -FindMaxBipartiteMatching(const MatchMatrix& g); +GTEST_API_ ElementMatcherPairs FindMaxBipartiteMatching(const MatchMatrix& g); struct UnorderedMatcherRequire { enum Flags { @@ -3530,9 +3567,7 @@ bool FindPairing(const MatchMatrix& matrix, MatchResultListener* listener) const; - MatcherDescriberVec& matcher_describers() { - return matcher_describers_; - } + MatcherDescriberVec& matcher_describers() { return matcher_describers_; } static Message Elements(size_t n) { return Message() << n << " element" << (n == 1 ? "" : "s"); @@ -3556,7 +3591,6 @@ typedef internal::StlContainerView View; typedef typename View::type StlContainer; typedef typename View::const_reference StlContainerReference; - typedef typename StlContainer::const_iterator StlContainerConstIterator; typedef typename StlContainer::value_type Element; template @@ -3639,7 +3673,7 @@ return matrix; } - ::std::vector > matchers_; + ::std::vector> matchers_; }; // Functor for use in TransformTuple. @@ -3664,7 +3698,7 @@ typedef GTEST_REMOVE_REFERENCE_AND_CONST_(Container) RawContainer; typedef typename internal::StlContainerView::type View; typedef typename View::value_type Element; - typedef ::std::vector > MatcherVec; + typedef ::std::vector> MatcherVec; MatcherVec matchers; matchers.reserve(::std::tuple_size::value); TransformTupleValues(CastAndAppendTransform(), matchers_, @@ -3695,7 +3729,7 @@ typedef GTEST_REMOVE_REFERENCE_AND_CONST_(Container) RawContainer; typedef typename internal::StlContainerView::type View; typedef typename View::value_type Element; - typedef ::std::vector > MatcherVec; + typedef ::std::vector> MatcherVec; MatcherVec matchers; matchers.reserve(::std::tuple_size::value); TransformTupleValues(CastAndAppendTransform(), matchers_, @@ -3830,9 +3864,9 @@ // 'negation' is false; otherwise returns the description of the // negation of the matcher. 'param_values' contains a list of strings // that are the print-out of the matcher's parameters. 
-GTEST_API_ std::string FormatMatcherDescription(bool negation, - const char* matcher_name, - const Strings& param_values); +GTEST_API_ std::string FormatMatcherDescription( + bool negation, const char* matcher_name, + const std::vector& param_names, const Strings& param_values); // Implements a matcher that checks the value of a optional<> type variable. template @@ -4155,14 +4189,14 @@ } template -inline internal::UnorderedElementsAreArrayMatcher -UnorderedElementsAreArray(const T* pointer, size_t count) { +inline internal::UnorderedElementsAreArrayMatcher UnorderedElementsAreArray( + const T* pointer, size_t count) { return UnorderedElementsAreArray(pointer, pointer + count); } template -inline internal::UnorderedElementsAreArrayMatcher -UnorderedElementsAreArray(const T (&array)[N]) { +inline internal::UnorderedElementsAreArrayMatcher UnorderedElementsAreArray( + const T (&array)[N]) { return UnorderedElementsAreArray(array, N); } @@ -4174,8 +4208,8 @@ } template -inline internal::UnorderedElementsAreArrayMatcher -UnorderedElementsAreArray(::std::initializer_list xs) { +inline internal::UnorderedElementsAreArrayMatcher UnorderedElementsAreArray( + ::std::initializer_list xs) { return UnorderedElementsAreArray(xs.begin(), xs.end()); } @@ -4209,14 +4243,14 @@ } // Creates a polymorphic matcher that matches any NULL pointer. -inline PolymorphicMatcher IsNull() { +inline PolymorphicMatcher IsNull() { return MakePolymorphicMatcher(internal::IsNullMatcher()); } // Creates a polymorphic matcher that matches any non-NULL pointer. // This is convenient as Not(NULL) doesn't compile (the compiler // thinks that that expression is comparing a pointer with an integer). -inline PolymorphicMatcher NotNull() { +inline PolymorphicMatcher NotNull() { return MakePolymorphicMatcher(internal::NotNullMatcher()); } @@ -4247,8 +4281,8 @@ // Creates a matcher that matches any double argument approximately equal to // rhs, up to the specified max absolute error bound, where two NANs are // considered unequal. The max absolute error bound must be non-negative. -inline internal::FloatingEqMatcher DoubleNear( - double rhs, double max_abs_error) { +inline internal::FloatingEqMatcher DoubleNear(double rhs, + double max_abs_error) { return internal::FloatingEqMatcher(rhs, false, max_abs_error); } @@ -4275,8 +4309,8 @@ // Creates a matcher that matches any float argument approximately equal to // rhs, up to the specified max absolute error bound, where two NANs are // considered unequal. The max absolute error bound must be non-negative. -inline internal::FloatingEqMatcher FloatNear( - float rhs, float max_abs_error) { +inline internal::FloatingEqMatcher FloatNear(float rhs, + float max_abs_error) { return internal::FloatingEqMatcher(rhs, false, max_abs_error); } @@ -4304,7 +4338,7 @@ // If To is a reference and the cast fails, this matcher returns false // immediately. template -inline PolymorphicMatcher > +inline PolymorphicMatcher> WhenDynamicCastTo(const Matcher& inner_matcher) { return MakePolymorphicMatcher( internal::WhenDynamicCastToMatcher(inner_matcher)); @@ -4316,12 +4350,10 @@ // Field(&Foo::number, Ge(5)) // matches a Foo object x if and only if x.number >= 5. 
template -inline PolymorphicMatcher< - internal::FieldMatcher > Field( +inline PolymorphicMatcher> Field( FieldType Class::*field, const FieldMatcher& matcher) { - return MakePolymorphicMatcher( - internal::FieldMatcher( - field, MatcherCast(matcher))); + return MakePolymorphicMatcher(internal::FieldMatcher( + field, MatcherCast(matcher))); // The call to MatcherCast() is required for supporting inner // matchers of compatible types. For example, it allows // Field(&Foo::bar, m) @@ -4331,7 +4363,7 @@ // Same as Field() but also takes the name of the field to provide better error // messages. template -inline PolymorphicMatcher > Field( +inline PolymorphicMatcher> Field( const std::string& field_name, FieldType Class::*field, const FieldMatcher& matcher) { return MakePolymorphicMatcher(internal::FieldMatcher( @@ -4344,7 +4376,7 @@ // matches a Foo object x if and only if x.str() starts with "hi". template inline PolymorphicMatcher > + Class, PropertyType, PropertyType (Class::*)() const>> Property(PropertyType (Class::*property)() const, const PropertyMatcher& matcher) { return MakePolymorphicMatcher( @@ -4361,7 +4393,7 @@ // better error messages. template inline PolymorphicMatcher > + Class, PropertyType, PropertyType (Class::*)() const>> Property(const std::string& property_name, PropertyType (Class::*property)() const, const PropertyMatcher& matcher) { @@ -4374,8 +4406,8 @@ // The same as above but for reference-qualified member functions. template inline PolymorphicMatcher > -Property(PropertyType (Class::*property)() const &, + Class, PropertyType, PropertyType (Class::*)() const&>> +Property(PropertyType (Class::*property)() const&, const PropertyMatcher& matcher) { return MakePolymorphicMatcher( internal::PropertyMatcher inline PolymorphicMatcher > + Class, PropertyType, PropertyType (Class::*)() const&>> Property(const std::string& property_name, - PropertyType (Class::*property)() const &, + PropertyType (Class::*property)() const&, const PropertyMatcher& matcher) { return MakePolymorphicMatcher( internal::PropertyMatcher internal::ResultOfMatcher ResultOf( Callable callable, InnerMatcher matcher) { + return internal::ResultOfMatcher(std::move(callable), + std::move(matcher)); +} + +// Same as ResultOf() above, but also takes a description of the `callable` +// result to provide better error messages. +template +internal::ResultOfMatcher ResultOf( + const std::string& result_description, Callable callable, + InnerMatcher matcher) { return internal::ResultOfMatcher( - std::move(callable), std::move(matcher)); + result_description, std::move(callable), std::move(matcher)); } // String matchers. // Matches a string equal to str. template -PolymorphicMatcher > StrEq( +PolymorphicMatcher> StrEq( const internal::StringLike& str) { return MakePolymorphicMatcher( internal::StrEqualityMatcher(std::string(str), true, true)); @@ -4423,7 +4465,7 @@ // Matches a string not equal to str. template -PolymorphicMatcher > StrNe( +PolymorphicMatcher> StrNe( const internal::StringLike& str) { return MakePolymorphicMatcher( internal::StrEqualityMatcher(std::string(str), false, true)); @@ -4431,7 +4473,7 @@ // Matches a string equal to str, ignoring case. template -PolymorphicMatcher > StrCaseEq( +PolymorphicMatcher> StrCaseEq( const internal::StringLike& str) { return MakePolymorphicMatcher( internal::StrEqualityMatcher(std::string(str), true, false)); @@ -4439,7 +4481,7 @@ // Matches a string not equal to str, ignoring case. 
template -PolymorphicMatcher > StrCaseNe( +PolymorphicMatcher> StrCaseNe( const internal::StringLike& str) { return MakePolymorphicMatcher(internal::StrEqualityMatcher( std::string(str), false, false)); @@ -4448,7 +4490,7 @@ // Creates a matcher that matches any string, std::string, or C string // that contains the given substring. template -PolymorphicMatcher > HasSubstr( +PolymorphicMatcher> HasSubstr( const internal::StringLike& substring) { return MakePolymorphicMatcher( internal::HasSubstrMatcher(std::string(substring))); @@ -4456,7 +4498,7 @@ // Matches a string that starts with 'prefix' (case-sensitive). template -PolymorphicMatcher > StartsWith( +PolymorphicMatcher> StartsWith( const internal::StringLike& prefix) { return MakePolymorphicMatcher( internal::StartsWithMatcher(std::string(prefix))); @@ -4464,7 +4506,7 @@ // Matches a string that ends with 'suffix' (case-sensitive). template -PolymorphicMatcher > EndsWith( +PolymorphicMatcher> EndsWith( const internal::StringLike& suffix) { return MakePolymorphicMatcher( internal::EndsWithMatcher(std::string(suffix))); @@ -4474,50 +4516,50 @@ // Wide string matchers. // Matches a string equal to str. -inline PolymorphicMatcher > StrEq( +inline PolymorphicMatcher> StrEq( const std::wstring& str) { return MakePolymorphicMatcher( internal::StrEqualityMatcher(str, true, true)); } // Matches a string not equal to str. -inline PolymorphicMatcher > StrNe( +inline PolymorphicMatcher> StrNe( const std::wstring& str) { return MakePolymorphicMatcher( internal::StrEqualityMatcher(str, false, true)); } // Matches a string equal to str, ignoring case. -inline PolymorphicMatcher > -StrCaseEq(const std::wstring& str) { +inline PolymorphicMatcher> StrCaseEq( + const std::wstring& str) { return MakePolymorphicMatcher( internal::StrEqualityMatcher(str, true, false)); } // Matches a string not equal to str, ignoring case. -inline PolymorphicMatcher > -StrCaseNe(const std::wstring& str) { +inline PolymorphicMatcher> StrCaseNe( + const std::wstring& str) { return MakePolymorphicMatcher( internal::StrEqualityMatcher(str, false, false)); } // Creates a matcher that matches any ::wstring, std::wstring, or C wide string // that contains the given substring. -inline PolymorphicMatcher > HasSubstr( +inline PolymorphicMatcher> HasSubstr( const std::wstring& substring) { return MakePolymorphicMatcher( internal::HasSubstrMatcher(substring)); } // Matches a string that starts with 'prefix' (case-sensitive). -inline PolymorphicMatcher > -StartsWith(const std::wstring& prefix) { +inline PolymorphicMatcher> StartsWith( + const std::wstring& prefix) { return MakePolymorphicMatcher( internal::StartsWithMatcher(prefix)); } // Matches a string that ends with 'suffix' (case-sensitive). -inline PolymorphicMatcher > EndsWith( +inline PolymorphicMatcher> EndsWith( const std::wstring& suffix) { return MakePolymorphicMatcher( internal::EndsWithMatcher(suffix)); @@ -4612,8 +4654,8 @@ // predicate. The predicate can be any unary function or functor // whose return type can be implicitly converted to bool. template -inline PolymorphicMatcher > -Truly(Predicate pred) { +inline PolymorphicMatcher> Truly( + Predicate pred) { return MakePolymorphicMatcher(internal::TrulyMatcher(pred)); } @@ -4624,8 +4666,8 @@ // EXPECT_THAT(container, SizeIs(2)); // Checks container has 2 elements. // EXPECT_THAT(container, SizeIs(Le(2)); // Checks container has at most 2. 
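[Editor's note] The second SizeIs comment line above is missing its closing parenthesis; the corrected usage, as a compilable sketch:

```cpp
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"

using ::testing::Le;
using ::testing::SizeIs;

TEST(SizeIsTest, MatchesContainerSize) {
  std::vector<int> v = {1, 2};
  EXPECT_THAT(v, SizeIs(2));      // Exactly two elements.
  EXPECT_THAT(v, SizeIs(Le(2)));  // At most two, via an inner matcher.
}
```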
template -inline internal::SizeIsMatcher -SizeIs(const SizeMatcher& size_matcher) { +inline internal::SizeIsMatcher SizeIs( + const SizeMatcher& size_matcher) { return internal::SizeIsMatcher(size_matcher); } @@ -4635,8 +4677,8 @@ // do not implement size(). The container must provide const_iterator (with // valid iterator_traits), begin() and end(). template -inline internal::BeginEndDistanceIsMatcher -BeginEndDistanceIs(const DistanceMatcher& distance_matcher) { +inline internal::BeginEndDistanceIsMatcher BeginEndDistanceIs( + const DistanceMatcher& distance_matcher) { return internal::BeginEndDistanceIsMatcher(distance_matcher); } @@ -4645,8 +4687,8 @@ // values that are included in one container but not the other. (Duplicate // values and order differences are not explained.) template -inline PolymorphicMatcher::type>> +inline PolymorphicMatcher< + internal::ContainerEqMatcher::type>> ContainerEq(const Container& rhs) { return MakePolymorphicMatcher(internal::ContainerEqMatcher(rhs)); } @@ -4654,9 +4696,8 @@ // Returns a matcher that matches a container that, when sorted using // the given comparator, matches container_matcher. template -inline internal::WhenSortedByMatcher -WhenSortedBy(const Comparator& comparator, - const ContainerMatcher& container_matcher) { +inline internal::WhenSortedByMatcher WhenSortedBy( + const Comparator& comparator, const ContainerMatcher& container_matcher) { return internal::WhenSortedByMatcher( comparator, container_matcher); } @@ -4666,9 +4707,9 @@ template inline internal::WhenSortedByMatcher WhenSorted(const ContainerMatcher& container_matcher) { - return - internal::WhenSortedByMatcher( - internal::LessComparator(), container_matcher); + return internal::WhenSortedByMatcher( + internal::LessComparator(), container_matcher); } // Matches an STL-style container or a native array that contains the @@ -4685,15 +4726,13 @@ rhs); } - // Supports the Pointwise(m, {a, b, c}) syntax. template -inline internal::PointwiseMatcher > Pointwise( +inline internal::PointwiseMatcher> Pointwise( const TupleMatcher& tuple_matcher, std::initializer_list rhs) { return Pointwise(tuple_matcher, std::vector(rhs)); } - // UnorderedPointwise(pair_matcher, rhs) matches an STL-style // container or a native array that contains the same number of // elements as in rhs, where in some permutation of the container, its @@ -4722,22 +4761,20 @@ RhsView::ConstReference(rhs_container); // Create a matcher for each element in rhs_container. - ::std::vector > matchers; - for (typename RhsStlContainer::const_iterator it = rhs_stl_container.begin(); - it != rhs_stl_container.end(); ++it) { - matchers.push_back( - internal::MatcherBindSecond(tuple2_matcher, *it)); + ::std::vector> matchers; + for (auto it = rhs_stl_container.begin(); it != rhs_stl_container.end(); + ++it) { + matchers.push_back(internal::MatcherBindSecond(tuple2_matcher, *it)); } // Delegate the work to UnorderedElementsAreArray(). return UnorderedElementsAreArray(matchers); } - // Supports the UnorderedPointwise(m, {a, b, c}) syntax. template inline internal::UnorderedElementsAreArrayMatcher< - typename internal::BoundSecondMatcher > + typename internal::BoundSecondMatcher> UnorderedPointwise(const Tuple2Matcher& tuple2_matcher, std::initializer_list rhs) { return UnorderedPointwise(tuple2_matcher, std::vector(rhs)); @@ -4943,16 +4980,16 @@ // to match a std::map that contains exactly one element whose key // is >= 5 and whose value equals "foo". 
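[Editor's note] The Pair() comment just above describes matching a std::map element whose key is >= 5 and whose value equals "foo"; that exact usage, as a sketch:

```cpp
#include <map>
#include <string>
#include "gmock/gmock.h"
#include "gtest/gtest.h"

using ::testing::ElementsAre;
using ::testing::Ge;
using ::testing::Pair;

TEST(PairTest, MatchesMapEntry) {
  std::map<int, std::string> m = {{7, "foo"}};
  // Exactly one element, key >= 5, value equal to "foo".
  EXPECT_THAT(m, ElementsAre(Pair(Ge(5), "foo")));
}
```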
template -inline internal::PairMatcher -Pair(FirstMatcher first_matcher, SecondMatcher second_matcher) { - return internal::PairMatcher( - first_matcher, second_matcher); +inline internal::PairMatcher Pair( + FirstMatcher first_matcher, SecondMatcher second_matcher) { + return internal::PairMatcher(first_matcher, + second_matcher); } namespace no_adl { // Conditional() creates a matcher that conditionally uses either the first or // second matcher provided. For example, we could create an `equal if, and only -// if' matcher using the Conditonal wrapper as follows: +// if' matcher using the Conditional wrapper as follows: // // EXPECT_THAT(result, Conditional(condition, Eq(expected), Ne(expected))); template @@ -4988,6 +5025,14 @@ const InnerMatcher& inner_matcher) { return internal::AddressMatcher(inner_matcher); } + +// Matches a base64 escaped string, when the unescaped string matches the +// internal matcher. +template +internal::WhenBase64UnescapedMatcher WhenBase64Unescaped( + const MatcherType& internal_matcher) { + return internal::WhenBase64UnescapedMatcher(internal_matcher); +} } // namespace no_adl // Returns a predicate that is satisfied by anything that matches the @@ -5006,8 +5051,8 @@ // Matches the value against the given matcher and explains the match // result to listener. template -inline bool ExplainMatchResult( - M matcher, const T& value, MatchResultListener* listener) { +inline bool ExplainMatchResult(M matcher, const T& value, + MatchResultListener* listener) { return SafeMatcherCast(matcher).MatchAndExplain(value, listener); } @@ -5166,7 +5211,9 @@ // // EXPECT_CALL(foo, Bar(_, _)).With(Eq()); template -inline InnerMatcher AllArgs(const InnerMatcher& matcher) { return matcher; } +inline InnerMatcher AllArgs(const InnerMatcher& matcher) { + return matcher; +} // Returns a matcher that matches the value of an optional<> type variable. // The matcher implementation only uses '!arg' and requires that the optional<> @@ -5184,7 +5231,7 @@ // Returns a matcher that matches the value of a absl::any type variable. template -PolymorphicMatcher > AnyWith( +PolymorphicMatcher> AnyWith( const Matcher& matcher) { return MakePolymorphicMatcher( internal::any_cast_matcher::AnyCastMatcher(matcher)); @@ -5195,7 +5242,7 @@ // functions. // It is compatible with std::variant. template -PolymorphicMatcher > VariantWith( +PolymorphicMatcher> VariantWith( const Matcher& matcher) { return MakePolymorphicMatcher( internal::variant_matcher::VariantMatcher(matcher)); @@ -5224,7 +5271,8 @@ template bool MatchAndExplain(const Err& err, MatchResultListener* listener) const { - *listener << "which contains .what() that "; + *listener << "which contains .what() (of value = " << err.what() + << ") that "; return matcher_.MatchAndExplain(err.what(), listener); } @@ -5374,12 +5422,14 @@ // tests. ASSERT_THAT(value, matcher) and EXPECT_THAT(value, matcher) // succeed if and only if the value matches the matcher. If the assertion // fails, the value and the description of the matcher will be printed. -#define ASSERT_THAT(value, matcher) ASSERT_PRED_FORMAT1(\ - ::testing::internal::MakePredicateFormatterFromMatcher(matcher), value) -#define EXPECT_THAT(value, matcher) EXPECT_PRED_FORMAT1(\ - ::testing::internal::MakePredicateFormatterFromMatcher(matcher), value) - -// MATCHER* macroses itself are listed below. 
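[Editor's note] Two of the newer matchers touched in this hunk are Conditional() (whose comment typo "Conditonal" the diff fixes) and the newly added WhenBase64Unescaped(). A hedged sketch of both, assuming a googletest build that includes this release:

```cpp
#include "gmock/gmock.h"
#include "gtest/gtest.h"

using ::testing::Conditional;
using ::testing::Eq;
using ::testing::Ne;
using ::testing::StrEq;
using ::testing::WhenBase64Unescaped;

TEST(NewMatchersTest, ConditionalAndBase64) {
  const bool expect_match = true;
  const int expected = 42;
  // Uses the first matcher when the condition is true, the second otherwise.
  EXPECT_THAT(42, Conditional(expect_match, Eq(expected), Ne(expected)));
  // Matches when the base64-decoded value satisfies the inner matcher;
  // "SGVsbG8=" is the base64 encoding of "Hello".
  EXPECT_THAT("SGVsbG8=", WhenBase64Unescaped(StrEq("Hello")));
}
```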
+#define ASSERT_THAT(value, matcher) \ + ASSERT_PRED_FORMAT1( \ + ::testing::internal::MakePredicateFormatterFromMatcher(matcher), value) +#define EXPECT_THAT(value, matcher) \ + EXPECT_PRED_FORMAT1( \ + ::testing::internal::MakePredicateFormatterFromMatcher(matcher), value) + +// MATCHER* macros itself are listed below. #define MATCHER(name, description) \ class name##Matcher \ : public ::testing::internal::MatcherBaseImpl { \ @@ -5406,7 +5456,7 @@ return gmock_description; \ } \ return ::testing::internal::FormatMatcherDescription(negation, #name, \ - {}); \ + {}, {}); \ } \ }; \ }; \ @@ -5418,33 +5468,41 @@ const #define MATCHER_P(name, p0, description) \ - GMOCK_INTERNAL_MATCHER(name, name##MatcherP, description, (p0)) -#define MATCHER_P2(name, p0, p1, description) \ - GMOCK_INTERNAL_MATCHER(name, name##MatcherP2, description, (p0, p1)) -#define MATCHER_P3(name, p0, p1, p2, description) \ - GMOCK_INTERNAL_MATCHER(name, name##MatcherP3, description, (p0, p1, p2)) -#define MATCHER_P4(name, p0, p1, p2, p3, description) \ - GMOCK_INTERNAL_MATCHER(name, name##MatcherP4, description, (p0, p1, p2, p3)) + GMOCK_INTERNAL_MATCHER(name, name##MatcherP, description, (#p0), (p0)) +#define MATCHER_P2(name, p0, p1, description) \ + GMOCK_INTERNAL_MATCHER(name, name##MatcherP2, description, (#p0, #p1), \ + (p0, p1)) +#define MATCHER_P3(name, p0, p1, p2, description) \ + GMOCK_INTERNAL_MATCHER(name, name##MatcherP3, description, (#p0, #p1, #p2), \ + (p0, p1, p2)) +#define MATCHER_P4(name, p0, p1, p2, p3, description) \ + GMOCK_INTERNAL_MATCHER(name, name##MatcherP4, description, \ + (#p0, #p1, #p2, #p3), (p0, p1, p2, p3)) #define MATCHER_P5(name, p0, p1, p2, p3, p4, description) \ GMOCK_INTERNAL_MATCHER(name, name##MatcherP5, description, \ - (p0, p1, p2, p3, p4)) + (#p0, #p1, #p2, #p3, #p4), (p0, p1, p2, p3, p4)) #define MATCHER_P6(name, p0, p1, p2, p3, p4, p5, description) \ GMOCK_INTERNAL_MATCHER(name, name##MatcherP6, description, \ + (#p0, #p1, #p2, #p3, #p4, #p5), \ (p0, p1, p2, p3, p4, p5)) #define MATCHER_P7(name, p0, p1, p2, p3, p4, p5, p6, description) \ GMOCK_INTERNAL_MATCHER(name, name##MatcherP7, description, \ + (#p0, #p1, #p2, #p3, #p4, #p5, #p6), \ (p0, p1, p2, p3, p4, p5, p6)) #define MATCHER_P8(name, p0, p1, p2, p3, p4, p5, p6, p7, description) \ GMOCK_INTERNAL_MATCHER(name, name##MatcherP8, description, \ + (#p0, #p1, #p2, #p3, #p4, #p5, #p6, #p7), \ (p0, p1, p2, p3, p4, p5, p6, p7)) #define MATCHER_P9(name, p0, p1, p2, p3, p4, p5, p6, p7, p8, description) \ GMOCK_INTERNAL_MATCHER(name, name##MatcherP9, description, \ + (#p0, #p1, #p2, #p3, #p4, #p5, #p6, #p7, #p8), \ (p0, p1, p2, p3, p4, p5, p6, p7, p8)) #define MATCHER_P10(name, p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, description) \ GMOCK_INTERNAL_MATCHER(name, name##MatcherP10, description, \ + (#p0, #p1, #p2, #p3, #p4, #p5, #p6, #p7, #p8, #p9), \ (p0, p1, p2, p3, p4, p5, p6, p7, p8, p9)) -#define GMOCK_INTERNAL_MATCHER(name, full_name, description, args) \ +#define GMOCK_INTERNAL_MATCHER(name, full_name, description, arg_names, args) \ template \ class full_name : public ::testing::internal::MatcherBaseImpl< \ full_name> { \ @@ -5473,7 +5531,7 @@ return gmock_description; \ } \ return ::testing::internal::FormatMatcherDescription( \ - negation, #name, \ + negation, #name, {GMOCK_PP_REMOVE_PARENS(arg_names)}, \ ::testing::internal::UniversalTersePrintTupleFieldsToStrings( \ ::std::tuple( \ GMOCK_INTERNAL_MATCHER_MEMBERS_USAGE(args)))); \ diff --git 
a/MicroBenchmarks/libs/benchmark/googletest/googlemock/include/gmock/gmock-more-actions.h b/MicroBenchmarks/libs/benchmark/googletest/googlemock/include/gmock/gmock-more-actions.h --- a/MicroBenchmarks/libs/benchmark/googletest/googlemock/include/gmock/gmock-more-actions.h +++ b/MicroBenchmarks/libs/benchmark/googletest/googlemock/include/gmock/gmock-more-actions.h @@ -27,12 +27,12 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - // Google Mock - a framework for writing C++ mock classes. // // This file implements some commonly used variadic actions. -// GOOGLETEST_CM0002 DO NOT DELETE +// IWYU pragma: private, include "gmock/gmock.h" +// IWYU pragma: friend gmock/.* #ifndef GOOGLEMOCK_INCLUDE_GMOCK_GMOCK_MORE_ACTIONS_H_ #define GOOGLEMOCK_INCLUDE_GMOCK_GMOCK_MORE_ACTIONS_H_ diff --git a/MicroBenchmarks/libs/benchmark/googletest/googlemock/include/gmock/gmock-more-matchers.h b/MicroBenchmarks/libs/benchmark/googletest/googlemock/include/gmock/gmock-more-matchers.h --- a/MicroBenchmarks/libs/benchmark/googletest/googlemock/include/gmock/gmock-more-matchers.h +++ b/MicroBenchmarks/libs/benchmark/googletest/googlemock/include/gmock/gmock-more-matchers.h @@ -27,7 +27,6 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - // Google Mock - a framework for writing C++ mock classes. // // This file implements some matchers that depend on gmock-matchers.h. @@ -35,7 +34,8 @@ // Note that tests are implemented in gmock-matchers_test.cc rather than // gmock-more-matchers-test.cc. -// GOOGLETEST_CM0002 DO NOT DELETE +// IWYU pragma: private, include "gmock/gmock.h" +// IWYU pragma: friend gmock/.* #ifndef GOOGLEMOCK_INCLUDE_GMOCK_GMOCK_MORE_MATCHERS_H_ #define GOOGLEMOCK_INCLUDE_GMOCK_GMOCK_MORE_MATCHERS_H_ diff --git a/MicroBenchmarks/libs/benchmark/googletest/googlemock/include/gmock/gmock-nice-strict.h b/MicroBenchmarks/libs/benchmark/googletest/googlemock/include/gmock/gmock-nice-strict.h --- a/MicroBenchmarks/libs/benchmark/googletest/googlemock/include/gmock/gmock-nice-strict.h +++ b/MicroBenchmarks/libs/benchmark/googletest/googlemock/include/gmock/gmock-nice-strict.h @@ -27,7 +27,6 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - // Implements class templates NiceMock, NaggyMock, and StrictMock. // // Given a mock class MockFoo that is created using Google Mock, @@ -58,7 +57,8 @@ // In particular, nesting NiceMock, NaggyMock, and StrictMock is NOT // supported. -// GOOGLETEST_CM0002 DO NOT DELETE +// IWYU pragma: private, include "gmock/gmock.h" +// IWYU pragma: friend gmock/.* #ifndef GOOGLEMOCK_INCLUDE_GMOCK_GMOCK_NICE_STRICT_H_ #define GOOGLEMOCK_INCLUDE_GMOCK_GMOCK_NICE_STRICT_H_ diff --git a/MicroBenchmarks/libs/benchmark/googletest/googlemock/include/gmock/gmock-spec-builders.h b/MicroBenchmarks/libs/benchmark/googletest/googlemock/include/gmock/gmock-spec-builders.h --- a/MicroBenchmarks/libs/benchmark/googletest/googlemock/include/gmock/gmock-spec-builders.h +++ b/MicroBenchmarks/libs/benchmark/googletest/googlemock/include/gmock/gmock-spec-builders.h @@ -27,7 +27,6 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - // Google Mock - a framework for writing C++ mock classes. 
// // This file implements the ON_CALL() and EXPECT_CALL() macros. @@ -56,7 +55,8 @@ // where all clauses are optional, and .InSequence()/.After()/ // .WillOnce() can appear any number of times. -// GOOGLETEST_CM0002 DO NOT DELETE +// IWYU pragma: private, include "gmock/gmock.h" +// IWYU pragma: friend gmock/.* #ifndef GOOGLEMOCK_INCLUDE_GMOCK_GMOCK_SPEC_BUILDERS_H_ #define GOOGLEMOCK_INCLUDE_GMOCK_GMOCK_SPEC_BUILDERS_H_ @@ -890,7 +890,7 @@ mutable Mutex mutex_; // Protects action_count_checked_. }; // class ExpectationBase -// Impements an expectation for the given function type. +// Implements an expectation for the given function type. template class TypedExpectation : public ExpectationBase { public: @@ -1510,7 +1510,7 @@ // Performs the default action of this mock function on the given // arguments and returns the result. Asserts (or throws if - // exceptions are enabled) with a helpful call descrption if there + // exceptions are enabled) with a helpful call description if there // is no valid return value. This method doesn't depend on the // mutable state of this object, and thus can be called concurrently // without locking. diff --git a/MicroBenchmarks/libs/benchmark/googletest/googlemock/include/gmock/gmock.h b/MicroBenchmarks/libs/benchmark/googletest/googlemock/include/gmock/gmock.h --- a/MicroBenchmarks/libs/benchmark/googletest/googlemock/include/gmock/gmock.h +++ b/MicroBenchmarks/libs/benchmark/googletest/googlemock/include/gmock/gmock.h @@ -27,13 +27,10 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - // Google Mock - a framework for writing C++ mock classes. // // This is the main header file a user should include. -// GOOGLETEST_CM0002 DO NOT DELETE - #ifndef GOOGLEMOCK_INCLUDE_GMOCK_GMOCK_H_ #define GOOGLEMOCK_INCLUDE_GMOCK_GMOCK_H_ @@ -64,14 +61,15 @@ #include "gmock/gmock-more-matchers.h" #include "gmock/gmock-nice-strict.h" #include "gmock/internal/gmock-internal-utils.h" - -namespace testing { +#include "gmock/internal/gmock-port.h" // Declares Google Mock flags that we want a user to use programmatically. GMOCK_DECLARE_bool_(catch_leaked_mocks); GMOCK_DECLARE_string_(verbose); GMOCK_DECLARE_int32_(default_mock_behavior); +namespace testing { + // Initializes Google Mock. This must be called before running the // tests. In particular, it parses the command line for the flags // that Google Mock recognizes. 
Whenever a Google Mock flag is seen, diff --git a/MicroBenchmarks/libs/benchmark/googletest/googlemock/include/gmock/internal/custom/README.md b/MicroBenchmarks/libs/benchmark/googletest/googlemock/include/gmock/internal/custom/README.md --- a/MicroBenchmarks/libs/benchmark/googletest/googlemock/include/gmock/internal/custom/README.md +++ b/MicroBenchmarks/libs/benchmark/googletest/googlemock/include/gmock/internal/custom/README.md @@ -14,3 +14,5 @@ * `GMOCK_DEFINE_bool_(name, default_val, doc)` * `GMOCK_DEFINE_int32_(name, default_val, doc)` * `GMOCK_DEFINE_string_(name, default_val, doc)` +* `GMOCK_FLAG_GET(flag_name)` +* `GMOCK_FLAG_SET(flag_name, value)` diff --git a/MicroBenchmarks/libs/benchmark/googletest/googlemock/include/gmock/internal/custom/gmock-generated-actions.h b/MicroBenchmarks/libs/benchmark/googletest/googlemock/include/gmock/internal/custom/gmock-generated-actions.h --- a/MicroBenchmarks/libs/benchmark/googletest/googlemock/include/gmock/internal/custom/gmock-generated-actions.h +++ b/MicroBenchmarks/libs/benchmark/googletest/googlemock/include/gmock/internal/custom/gmock-generated-actions.h @@ -1,4 +1,5 @@ -// GOOGLETEST_CM0002 DO NOT DELETE +// IWYU pragma: private, include "gmock/gmock.h" +// IWYU pragma: friend gmock/.* #ifndef GOOGLEMOCK_INCLUDE_GMOCK_INTERNAL_CUSTOM_GMOCK_GENERATED_ACTIONS_H_ #define GOOGLEMOCK_INCLUDE_GMOCK_INTERNAL_CUSTOM_GMOCK_GENERATED_ACTIONS_H_ diff --git a/MicroBenchmarks/libs/benchmark/googletest/googlemock/include/gmock/internal/custom/gmock-matchers.h b/MicroBenchmarks/libs/benchmark/googletest/googlemock/include/gmock/internal/custom/gmock-matchers.h --- a/MicroBenchmarks/libs/benchmark/googletest/googlemock/include/gmock/internal/custom/gmock-matchers.h +++ b/MicroBenchmarks/libs/benchmark/googletest/googlemock/include/gmock/internal/custom/gmock-matchers.h @@ -26,10 +26,11 @@ // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// + // Injection point for custom user configurations. See README for details -// -// GOOGLETEST_CM0002 DO NOT DELETE + +// IWYU pragma: private, include "gmock/gmock.h" +// IWYU pragma: friend gmock/.* #ifndef GOOGLEMOCK_INCLUDE_GMOCK_INTERNAL_CUSTOM_GMOCK_MATCHERS_H_ #define GOOGLEMOCK_INCLUDE_GMOCK_INTERNAL_CUSTOM_GMOCK_MATCHERS_H_ diff --git a/MicroBenchmarks/libs/benchmark/googletest/googlemock/include/gmock/internal/custom/gmock-port.h b/MicroBenchmarks/libs/benchmark/googletest/googlemock/include/gmock/internal/custom/gmock-port.h --- a/MicroBenchmarks/libs/benchmark/googletest/googlemock/include/gmock/internal/custom/gmock-port.h +++ b/MicroBenchmarks/libs/benchmark/googletest/googlemock/include/gmock/internal/custom/gmock-port.h @@ -26,12 +26,13 @@ // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// + // Injection point for custom user configurations. 
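[Editor's note] The customization README above now documents GMOCK_FLAG_GET / GMOCK_FLAG_SET, the accessor macros this release routes flag access through. A sketch of their use; the helper name and the "error" verbosity level are illustrative (stock levels are "info", "warning", "error"):

```cpp
#include "gmock/gmock.h"

// Read and write Google Mock flags through the new accessor macros rather
// than touching ::testing::GMOCK_FLAG(name) directly.
void QuietMocks() {
  if (GMOCK_FLAG_GET(verbose) != "error") {
    GMOCK_FLAG_SET(verbose, "error");  // Only report mock-related errors.
  }
}
```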
See README for details // // ** Custom implementation starts here ** -// GOOGLETEST_CM0002 DO NOT DELETE +// IWYU pragma: private, include "gmock/gmock.h" +// IWYU pragma: friend gmock/.* #ifndef GOOGLEMOCK_INCLUDE_GMOCK_INTERNAL_CUSTOM_GMOCK_PORT_H_ #define GOOGLEMOCK_INCLUDE_GMOCK_INTERNAL_CUSTOM_GMOCK_PORT_H_ diff --git a/MicroBenchmarks/libs/benchmark/googletest/googlemock/include/gmock/internal/gmock-internal-utils.h b/MicroBenchmarks/libs/benchmark/googletest/googlemock/include/gmock/internal/gmock-internal-utils.h --- a/MicroBenchmarks/libs/benchmark/googletest/googlemock/include/gmock/internal/gmock-internal-utils.h +++ b/MicroBenchmarks/libs/benchmark/googletest/googlemock/include/gmock/internal/gmock-internal-utils.h @@ -27,22 +27,25 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - // Google Mock - a framework for writing C++ mock classes. // // This file defines some utilities useful for implementing Google // Mock. They are subject to change without notice, so please DO NOT // USE THEM IN USER CODE. -// GOOGLETEST_CM0002 DO NOT DELETE +// IWYU pragma: private, include "gmock/gmock.h" +// IWYU pragma: friend gmock/.* #ifndef GOOGLEMOCK_INCLUDE_GMOCK_INTERNAL_GMOCK_INTERNAL_UTILS_H_ #define GOOGLEMOCK_INCLUDE_GMOCK_INTERNAL_GMOCK_INTERNAL_UTILS_H_ #include + #include // NOLINT #include #include +#include + #include "gmock/internal/gmock-port.h" #include "gtest/gtest.h" @@ -63,7 +66,8 @@ // Joins a vector of strings as if they are fields of a tuple; returns // the joined string. -GTEST_API_ std::string JoinAsTuple(const Strings& fields); +GTEST_API_ std::string JoinAsKeyValueTuple( + const std::vector& names, const Strings& values); // Converts an identifier name to a space-separated list of lower-case // words. Each maximum substring of the form [A-Za-z][a-z]*|\d+ is @@ -78,6 +82,13 @@ inline const typename Pointer::element_type* GetRawPointer(const Pointer& p) { return p.get(); } +// This overload version is for std::reference_wrapper, which does not work with +// the overload above, as it does not have an `element_type`. +template +inline const Element* GetRawPointer(const std::reference_wrapper& r) { + return &r.get(); +} + // This overloaded version is for the raw pointer case. template inline Element* GetRawPointer(Element* p) { return p; } @@ -281,7 +292,7 @@ GTEST_API_ WithoutMatchers GetWithoutMatchers(); // Disable MSVC warnings for infinite recursion, since in this case the -// the recursion is unreachable. +// recursion is unreachable. #ifdef _MSC_VER # pragma warning(push) # pragma warning(disable:4717) @@ -449,6 +460,8 @@ template constexpr size_t Function::ArgumentCount; +bool Base64Unescape(const std::string& encoded, std::string* decoded); + #ifdef _MSC_VER # pragma warning(pop) #endif diff --git a/MicroBenchmarks/libs/benchmark/googletest/googlemock/include/gmock/internal/gmock-port.h b/MicroBenchmarks/libs/benchmark/googletest/googlemock/include/gmock/internal/gmock-port.h --- a/MicroBenchmarks/libs/benchmark/googletest/googlemock/include/gmock/internal/gmock-port.h +++ b/MicroBenchmarks/libs/benchmark/googletest/googlemock/include/gmock/internal/gmock-port.h @@ -27,7 +27,6 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// // Low-level types and utilities for porting Google Mock to various // platforms. 
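[Editor's note] The new GetRawPointer overload above exists because std::reference_wrapper exposes no element_type, so the smart-pointer overload SFINAEs away. A self-contained sketch mirroring that logic; the GetRaw name is hypothetical, standing in for the header's internal helper:

```cpp
#include <functional>
#include <memory>

// Smart pointers expose element_type, so this overload applies to them ...
template <typename Pointer>
const typename Pointer::element_type* GetRaw(const Pointer& p) {
  return p.get();
}
// ... but std::reference_wrapper does not, so it needs its own overload
// returning the address of the wrapped object.
template <typename Element>
const Element* GetRaw(const std::reference_wrapper<Element>& r) {
  return &r.get();
}

int main() {
  auto sp = std::make_shared<int>(5);
  int x = 7;
  const int* a = GetRaw(sp);           // smart-pointer path
  const int* b = GetRaw(std::ref(x));  // reference_wrapper path
  return (*a == 5 && b == &x) ? 0 : 1;
}
```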
All macros ending with _ and symbols defined in an // internal namespace are subject to change without notice. Code @@ -35,7 +34,8 @@ // end with _ are part of Google Mock's public API and can be used by // code outside Google Mock. -// GOOGLETEST_CM0002 DO NOT DELETE +// IWYU pragma: private, include "gmock/gmock.h" +// IWYU pragma: friend gmock/.* #ifndef GOOGLEMOCK_INCLUDE_GMOCK_INTERNAL_GMOCK_PORT_H_ #define GOOGLEMOCK_INCLUDE_GMOCK_INTERNAL_GMOCK_PORT_H_ @@ -69,19 +69,37 @@ #if !defined(GMOCK_DECLARE_bool_) // Macros for declaring flags. -# define GMOCK_DECLARE_bool_(name) extern GTEST_API_ bool GMOCK_FLAG(name) -# define GMOCK_DECLARE_int32_(name) extern GTEST_API_ int32_t GMOCK_FLAG(name) -# define GMOCK_DECLARE_string_(name) \ - extern GTEST_API_ ::std::string GMOCK_FLAG(name) +#define GMOCK_DECLARE_bool_(name) \ + namespace testing { \ + GTEST_API_ extern bool GMOCK_FLAG(name); \ + } static_assert(true, "no-op to require trailing semicolon") +#define GMOCK_DECLARE_int32_(name) \ + namespace testing { \ + GTEST_API_ extern int32_t GMOCK_FLAG(name); \ + } static_assert(true, "no-op to require trailing semicolon") +#define GMOCK_DECLARE_string_(name) \ + namespace testing { \ + GTEST_API_ extern ::std::string GMOCK_FLAG(name); \ + } static_assert(true, "no-op to require trailing semicolon") // Macros for defining flags. -# define GMOCK_DEFINE_bool_(name, default_val, doc) \ - GTEST_API_ bool GMOCK_FLAG(name) = (default_val) -# define GMOCK_DEFINE_int32_(name, default_val, doc) \ - GTEST_API_ int32_t GMOCK_FLAG(name) = (default_val) -# define GMOCK_DEFINE_string_(name, default_val, doc) \ - GTEST_API_ ::std::string GMOCK_FLAG(name) = (default_val) - +#define GMOCK_DEFINE_bool_(name, default_val, doc) \ + namespace testing { \ + GTEST_API_ bool GMOCK_FLAG(name) = (default_val); \ + } static_assert(true, "no-op to require trailing semicolon") +#define GMOCK_DEFINE_int32_(name, default_val, doc) \ + namespace testing { \ + GTEST_API_ int32_t GMOCK_FLAG(name) = (default_val); \ + } static_assert(true, "no-op to require trailing semicolon") +#define GMOCK_DEFINE_string_(name, default_val, doc) \ + namespace testing { \ + GTEST_API_ ::std::string GMOCK_FLAG(name) = (default_val); \ + } static_assert(true, "no-op to require trailing semicolon") #endif // !defined(GMOCK_DECLARE_bool_) +#if !defined(GMOCK_FLAG_GET) +#define GMOCK_FLAG_GET(name) ::testing::GMOCK_FLAG(name) +#define GMOCK_FLAG_SET(name, value) (void)(::testing::GMOCK_FLAG(name) = value) +#endif // !defined(GMOCK_FLAG_GET) + #endif // GOOGLEMOCK_INCLUDE_GMOCK_INTERNAL_GMOCK_PORT_H_ diff --git a/MicroBenchmarks/libs/benchmark/googletest/googlemock/scripts/README.md b/MicroBenchmarks/libs/benchmark/googletest/googlemock/scripts/README.md deleted file mode 100644 --- a/MicroBenchmarks/libs/benchmark/googletest/googlemock/scripts/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# Please Note: - -Files in this directory are no longer supported by the maintainers. They -represent mostly historical artifacts and supported by the community only. There -is no guarantee whatsoever that these scripts still work. diff --git a/MicroBenchmarks/libs/benchmark/googletest/googlemock/scripts/fuse_gmock_files.py b/MicroBenchmarks/libs/benchmark/googletest/googlemock/scripts/fuse_gmock_files.py deleted file mode 100755 --- a/MicroBenchmarks/libs/benchmark/googletest/googlemock/scripts/fuse_gmock_files.py +++ /dev/null @@ -1,256 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2009, Google Inc. -# All rights reserved. 
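[Editor's note] The rewritten GMOCK_DECLARE_*/GMOCK_DEFINE_* macros above wrap each flag in namespace testing and end in a no-op static_assert so every use site must supply a trailing semicolon. The idiom in isolation, with a hypothetical MY_DECLARE_FLAG that is not from the diff:

```cpp
#include <cstdint>

// A macro that opens and closes a namespace cannot itself end in ';', so it
// ends in a no-op static_assert instead: the caller's trailing semicolon
// completes the declaration, and omitting it is a compile error.
#define MY_DECLARE_FLAG(name)                           \
  namespace myflags {                                   \
  extern std::int32_t flag_##name;                      \
  }                                                     \
  static_assert(true, "no-op to require trailing semicolon")

MY_DECLARE_FLAG(timeout);  // OK: the semicolon completes the static_assert.
// MY_DECLARE_FLAG(retries)  // Would fail to compile without ';'.

namespace myflags {
std::int32_t flag_timeout = 30;  // The matching definition.
}

int main() { return myflags::flag_timeout == 30 ? 0 : 1; }
```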
-# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -"""fuse_gmock_files.py v0.1.0. - -Fuses Google Mock and Google Test source code into two .h files and a .cc file. - -SYNOPSIS - fuse_gmock_files.py [GMOCK_ROOT_DIR] OUTPUT_DIR - - Scans GMOCK_ROOT_DIR for Google Mock and Google Test source - code, assuming Google Test is in the GMOCK_ROOT_DIR/../googletest - directory, and generates three files: - OUTPUT_DIR/gtest/gtest.h, OUTPUT_DIR/gmock/gmock.h, and - OUTPUT_DIR/gmock-gtest-all.cc. Then you can build your tests - by adding OUTPUT_DIR to the include search path and linking - with OUTPUT_DIR/gmock-gtest-all.cc. These three files contain - everything you need to use Google Mock. Hence you can - "install" Google Mock by copying them to wherever you want. - - GMOCK_ROOT_DIR can be omitted and defaults to the parent - directory of the directory holding this script. - -EXAMPLES - ./fuse_gmock_files.py fused_gmock - ./fuse_gmock_files.py path/to/unpacked/gmock fused_gmock - -This tool is experimental. In particular, it assumes that there is no -conditional inclusion of Google Mock or Google Test headers. Please -report any problems to googlemock@googlegroups.com. You can read -https://github.com/google/googletest/blob/master/docs/gmock_cook_book.md -for more -information. -""" - -from __future__ import print_function - -import os -import re -import sys - -__author__ = 'wan@google.com (Zhanyong Wan)' - -# We assume that this file is in the scripts/ directory in the Google -# Mock root directory. -DEFAULT_GMOCK_ROOT_DIR = os.path.join(os.path.dirname(__file__), '..') - -# We need to call into googletest/scripts/fuse_gtest_files.py. -sys.path.append(os.path.join(DEFAULT_GMOCK_ROOT_DIR, '../googletest/scripts')) -import fuse_gtest_files as gtest # pylint:disable=g-import-not-at-top - -# Regex for matching -# '#include "gmock/..."'. -INCLUDE_GMOCK_FILE_REGEX = re.compile(r'^\s*#\s*include\s*"(gmock/.+)"') - -# Where to find the source seed files. -GMOCK_H_SEED = 'include/gmock/gmock.h' -GMOCK_ALL_CC_SEED = 'src/gmock-all.cc' - -# Where to put the generated files. 
-GTEST_H_OUTPUT = 'gtest/gtest.h' -GMOCK_H_OUTPUT = 'gmock/gmock.h' -GMOCK_GTEST_ALL_CC_OUTPUT = 'gmock-gtest-all.cc' - - -def GetGTestRootDir(gmock_root): - """Returns the root directory of Google Test.""" - - return os.path.join(gmock_root, '../googletest') - - -def ValidateGMockRootDir(gmock_root): - """Makes sure gmock_root points to a valid gmock root directory. - - The function aborts the program on failure. - - Args: - gmock_root: A string with the mock root directory. - """ - - gtest.ValidateGTestRootDir(GetGTestRootDir(gmock_root)) - gtest.VerifyFileExists(gmock_root, GMOCK_H_SEED) - gtest.VerifyFileExists(gmock_root, GMOCK_ALL_CC_SEED) - - -def ValidateOutputDir(output_dir): - """Makes sure output_dir points to a valid output directory. - - The function aborts the program on failure. - - Args: - output_dir: A string representing the output directory. - """ - - gtest.VerifyOutputFile(output_dir, gtest.GTEST_H_OUTPUT) - gtest.VerifyOutputFile(output_dir, GMOCK_H_OUTPUT) - gtest.VerifyOutputFile(output_dir, GMOCK_GTEST_ALL_CC_OUTPUT) - - -def FuseGMockH(gmock_root, output_dir): - """Scans folder gmock_root to generate gmock/gmock.h in output_dir.""" - - output_file = open(os.path.join(output_dir, GMOCK_H_OUTPUT), 'w') - processed_files = set() # Holds all gmock headers we've processed. - - def ProcessFile(gmock_header_path): - """Processes the given gmock header file.""" - - # We don't process the same header twice. - if gmock_header_path in processed_files: - return - - processed_files.add(gmock_header_path) - - # Reads each line in the given gmock header. - - with open(os.path.join(gmock_root, gmock_header_path), 'r') as fh: - for line in fh: - m = INCLUDE_GMOCK_FILE_REGEX.match(line) - if m: - # '#include "gmock/..."' - # - let's process it recursively. - ProcessFile('include/' + m.group(1)) - else: - m = gtest.INCLUDE_GTEST_FILE_REGEX.match(line) - if m: - # '#include "gtest/foo.h"' - # We translate it to "gtest/gtest.h", regardless of what foo is, - # since all gtest headers are fused into gtest/gtest.h. - - # There is no need to #include gtest.h twice. - if gtest.GTEST_H_SEED not in processed_files: - processed_files.add(gtest.GTEST_H_SEED) - output_file.write('#include "%s"\n' % (gtest.GTEST_H_OUTPUT,)) - else: - # Otherwise we copy the line unchanged to the output file. - output_file.write(line) - - ProcessFile(GMOCK_H_SEED) - output_file.close() - - -def FuseGMockAllCcToFile(gmock_root, output_file): - """Scans folder gmock_root to fuse gmock-all.cc into output_file.""" - - processed_files = set() - - def ProcessFile(gmock_source_file): - """Processes the given gmock source file.""" - - # We don't process the same #included file twice. - if gmock_source_file in processed_files: - return - - processed_files.add(gmock_source_file) - - # Reads each line in the given gmock source file. - - with open(os.path.join(gmock_root, gmock_source_file), 'r') as fh: - for line in fh: - m = INCLUDE_GMOCK_FILE_REGEX.match(line) - if m: - # '#include "gmock/foo.h"' - # We treat it as '#include "gmock/gmock.h"', as all other gmock - # headers are being fused into gmock.h and cannot be - # included directly. No need to - # #include "gmock/gmock.h" - # more than once. - - if GMOCK_H_SEED not in processed_files: - processed_files.add(GMOCK_H_SEED) - output_file.write('#include "%s"\n' % (GMOCK_H_OUTPUT,)) - else: - m = gtest.INCLUDE_GTEST_FILE_REGEX.match(line) - if m: - # '#include "gtest/..."' - # There is no need to #include gtest.h as it has been - # #included by gtest-all.cc. 
- - pass - else: - m = gtest.INCLUDE_SRC_FILE_REGEX.match(line) - if m: - # It's '#include "src/foo"' - let's process it recursively. - ProcessFile(m.group(1)) - else: - # Otherwise we copy the line unchanged to the output file. - output_file.write(line) - - ProcessFile(GMOCK_ALL_CC_SEED) - - -def FuseGMockGTestAllCc(gmock_root, output_dir): - """Scans folder gmock_root to generate gmock-gtest-all.cc in output_dir.""" - - with open(os.path.join(output_dir, GMOCK_GTEST_ALL_CC_OUTPUT), - 'w') as output_file: - # First, fuse gtest-all.cc into gmock-gtest-all.cc. - gtest.FuseGTestAllCcToFile(GetGTestRootDir(gmock_root), output_file) - # Next, append fused gmock-all.cc to gmock-gtest-all.cc. - FuseGMockAllCcToFile(gmock_root, output_file) - - -def FuseGMock(gmock_root, output_dir): - """Fuses gtest.h, gmock.h, and gmock-gtest-all.h.""" - - ValidateGMockRootDir(gmock_root) - ValidateOutputDir(output_dir) - - gtest.FuseGTestH(GetGTestRootDir(gmock_root), output_dir) - FuseGMockH(gmock_root, output_dir) - FuseGMockGTestAllCc(gmock_root, output_dir) - - -def main(): - argc = len(sys.argv) - if argc == 2: - # fuse_gmock_files.py OUTPUT_DIR - FuseGMock(DEFAULT_GMOCK_ROOT_DIR, sys.argv[1]) - elif argc == 3: - # fuse_gmock_files.py GMOCK_ROOT_DIR OUTPUT_DIR - FuseGMock(sys.argv[1], sys.argv[2]) - else: - print(__doc__) - sys.exit(1) - - -if __name__ == '__main__': - main() diff --git a/MicroBenchmarks/libs/benchmark/googletest/googlemock/scripts/generator/LICENSE b/MicroBenchmarks/libs/benchmark/googletest/googlemock/scripts/generator/LICENSE deleted file mode 100644 --- a/MicroBenchmarks/libs/benchmark/googletest/googlemock/scripts/generator/LICENSE +++ /dev/null @@ -1,203 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [2007] Neal Norwitz - Portions Copyright [2007] Google Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/MicroBenchmarks/libs/benchmark/googletest/googlemock/scripts/generator/README b/MicroBenchmarks/libs/benchmark/googletest/googlemock/scripts/generator/README deleted file mode 100644 --- a/MicroBenchmarks/libs/benchmark/googletest/googlemock/scripts/generator/README +++ /dev/null @@ -1,34 +0,0 @@ - -The Google Mock class generator is an application that is part of cppclean. -For more information about cppclean, visit http://code.google.com/p/cppclean/ - -The mock generator requires Python 2.3.5 or later. If you don't have Python -installed on your system, you will also need to install it. You can download -Python from: http://www.python.org/download/releases/ - -To use the Google Mock class generator, you need to call it -on the command line passing the header file and class for which you want -to generate a Google Mock class. - -Make sure to install the scripts somewhere in your path. Then you can -run the program. 
- - gmock_gen.py header-file.h [ClassName]... - -If no ClassNames are specified, all classes in the file are emitted. - -To change the indentation from the default of 2, set INDENT in -the environment. For example to use an indent of 4 spaces: - -INDENT=4 gmock_gen.py header-file.h ClassName - -This version was made from SVN revision 281 in the cppclean repository. - -Known Limitations ------------------ -Not all code will be generated properly. For example, when mocking templated -classes, the template information is lost. You will need to add the template -information manually. - -Not all permutations of using multiple pointers/references will be rendered -properly. These will also have to be fixed manually. diff --git a/MicroBenchmarks/libs/benchmark/googletest/googlemock/scripts/generator/README.cppclean b/MicroBenchmarks/libs/benchmark/googletest/googlemock/scripts/generator/README.cppclean deleted file mode 100644 --- a/MicroBenchmarks/libs/benchmark/googletest/googlemock/scripts/generator/README.cppclean +++ /dev/null @@ -1,115 +0,0 @@ -Goal: ------ - CppClean attempts to find problems in C++ source that slow development - in large code bases, for example various forms of unused code. - Unused code can be unused functions, methods, data members, types, etc - to unnecessary #include directives. Unnecessary #includes can cause - considerable extra compiles increasing the edit-compile-run cycle. - - The project home page is: http://code.google.com/p/cppclean/ - - -Features: ---------- - * Find and print C++ language constructs: classes, methods, functions, etc. - * Find classes with virtual methods, no virtual destructor, and no bases - * Find global/static data that are potential problems when using threads - * Unnecessary forward class declarations - * Unnecessary function declarations - * Undeclared function definitions - * (planned) Find unnecessary header files #included - - No direct reference to anything in the header - - Header is unnecessary if classes were forward declared instead - * (planned) Source files that reference headers not directly #included, - ie, files that rely on a transitive #include from another header - * (planned) Unused members (private, protected, & public) methods and data - * (planned) Store AST in a SQL database so relationships can be queried - -AST is Abstract Syntax Tree, a representation of parsed source code. -http://en.wikipedia.org/wiki/Abstract_syntax_tree - - -System Requirements: --------------------- - * Python 2.4 or later (2.3 probably works too) - * Works on Windows (untested), Mac OS X, and Unix - - -How to Run: ------------ - For all examples, it is assumed that cppclean resides in a directory called - /cppclean. - - To print warnings for classes with virtual methods, no virtual destructor and - no base classes: - - /cppclean/run.sh nonvirtual_dtors.py file1.h file2.h file3.cc ... - - To print all the functions defined in header file(s): - - /cppclean/run.sh functions.py file1.h file2.h ... - - All the commands take multiple files on the command line. Other programs - include: find_warnings, headers, methods, and types. Some other programs - are available, but used primarily for debugging. - - run.sh is a simple wrapper that sets PYTHONPATH to /cppclean and then - runs the program in /cppclean/cpp/PROGRAM.py. There is currently - no equivalent for Windows. Contributions for a run.bat file - would be greatly appreciated. 
- - -How to Configure: ------------------ - You can add a siteheaders.py file in /cppclean/cpp to configure where - to look for other headers (typically -I options passed to a compiler). - Currently two values are supported: _TRANSITIVE and GetIncludeDirs. - _TRANSITIVE should be set to a boolean value (True or False) indicating - whether to transitively process all header files. The default is False. - - GetIncludeDirs is a function that takes a single argument and returns - a sequence of directories to include. This can be a generator or - return a static list. - - def GetIncludeDirs(filename): - return ['/some/path/with/other/headers'] - - # Here is a more complicated example. - def GetIncludeDirs(filename): - yield '/path1' - yield os.path.join('/path2', os.path.dirname(filename)) - yield '/path3' - - -How to Test: ------------- - For all examples, it is assumed that cppclean resides in a directory called - /cppclean. The tests require - - cd /cppclean - make test - # To generate expected results after a change: - make expected - - -Current Status: ---------------- - The parser works pretty well for header files, parsing about 99% of Google's - header files. Anything which inspects structure of C++ source files should - work reasonably well. Function bodies are not transformed to an AST, - but left as tokens. Much work is still needed on finding unused header files - and storing an AST in a database. - - -Non-goals: ----------- - * Parsing all valid C++ source - * Handling invalid C++ source gracefully - * Compiling to machine code (or anything beyond an AST) - - -Contact: --------- - If you used cppclean, I would love to hear about your experiences - cppclean@googlegroups.com. Even if you don't use cppclean, I'd like to - hear from you. :-) (You can contact me directly at: nnorwitz@gmail.com) diff --git a/MicroBenchmarks/libs/benchmark/googletest/googlemock/scripts/generator/cpp/ast.py b/MicroBenchmarks/libs/benchmark/googletest/googlemock/scripts/generator/cpp/ast.py deleted file mode 100755 --- a/MicroBenchmarks/libs/benchmark/googletest/googlemock/scripts/generator/cpp/ast.py +++ /dev/null @@ -1,1773 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2007 Neal Norwitz -# Portions Copyright 2007 Google Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Generate an Abstract Syntax Tree (AST) for C++.""" - -# FIXME: -# * Tokens should never be exported, need to convert to Nodes -# (return types, parameters, etc.) -# * Handle static class data for templatized classes -# * Handle casts (both C++ and C-style) -# * Handle conditions and loops (if/else, switch, for, while/do) -# -# TODO much, much later: -# * Handle #define -# * exceptions - - -try: - # Python 3.x - import builtins -except ImportError: - # Python 2.x - import __builtin__ as builtins - -import collections -import sys -import traceback - -from cpp import keywords -from cpp import tokenize -from cpp import utils - - -if not hasattr(builtins, 'reversed'): - # Support Python 2.3 and earlier. 
- def reversed(seq): - for i in range(len(seq)-1, -1, -1): - yield seq[i] - -if not hasattr(builtins, 'next'): - # Support Python 2.5 and earlier. - def next(obj): - return obj.next() - - -VISIBILITY_PUBLIC, VISIBILITY_PROTECTED, VISIBILITY_PRIVATE = range(3) - -FUNCTION_NONE = 0x00 -FUNCTION_CONST = 0x01 -FUNCTION_VIRTUAL = 0x02 -FUNCTION_PURE_VIRTUAL = 0x04 -FUNCTION_CTOR = 0x08 -FUNCTION_DTOR = 0x10 -FUNCTION_ATTRIBUTE = 0x20 -FUNCTION_UNKNOWN_ANNOTATION = 0x40 -FUNCTION_THROW = 0x80 -FUNCTION_OVERRIDE = 0x100 - -""" -These are currently unused. Should really handle these properly at some point. - -TYPE_MODIFIER_INLINE = 0x010000 -TYPE_MODIFIER_EXTERN = 0x020000 -TYPE_MODIFIER_STATIC = 0x040000 -TYPE_MODIFIER_CONST = 0x080000 -TYPE_MODIFIER_REGISTER = 0x100000 -TYPE_MODIFIER_VOLATILE = 0x200000 -TYPE_MODIFIER_MUTABLE = 0x400000 - -TYPE_MODIFIER_MAP = { - 'inline': TYPE_MODIFIER_INLINE, - 'extern': TYPE_MODIFIER_EXTERN, - 'static': TYPE_MODIFIER_STATIC, - 'const': TYPE_MODIFIER_CONST, - 'register': TYPE_MODIFIER_REGISTER, - 'volatile': TYPE_MODIFIER_VOLATILE, - 'mutable': TYPE_MODIFIER_MUTABLE, - } -""" - -_INTERNAL_TOKEN = 'internal' -_NAMESPACE_POP = 'ns-pop' - - -# TODO(nnorwitz): use this as a singleton for templated_types, etc -# where we don't want to create a new empty dict each time. It is also const. -class _NullDict(object): - __contains__ = lambda self: False - keys = values = items = iterkeys = itervalues = iteritems = lambda self: () - - -# TODO(nnorwitz): move AST nodes into a separate module. -class Node(object): - """Base AST node.""" - - def __init__(self, start, end): - self.start = start - self.end = end - - def IsDeclaration(self): - """Returns bool if this node is a declaration.""" - return False - - def IsDefinition(self): - """Returns bool if this node is a definition.""" - return False - - def IsExportable(self): - """Returns bool if this node exportable from a header file.""" - return False - - def Requires(self, node): - """Does this AST node require the definition of the node passed in?""" - return False - - def XXX__str__(self): - return self._StringHelper(self.__class__.__name__, '') - - def _StringHelper(self, name, suffix): - if not utils.DEBUG: - return '%s(%s)' % (name, suffix) - return '%s(%d, %d, %s)' % (name, self.start, self.end, suffix) - - def __repr__(self): - return str(self) - - -class Define(Node): - def __init__(self, start, end, name, definition): - Node.__init__(self, start, end) - self.name = name - self.definition = definition - - def __str__(self): - value = '%s %s' % (self.name, self.definition) - return self._StringHelper(self.__class__.__name__, value) - - -class Include(Node): - def __init__(self, start, end, filename, system): - Node.__init__(self, start, end) - self.filename = filename - self.system = system - - def __str__(self): - fmt = '"%s"' - if self.system: - fmt = '<%s>' - return self._StringHelper(self.__class__.__name__, fmt % self.filename) - - -class Goto(Node): - def __init__(self, start, end, label): - Node.__init__(self, start, end) - self.label = label - - def __str__(self): - return self._StringHelper(self.__class__.__name__, str(self.label)) - - -class Expr(Node): - def __init__(self, start, end, expr): - Node.__init__(self, start, end) - self.expr = expr - - def Requires(self, node): - # TODO(nnorwitz): impl. 
- return False - - def __str__(self): - return self._StringHelper(self.__class__.__name__, str(self.expr)) - - -class Return(Expr): - pass - - -class Delete(Expr): - pass - - -class Friend(Expr): - def __init__(self, start, end, expr, namespace): - Expr.__init__(self, start, end, expr) - self.namespace = namespace[:] - - -class Using(Node): - def __init__(self, start, end, names): - Node.__init__(self, start, end) - self.names = names - - def __str__(self): - return self._StringHelper(self.__class__.__name__, str(self.names)) - - -class Parameter(Node): - def __init__(self, start, end, name, parameter_type, default): - Node.__init__(self, start, end) - self.name = name - self.type = parameter_type - self.default = default - - def Requires(self, node): - # TODO(nnorwitz): handle namespaces, etc. - return self.type.name == node.name - - def __str__(self): - name = str(self.type) - suffix = '%s %s' % (name, self.name) - if self.default: - suffix += ' = ' + ''.join([d.name for d in self.default]) - return self._StringHelper(self.__class__.__name__, suffix) - - -class _GenericDeclaration(Node): - def __init__(self, start, end, name, namespace): - Node.__init__(self, start, end) - self.name = name - self.namespace = namespace[:] - - def FullName(self): - prefix = '' - if self.namespace and self.namespace[-1]: - prefix = '::'.join(self.namespace) + '::' - return prefix + self.name - - def _TypeStringHelper(self, suffix): - if self.namespace: - names = [n or '' for n in self.namespace] - suffix += ' in ' + '::'.join(names) - return self._StringHelper(self.__class__.__name__, suffix) - - -# TODO(nnorwitz): merge with Parameter in some way? -class VariableDeclaration(_GenericDeclaration): - def __init__(self, start, end, name, var_type, initial_value, namespace): - _GenericDeclaration.__init__(self, start, end, name, namespace) - self.type = var_type - self.initial_value = initial_value - - def Requires(self, node): - # TODO(nnorwitz): handle namespaces, etc. - return self.type.name == node.name - - def ToString(self): - """Return a string that tries to reconstitute the variable decl.""" - suffix = '%s %s' % (self.type, self.name) - if self.initial_value: - suffix += ' = ' + self.initial_value - return suffix - - def __str__(self): - return self._StringHelper(self.__class__.__name__, self.ToString()) - - -class Typedef(_GenericDeclaration): - def __init__(self, start, end, name, alias, namespace): - _GenericDeclaration.__init__(self, start, end, name, namespace) - self.alias = alias - - def IsDefinition(self): - return True - - def IsExportable(self): - return True - - def Requires(self, node): - # TODO(nnorwitz): handle namespaces, etc. 
- name = node.name - for token in self.alias: - if token is not None and name == token.name: - return True - return False - - def __str__(self): - suffix = '%s, %s' % (self.name, self.alias) - return self._TypeStringHelper(suffix) - - -class _NestedType(_GenericDeclaration): - def __init__(self, start, end, name, fields, namespace): - _GenericDeclaration.__init__(self, start, end, name, namespace) - self.fields = fields - - def IsDefinition(self): - return True - - def IsExportable(self): - return True - - def __str__(self): - suffix = '%s, {%s}' % (self.name, self.fields) - return self._TypeStringHelper(suffix) - - -class Union(_NestedType): - pass - - -class Enum(_NestedType): - pass - - -class Class(_GenericDeclaration): - def __init__(self, start, end, name, bases, templated_types, body, namespace): - _GenericDeclaration.__init__(self, start, end, name, namespace) - self.bases = bases - self.body = body - self.templated_types = templated_types - - def IsDeclaration(self): - return self.bases is None and self.body is None - - def IsDefinition(self): - return not self.IsDeclaration() - - def IsExportable(self): - return not self.IsDeclaration() - - def Requires(self, node): - # TODO(nnorwitz): handle namespaces, etc. - if self.bases: - for token_list in self.bases: - # TODO(nnorwitz): bases are tokens, do name comparison. - for token in token_list: - if token.name == node.name: - return True - # TODO(nnorwitz): search in body too. - return False - - def __str__(self): - name = self.name - if self.templated_types: - name += '<%s>' % self.templated_types - suffix = '%s, %s, %s' % (name, self.bases, self.body) - return self._TypeStringHelper(suffix) - - -class Struct(Class): - pass - - -class Function(_GenericDeclaration): - def __init__(self, start, end, name, return_type, parameters, - modifiers, templated_types, body, namespace): - _GenericDeclaration.__init__(self, start, end, name, namespace) - converter = TypeConverter(namespace) - self.return_type = converter.CreateReturnType(return_type) - self.parameters = converter.ToParameters(parameters) - self.modifiers = modifiers - self.body = body - self.templated_types = templated_types - - def IsDeclaration(self): - return self.body is None - - def IsDefinition(self): - return self.body is not None - - def IsExportable(self): - if self.return_type and 'static' in self.return_type.modifiers: - return False - return None not in self.namespace - - def Requires(self, node): - if self.parameters: - # TODO(nnorwitz): parameters are tokens, do name comparison. - for p in self.parameters: - if p.name == node.name: - return True - # TODO(nnorwitz): search in body too. - return False - - def __str__(self): - # TODO(nnorwitz): add templated_types. - suffix = ('%s %s(%s), 0x%02x, %s' % - (self.return_type, self.name, self.parameters, - self.modifiers, self.body)) - return self._TypeStringHelper(suffix) - - -class Method(Function): - def __init__(self, start, end, name, in_class, return_type, parameters, - modifiers, templated_types, body, namespace): - Function.__init__(self, start, end, name, return_type, parameters, - modifiers, templated_types, body, namespace) - # TODO(nnorwitz): in_class could also be a namespace which can - # mess up finding functions properly. 
- self.in_class = in_class - - -class Type(_GenericDeclaration): - """Type used for any variable (eg class, primitive, struct, etc).""" - - def __init__(self, start, end, name, templated_types, modifiers, - reference, pointer, array): - """ - Args: - name: str name of main type - templated_types: [Class (Type?)] template type info between <> - modifiers: [str] type modifiers (keywords) eg, const, mutable, etc. - reference, pointer, array: bools - """ - _GenericDeclaration.__init__(self, start, end, name, []) - self.templated_types = templated_types - if not name and modifiers: - self.name = modifiers.pop() - self.modifiers = modifiers - self.reference = reference - self.pointer = pointer - self.array = array - - def __str__(self): - prefix = '' - if self.modifiers: - prefix = ' '.join(self.modifiers) + ' ' - name = str(self.name) - if self.templated_types: - name += '<%s>' % self.templated_types - suffix = prefix + name - if self.reference: - suffix += '&' - if self.pointer: - suffix += '*' - if self.array: - suffix += '[]' - return self._TypeStringHelper(suffix) - - # By definition, Is* are always False. A Type can only exist in - # some sort of variable declaration, parameter, or return value. - def IsDeclaration(self): - return False - - def IsDefinition(self): - return False - - def IsExportable(self): - return False - - -class TypeConverter(object): - - def __init__(self, namespace_stack): - self.namespace_stack = namespace_stack - - def _GetTemplateEnd(self, tokens, start): - count = 1 - end = start - while 1: - token = tokens[end] - end += 1 - if token.name == '<': - count += 1 - elif token.name == '>': - count -= 1 - if count == 0: - break - return tokens[start:end-1], end - - def ToType(self, tokens): - """Convert [Token,...] to [Class(...), ] useful for base classes. - For example, code like class Foo : public Bar { ... }; - the "Bar" portion gets converted to an AST. - - Returns: - [Class(...), ...] - """ - result = [] - name_tokens = [] - reference = pointer = array = False - - def AddType(templated_types): - # Partition tokens into name and modifier tokens. - names = [] - modifiers = [] - for t in name_tokens: - if keywords.IsKeyword(t.name): - modifiers.append(t.name) - else: - names.append(t.name) - name = ''.join(names) - if name_tokens: - result.append(Type(name_tokens[0].start, name_tokens[-1].end, - name, templated_types, modifiers, - reference, pointer, array)) - del name_tokens[:] - - i = 0 - end = len(tokens) - while i < end: - token = tokens[i] - if token.name == '<': - new_tokens, new_end = self._GetTemplateEnd(tokens, i+1) - AddType(self.ToType(new_tokens)) - # If there is a comma after the template, we need to consume - # that here otherwise it becomes part of the name. - i = new_end - reference = pointer = array = False - elif token.name == ',': - AddType([]) - reference = pointer = array = False - elif token.name == '*': - pointer = True - elif token.name == '&': - reference = True - elif token.name == '[': - pointer = True - elif token.name == ']': - pass - else: - name_tokens.append(token) - i += 1 - - if name_tokens: - # No '<' in the tokens, just a simple name and no template. - AddType([]) - return result - - def DeclarationToParts(self, parts, needs_name_removed): - name = None - default = [] - if needs_name_removed: - # Handle default (initial) values properly. 
- for i, t in enumerate(parts): - if t.name == '=': - default = parts[i+1:] - name = parts[i-1].name - if name == ']' and parts[i-2].name == '[': - name = parts[i-3].name - i -= 1 - parts = parts[:i-1] - break - else: - if parts[-1].token_type == tokenize.NAME: - name = parts.pop().name - else: - # TODO(nnorwitz): this is a hack that happens for code like - # Register(Foo); where it thinks this is a function call - # but it's actually a declaration. - name = '???' - modifiers = [] - type_name = [] - other_tokens = [] - templated_types = [] - i = 0 - end = len(parts) - while i < end: - p = parts[i] - if keywords.IsKeyword(p.name): - modifiers.append(p.name) - elif p.name == '<': - templated_tokens, new_end = self._GetTemplateEnd(parts, i+1) - templated_types = self.ToType(templated_tokens) - i = new_end - 1 - # Don't add a spurious :: to data members being initialized. - next_index = i + 1 - if next_index < end and parts[next_index].name == '::': - i += 1 - elif p.name in ('[', ']', '='): - # These are handled elsewhere. - other_tokens.append(p) - elif p.name not in ('*', '&', '>'): - # Ensure that names have a space between them. - if (type_name and type_name[-1].token_type == tokenize.NAME and - p.token_type == tokenize.NAME): - type_name.append(tokenize.Token(tokenize.SYNTAX, ' ', 0, 0)) - type_name.append(p) - else: - other_tokens.append(p) - i += 1 - type_name = ''.join([t.name for t in type_name]) - return name, type_name, templated_types, modifiers, default, other_tokens - - def ToParameters(self, tokens): - if not tokens: - return [] - - result = [] - name = type_name = '' - type_modifiers = [] - pointer = reference = array = False - first_token = None - default = [] - - def AddParameter(end): - if default: - del default[0] # Remove flag. - parts = self.DeclarationToParts(type_modifiers, True) - (name, type_name, templated_types, modifiers, - unused_default, unused_other_tokens) = parts - parameter_type = Type(first_token.start, first_token.end, - type_name, templated_types, modifiers, - reference, pointer, array) - p = Parameter(first_token.start, end, name, - parameter_type, default) - result.append(p) - - template_count = 0 - brace_count = 0 - for s in tokens: - if not first_token: - first_token = s - - # Check for braces before templates, as we can have unmatched '<>' - # inside default arguments. - if s.name == '{': - brace_count += 1 - elif s.name == '}': - brace_count -= 1 - if brace_count > 0: - type_modifiers.append(s) - continue - - if s.name == '<': - template_count += 1 - elif s.name == '>': - template_count -= 1 - if template_count > 0: - type_modifiers.append(s) - continue - - if s.name == ',': - AddParameter(s.start) - name = type_name = '' - type_modifiers = [] - pointer = reference = array = False - first_token = None - default = [] - elif s.name == '*': - pointer = True - elif s.name == '&': - reference = True - elif s.name == '[': - array = True - elif s.name == ']': - pass # Just don't add to type_modifiers. - elif s.name == '=': - # Got a default value. Add any value (None) as a flag. 
- default.append(None) - elif default: - default.append(s) - else: - type_modifiers.append(s) - AddParameter(tokens[-1].end) - return result - - def CreateReturnType(self, return_type_seq): - if not return_type_seq: - return None - start = return_type_seq[0].start - end = return_type_seq[-1].end - _, name, templated_types, modifiers, default, other_tokens = \ - self.DeclarationToParts(return_type_seq, False) - names = [n.name for n in other_tokens] - reference = '&' in names - pointer = '*' in names - array = '[' in names - return Type(start, end, name, templated_types, modifiers, - reference, pointer, array) - - def GetTemplateIndices(self, names): - # names is a list of strings. - start = names.index('<') - end = len(names) - 1 - while end > 0: - if names[end] == '>': - break - end -= 1 - return start, end+1 - -class AstBuilder(object): - def __init__(self, token_stream, filename, in_class='', visibility=None, - namespace_stack=[]): - self.tokens = token_stream - self.filename = filename - # TODO(nnorwitz): use a better data structure (deque) for the queue. - # Switching directions of the "queue" improved perf by about 25%. - # Using a deque should be even better since we access from both sides. - self.token_queue = [] - self.namespace_stack = namespace_stack[:] - self.in_class = in_class - if in_class is None: - self.in_class_name_only = None - else: - self.in_class_name_only = in_class.split('::')[-1] - self.visibility = visibility - self.in_function = False - self.current_token = None - # Keep the state whether we are currently handling a typedef or not. - self._handling_typedef = False - - self.converter = TypeConverter(self.namespace_stack) - - def HandleError(self, msg, token): - printable_queue = list(reversed(self.token_queue[-20:])) - sys.stderr.write('Got %s in %s @ %s %s\n' % - (msg, self.filename, token, printable_queue)) - - def Generate(self): - while 1: - token = self._GetNextToken() - if not token: - break - - # Get the next token. - self.current_token = token - - # Dispatch on the next token type. - if token.token_type == _INTERNAL_TOKEN: - if token.name == _NAMESPACE_POP: - self.namespace_stack.pop() - continue - - try: - result = self._GenerateOne(token) - if result is not None: - yield result - except: - self.HandleError('exception', token) - raise - - def _CreateVariable(self, pos_token, name, type_name, type_modifiers, - ref_pointer_name_seq, templated_types, value=None): - reference = '&' in ref_pointer_name_seq - pointer = '*' in ref_pointer_name_seq - array = '[' in ref_pointer_name_seq - var_type = Type(pos_token.start, pos_token.end, type_name, - templated_types, type_modifiers, - reference, pointer, array) - return VariableDeclaration(pos_token.start, pos_token.end, - name, var_type, value, self.namespace_stack) - - def _GenerateOne(self, token): - if token.token_type == tokenize.NAME: - if (keywords.IsKeyword(token.name) and - not keywords.IsBuiltinType(token.name)): - if token.name == 'enum': - # Pop the next token and only put it back if it's not - # 'class'. This allows us to support the two-token - # 'enum class' keyword as if it were simply 'enum'. - next = self._GetNextToken() - if next.name != 'class': - self._AddBackToken(next) - - method = getattr(self, 'handle_' + token.name) - return method() - elif token.name == self.in_class_name_only: - # The token name is the same as the class, must be a ctor if - # there is a paren. Otherwise, it's the return type. - # Peek ahead to get the next token to figure out which. 
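- # For example, inside class Foo the tokens 'Foo' '(' begin a
- # constructor declaration, whereas 'Foo' followed by '*', '&', or
- # another name means Foo is just the return type of some other
- # declaration.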
- next = self._GetNextToken() - self._AddBackToken(next) - if next.token_type == tokenize.SYNTAX and next.name == '(': - return self._GetMethod([token], FUNCTION_CTOR, None, True) - # Fall through--handle like any other method. - - # Handle data or function declaration/definition. - syntax = tokenize.SYNTAX - temp_tokens, last_token = \ - self._GetVarTokensUpToIgnoringTemplates(syntax, - '(', ';', '{', '[') - temp_tokens.insert(0, token) - if last_token.name == '(': - # If there is an assignment before the paren, - # this is an expression, not a method. - expr = bool([e for e in temp_tokens if e.name == '=']) - if expr: - new_temp = self._GetTokensUpTo(tokenize.SYNTAX, ';') - temp_tokens.append(last_token) - temp_tokens.extend(new_temp) - last_token = tokenize.Token(tokenize.SYNTAX, ';', 0, 0) - - if last_token.name == '[': - # Handle array, this isn't a method, unless it's an operator. - # TODO(nnorwitz): keep the size somewhere. - # unused_size = self._GetTokensUpTo(tokenize.SYNTAX, ']') - temp_tokens.append(last_token) - if temp_tokens[-2].name == 'operator': - temp_tokens.append(self._GetNextToken()) - else: - temp_tokens2, last_token = \ - self._GetVarTokensUpTo(tokenize.SYNTAX, ';') - temp_tokens.extend(temp_tokens2) - - if last_token.name == ';': - # Handle data, this isn't a method. - parts = self.converter.DeclarationToParts(temp_tokens, True) - (name, type_name, templated_types, modifiers, default, - unused_other_tokens) = parts - - t0 = temp_tokens[0] - names = [t.name for t in temp_tokens] - if templated_types: - start, end = self.converter.GetTemplateIndices(names) - names = names[:start] + names[end:] - default = ''.join([t.name for t in default]) - return self._CreateVariable(t0, name, type_name, modifiers, - names, templated_types, default) - if last_token.name == '{': - self._AddBackTokens(temp_tokens[1:]) - self._AddBackToken(last_token) - method_name = temp_tokens[0].name - method = getattr(self, 'handle_' + method_name, None) - if not method: - # Must be declaring a variable. - # TODO(nnorwitz): handle the declaration. - return None - return method() - return self._GetMethod(temp_tokens, 0, None, False) - elif token.token_type == tokenize.SYNTAX: - if token.name == '~' and self.in_class: - # Must be a dtor (probably not in method body). - token = self._GetNextToken() - # self.in_class can contain A::Name, but the dtor will only - # be Name. Make sure to compare against the right value. - if (token.token_type == tokenize.NAME and - token.name == self.in_class_name_only): - return self._GetMethod([token], FUNCTION_DTOR, None, True) - # TODO(nnorwitz): handle a lot more syntax. - elif token.token_type == tokenize.PREPROCESSOR: - # TODO(nnorwitz): handle more preprocessor directives. - # token starts with a #, so remove it and strip whitespace. - name = token.name[1:].lstrip() - if name.startswith('include'): - # Remove "include". - name = name[7:].strip() - assert name - # Handle #include \ "header-on-second-line.h". - if name.startswith('\\'): - name = name[1:].strip() - assert name[0] in '<"', token - assert name[-1] in '>"', token - system = name[0] == '<' - filename = name[1:-1] - return Include(token.start, token.end, filename, system) - if name.startswith('define'): - # Remove "define". 
- name = name[6:].strip() - assert name - value = '' - for i, c in enumerate(name): - if c.isspace(): - value = name[i:].lstrip() - name = name[:i] - break - return Define(token.start, token.end, name, value) - if name.startswith('if') and name[2:3].isspace(): - condition = name[3:].strip() - if condition.startswith('0') or condition.startswith('(0)'): - self._SkipIf0Blocks() - return None - - def _GetTokensUpTo(self, expected_token_type, expected_token): - return self._GetVarTokensUpTo(expected_token_type, expected_token)[0] - - def _GetVarTokensUpTo(self, expected_token_type, *expected_tokens): - last_token = self._GetNextToken() - tokens = [] - while (last_token.token_type != expected_token_type or - last_token.name not in expected_tokens): - tokens.append(last_token) - last_token = self._GetNextToken() - return tokens, last_token - - # Same as _GetVarTokensUpTo, but skips over '<...>' which could contain an - # expected token. - def _GetVarTokensUpToIgnoringTemplates(self, expected_token_type, - *expected_tokens): - last_token = self._GetNextToken() - tokens = [] - nesting = 0 - while (nesting > 0 or - last_token.token_type != expected_token_type or - last_token.name not in expected_tokens): - tokens.append(last_token) - last_token = self._GetNextToken() - if last_token.name == '<': - nesting += 1 - elif last_token.name == '>': - nesting -= 1 - return tokens, last_token - - # TODO(nnorwitz): remove _IgnoreUpTo() it shouldn't be necessary. - def _IgnoreUpTo(self, token_type, token): - unused_tokens = self._GetTokensUpTo(token_type, token) - - def _SkipIf0Blocks(self): - count = 1 - while 1: - token = self._GetNextToken() - if token.token_type != tokenize.PREPROCESSOR: - continue - - name = token.name[1:].lstrip() - if name.startswith('endif'): - count -= 1 - if count == 0: - break - elif name.startswith('if'): - count += 1 - - def _GetMatchingChar(self, open_paren, close_paren, GetNextToken=None): - if GetNextToken is None: - GetNextToken = self._GetNextToken - # Assumes the current token is open_paren and we will consume - # and return up to the close_paren. 
- count = 1 - token = GetNextToken() - while 1: - if token.token_type == tokenize.SYNTAX: - if token.name == open_paren: - count += 1 - elif token.name == close_paren: - count -= 1 - if count == 0: - break - yield token - token = GetNextToken() - yield token - - def _GetParameters(self): - return self._GetMatchingChar('(', ')') - - def GetScope(self): - return self._GetMatchingChar('{', '}') - - def _GetNextToken(self): - if self.token_queue: - return self.token_queue.pop() - try: - return next(self.tokens) - except StopIteration: - return - - def _AddBackToken(self, token): - if token.whence == tokenize.WHENCE_STREAM: - token.whence = tokenize.WHENCE_QUEUE - self.token_queue.insert(0, token) - else: - assert token.whence == tokenize.WHENCE_QUEUE, token - self.token_queue.append(token) - - def _AddBackTokens(self, tokens): - if tokens: - if tokens[-1].whence == tokenize.WHENCE_STREAM: - for token in tokens: - token.whence = tokenize.WHENCE_QUEUE - self.token_queue[:0] = reversed(tokens) - else: - assert tokens[-1].whence == tokenize.WHENCE_QUEUE, tokens - self.token_queue.extend(reversed(tokens)) - - def GetName(self, seq=None): - """Returns ([tokens], next_token_info).""" - GetNextToken = self._GetNextToken - if seq is not None: - it = iter(seq) - GetNextToken = lambda: next(it) - next_token = GetNextToken() - tokens = [] - last_token_was_name = False - while (next_token.token_type == tokenize.NAME or - (next_token.token_type == tokenize.SYNTAX and - next_token.name in ('::', '<'))): - # Two NAMEs in a row means the identifier should terminate. - # It's probably some sort of variable declaration. - if last_token_was_name and next_token.token_type == tokenize.NAME: - break - last_token_was_name = next_token.token_type == tokenize.NAME - tokens.append(next_token) - # Handle templated names. - if next_token.name == '<': - tokens.extend(self._GetMatchingChar('<', '>', GetNextToken)) - last_token_was_name = True - next_token = GetNextToken() - return tokens, next_token - - def GetMethod(self, modifiers, templated_types): - return_type_and_name = self._GetTokensUpTo(tokenize.SYNTAX, '(') - assert len(return_type_and_name) >= 1 - return self._GetMethod(return_type_and_name, modifiers, templated_types, - False) - - def _GetMethod(self, return_type_and_name, modifiers, templated_types, - get_paren): - template_portion = None - if get_paren: - token = self._GetNextToken() - assert token.token_type == tokenize.SYNTAX, token - if token.name == '<': - # Handle templatized dtors. - template_portion = [token] - template_portion.extend(self._GetMatchingChar('<', '>')) - token = self._GetNextToken() - assert token.token_type == tokenize.SYNTAX, token - assert token.name == '(', token - - name = return_type_and_name.pop() - # Handle templatized ctors. - if name.name == '>': - index = 1 - while return_type_and_name[index].name != '<': - index += 1 - template_portion = return_type_and_name[index:] + [name] - del return_type_and_name[index:] - name = return_type_and_name.pop() - elif name.name == ']': - rt = return_type_and_name - assert rt[-1].name == '[', return_type_and_name - assert rt[-2].name == 'operator', return_type_and_name - name_seq = return_type_and_name[-2:] - del return_type_and_name[-2:] - name = tokenize.Token(tokenize.NAME, 'operator[]', - name_seq[0].start, name.end) - # Get the open paren so _GetParameters() below works. - unused_open_paren = self._GetNextToken() - - # TODO(nnorwitz): store template_portion. 
- return_type = return_type_and_name - indices = name - if return_type: - indices = return_type[0] - - # Force ctor for templatized ctors. - if name.name == self.in_class and not modifiers: - modifiers |= FUNCTION_CTOR - parameters = list(self._GetParameters()) - del parameters[-1] # Remove trailing ')'. - - # Handling operator() is especially weird. - if name.name == 'operator' and not parameters: - token = self._GetNextToken() - assert token.name == '(', token - parameters = list(self._GetParameters()) - del parameters[-1] # Remove trailing ')'. - - token = self._GetNextToken() - while token.token_type == tokenize.NAME: - modifier_token = token - token = self._GetNextToken() - if modifier_token.name == 'const': - modifiers |= FUNCTION_CONST - elif modifier_token.name == '__attribute__': - # TODO(nnorwitz): handle more __attribute__ details. - modifiers |= FUNCTION_ATTRIBUTE - assert token.name == '(', token - # Consume everything between the (parens). - unused_tokens = list(self._GetMatchingChar('(', ')')) - token = self._GetNextToken() - elif modifier_token.name == 'throw': - modifiers |= FUNCTION_THROW - assert token.name == '(', token - # Consume everything between the (parens). - unused_tokens = list(self._GetMatchingChar('(', ')')) - token = self._GetNextToken() - elif modifier_token.name == 'override': - modifiers |= FUNCTION_OVERRIDE - elif modifier_token.name == modifier_token.name.upper(): - # HACK(nnorwitz): assume that all upper-case names - # are some macro we aren't expanding. - modifiers |= FUNCTION_UNKNOWN_ANNOTATION - else: - self.HandleError('unexpected token', modifier_token) - - assert token.token_type == tokenize.SYNTAX, token - # Handle ctor initializers. - if token.name == ':': - # TODO(nnorwitz): anything else to handle for initializer list? - while token.name != ';' and token.name != '{': - token = self._GetNextToken() - - # Handle pointer to functions that are really data but look - # like method declarations. - if token.name == '(': - if parameters[0].name == '*': - # name contains the return type. - name = parameters.pop() - # parameters contains the name of the data. - modifiers = [p.name for p in parameters] - # Already at the ( to open the parameter list. - function_parameters = list(self._GetMatchingChar('(', ')')) - del function_parameters[-1] # Remove trailing ')'. - # TODO(nnorwitz): store the function_parameters. - token = self._GetNextToken() - assert token.token_type == tokenize.SYNTAX, token - assert token.name == ';', token - return self._CreateVariable(indices, name.name, indices.name, - modifiers, '', None) - # At this point, we got something like: - # return_type (type::*name_)(params); - # This is a data member called name_ that is a function pointer. - # With this code: void (sq_type::*field_)(string&); - # We get: name=void return_type=[] parameters=sq_type ... field_ - # TODO(nnorwitz): is return_type always empty? - # TODO(nnorwitz): this isn't even close to being correct. - # Just put in something so we don't crash and can move on. - real_name = parameters[-1] - modifiers = [p.name for p in self._GetParameters()] - del modifiers[-1] # Remove trailing ')'. - return self._CreateVariable(indices, real_name.name, indices.name, - modifiers, '', None) - - if token.name == '{': - body = list(self.GetScope()) - del body[-1] # Remove trailing '}'. 
- else: - body = None - if token.name == '=': - token = self._GetNextToken() - - if token.name == 'default' or token.name == 'delete': - # Ignore explicitly defaulted and deleted special members - # in C++11. - token = self._GetNextToken() - else: - # Handle pure-virtual declarations. - assert token.token_type == tokenize.CONSTANT, token - assert token.name == '0', token - modifiers |= FUNCTION_PURE_VIRTUAL - token = self._GetNextToken() - - if token.name == '[': - # TODO(nnorwitz): store tokens and improve parsing. - # template char (&ASH(T (&seq)[N]))[N]; - tokens = list(self._GetMatchingChar('[', ']')) - token = self._GetNextToken() - - assert token.name == ';', (token, return_type_and_name, parameters) - - # Looks like we got a method, not a function. - if len(return_type) > 2 and return_type[-1].name == '::': - return_type, in_class = \ - self._GetReturnTypeAndClassName(return_type) - return Method(indices.start, indices.end, name.name, in_class, - return_type, parameters, modifiers, templated_types, - body, self.namespace_stack) - return Function(indices.start, indices.end, name.name, return_type, - parameters, modifiers, templated_types, body, - self.namespace_stack) - - def _GetReturnTypeAndClassName(self, token_seq): - # Splitting the return type from the class name in a method - # can be tricky. For example, Return::Type::Is::Hard::To::Find(). - # Where is the return type and where is the class name? - # The heuristic used is to pull the last name as the class name. - # This includes all the templated type info. - # TODO(nnorwitz): if there is only One name like in the - # example above, punt and assume the last bit is the class name. - - # Ignore a :: prefix, if exists so we can find the first real name. - i = 0 - if token_seq[0].name == '::': - i = 1 - # Ignore a :: suffix, if exists. - end = len(token_seq) - 1 - if token_seq[end-1].name == '::': - end -= 1 - - # Make a copy of the sequence so we can append a sentinel - # value. This is required for GetName will has to have some - # terminating condition beyond the last name. - seq_copy = token_seq[i:end] - seq_copy.append(tokenize.Token(tokenize.SYNTAX, '', 0, 0)) - names = [] - while i < end: - # Iterate through the sequence parsing out each name. - new_name, next = self.GetName(seq_copy[i:]) - assert new_name, 'Got empty new_name, next=%s' % next - # We got a pointer or ref. Add it to the name. - if next and next.token_type == tokenize.SYNTAX: - new_name.append(next) - names.append(new_name) - i += len(new_name) - - # Now that we have the names, it's time to undo what we did. - - # Remove the sentinel value. - names[-1].pop() - # Flatten the token sequence for the return type. - return_type = [e for seq in names[:-1] for e in seq] - # The class name is the last name. - class_name = names[-1] - return return_type, class_name - - def handle_bool(self): - pass - - def handle_char(self): - pass - - def handle_int(self): - pass - - def handle_long(self): - pass - - def handle_short(self): - pass - - def handle_double(self): - pass - - def handle_float(self): - pass - - def handle_void(self): - pass - - def handle_wchar_t(self): - pass - - def handle_unsigned(self): - pass - - def handle_signed(self): - pass - - def _GetNestedType(self, ctor): - name = None - name_tokens, token = self.GetName() - if name_tokens: - name = ''.join([t.name for t in name_tokens]) - - # Handle forward declarations. 
- if token.token_type == tokenize.SYNTAX and token.name == ';': - return ctor(token.start, token.end, name, None, - self.namespace_stack) - - if token.token_type == tokenize.NAME and self._handling_typedef: - self._AddBackToken(token) - return ctor(token.start, token.end, name, None, - self.namespace_stack) - - # Must be the type declaration. - fields = list(self._GetMatchingChar('{', '}')) - del fields[-1] # Remove trailing '}'. - if token.token_type == tokenize.SYNTAX and token.name == '{': - next = self._GetNextToken() - new_type = ctor(token.start, token.end, name, fields, - self.namespace_stack) - # A name means this is an anonymous type and the name - # is the variable declaration. - if next.token_type != tokenize.NAME: - return new_type - name = new_type - token = next - - # Must be variable declaration using the type prefixed with keyword. - assert token.token_type == tokenize.NAME, token - return self._CreateVariable(token, token.name, name, [], '', None) - - def handle_struct(self): - # Special case the handling typedef/aliasing of structs here. - # It would be a pain to handle in the class code. - name_tokens, var_token = self.GetName() - if name_tokens: - next_token = self._GetNextToken() - is_syntax = (var_token.token_type == tokenize.SYNTAX and - var_token.name[0] in '*&') - is_variable = (var_token.token_type == tokenize.NAME and - next_token.name == ';') - variable = var_token - if is_syntax and not is_variable: - variable = next_token - temp = self._GetNextToken() - if temp.token_type == tokenize.SYNTAX and temp.name == '(': - # Handle methods declared to return a struct. - t0 = name_tokens[0] - struct = tokenize.Token(tokenize.NAME, 'struct', - t0.start-7, t0.start-2) - type_and_name = [struct] - type_and_name.extend(name_tokens) - type_and_name.extend((var_token, next_token)) - return self._GetMethod(type_and_name, 0, None, False) - assert temp.name == ';', (temp, name_tokens, var_token) - if is_syntax or (is_variable and not self._handling_typedef): - modifiers = ['struct'] - type_name = ''.join([t.name for t in name_tokens]) - position = name_tokens[0] - return self._CreateVariable(position, variable.name, type_name, - modifiers, var_token.name, None) - name_tokens.extend((var_token, next_token)) - self._AddBackTokens(name_tokens) - else: - self._AddBackToken(var_token) - return self._GetClass(Struct, VISIBILITY_PUBLIC, None) - - def handle_union(self): - return self._GetNestedType(Union) - - def handle_enum(self): - return self._GetNestedType(Enum) - - def handle_auto(self): - # TODO(nnorwitz): warn about using auto? Probably not since it - # will be reclaimed and useful for C++0x. - pass - - def handle_register(self): - pass - - def handle_const(self): - pass - - def handle_inline(self): - pass - - def handle_extern(self): - pass - - def handle_static(self): - pass - - def handle_virtual(self): - # What follows must be a method. - token = token2 = self._GetNextToken() - if token.name == 'inline': - # HACK(nnorwitz): handle inline dtors by ignoring 'inline'. 
- token2 = self._GetNextToken() - if token2.token_type == tokenize.SYNTAX and token2.name == '~': - return self.GetMethod(FUNCTION_VIRTUAL + FUNCTION_DTOR, None) - assert token.token_type == tokenize.NAME or token.name == '::', token - return_type_and_name, _ = self._GetVarTokensUpToIgnoringTemplates( - tokenize.SYNTAX, '(') # ) - return_type_and_name.insert(0, token) - if token2 is not token: - return_type_and_name.insert(1, token2) - return self._GetMethod(return_type_and_name, FUNCTION_VIRTUAL, - None, False) - - def handle_volatile(self): - pass - - def handle_mutable(self): - pass - - def handle_public(self): - assert self.in_class - self.visibility = VISIBILITY_PUBLIC - - def handle_protected(self): - assert self.in_class - self.visibility = VISIBILITY_PROTECTED - - def handle_private(self): - assert self.in_class - self.visibility = VISIBILITY_PRIVATE - - def handle_friend(self): - tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';') - assert tokens - t0 = tokens[0] - return Friend(t0.start, t0.end, tokens, self.namespace_stack) - - def handle_static_cast(self): - pass - - def handle_const_cast(self): - pass - - def handle_dynamic_cast(self): - pass - - def handle_reinterpret_cast(self): - pass - - def handle_new(self): - pass - - def handle_delete(self): - tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';') - assert tokens - return Delete(tokens[0].start, tokens[0].end, tokens) - - def handle_typedef(self): - token = self._GetNextToken() - if (token.token_type == tokenize.NAME and - keywords.IsKeyword(token.name)): - # Token must be struct/enum/union/class. - method = getattr(self, 'handle_' + token.name) - self._handling_typedef = True - tokens = [method()] - self._handling_typedef = False - else: - tokens = [token] - - # Get the remainder of the typedef up to the semi-colon. - tokens.extend(self._GetTokensUpTo(tokenize.SYNTAX, ';')) - - # TODO(nnorwitz): clean all this up. - assert tokens - name = tokens.pop() - indices = name - if tokens: - indices = tokens[0] - if not indices: - indices = token - if name.name == ')': - # HACK(nnorwitz): Handle pointers to functions "properly". - if (len(tokens) >= 4 and - tokens[1].name == '(' and tokens[2].name == '*'): - tokens.append(name) - name = tokens[3] - elif name.name == ']': - # HACK(nnorwitz): Handle arrays properly. - if len(tokens) >= 2: - tokens.append(name) - name = tokens[1] - new_type = tokens - if tokens and isinstance(tokens[0], tokenize.Token): - new_type = self.converter.ToType(tokens)[0] - return Typedef(indices.start, indices.end, name.name, - new_type, self.namespace_stack) - - def handle_typeid(self): - pass # Not needed yet. - - def handle_typename(self): - pass # Not needed yet. - - def _GetTemplatedTypes(self): - result = collections.OrderedDict() - tokens = list(self._GetMatchingChar('<', '>')) - len_tokens = len(tokens) - 1 # Ignore trailing '>'. - i = 0 - while i < len_tokens: - key = tokens[i].name - i += 1 - if keywords.IsKeyword(key) or key == ',': - continue - type_name = default = None - if i < len_tokens: - i += 1 - if tokens[i-1].name == '=': - assert i < len_tokens, '%s %s' % (i, tokens) - default, unused_next_token = self.GetName(tokens[i:]) - i += len(default) - else: - if tokens[i-1].name != ',': - # We got something like: Type variable. - # Re-adjust the key (variable) and type_name (Type). 
- key = tokens[i-1].name - type_name = tokens[i-2] - - result[key] = (type_name, default) - return result - - def handle_template(self): - token = self._GetNextToken() - assert token.token_type == tokenize.SYNTAX, token - assert token.name == '<', token - templated_types = self._GetTemplatedTypes() - # TODO(nnorwitz): for now, just ignore the template params. - token = self._GetNextToken() - if token.token_type == tokenize.NAME: - if token.name == 'class': - return self._GetClass(Class, VISIBILITY_PRIVATE, templated_types) - elif token.name == 'struct': - return self._GetClass(Struct, VISIBILITY_PUBLIC, templated_types) - elif token.name == 'friend': - return self.handle_friend() - self._AddBackToken(token) - tokens, last = self._GetVarTokensUpTo(tokenize.SYNTAX, '(', ';') - tokens.append(last) - self._AddBackTokens(tokens) - if last.name == '(': - return self.GetMethod(FUNCTION_NONE, templated_types) - # Must be a variable definition. - return None - - def handle_true(self): - pass # Nothing to do. - - def handle_false(self): - pass # Nothing to do. - - def handle_asm(self): - pass # Not needed yet. - - def handle_class(self): - return self._GetClass(Class, VISIBILITY_PRIVATE, None) - - def _GetBases(self): - # Get base classes. - bases = [] - while 1: - token = self._GetNextToken() - assert token.token_type == tokenize.NAME, token - # TODO(nnorwitz): store kind of inheritance...maybe. - if token.name not in ('public', 'protected', 'private'): - # If inheritance type is not specified, it is private. - # Just put the token back so we can form a name. - # TODO(nnorwitz): it would be good to warn about this. - self._AddBackToken(token) - else: - # Check for virtual inheritance. - token = self._GetNextToken() - if token.name != 'virtual': - self._AddBackToken(token) - else: - # TODO(nnorwitz): store that we got virtual for this base. - pass - base, next_token = self.GetName() - bases_ast = self.converter.ToType(base) - assert len(bases_ast) == 1, bases_ast - bases.append(bases_ast[0]) - assert next_token.token_type == tokenize.SYNTAX, next_token - if next_token.name == '{': - token = next_token - break - # Support multiple inheritance. - assert next_token.name == ',', next_token - return bases, token - - def _GetClass(self, class_type, visibility, templated_types): - class_name = None - class_token = self._GetNextToken() - if class_token.token_type != tokenize.NAME: - assert class_token.token_type == tokenize.SYNTAX, class_token - token = class_token - else: - # Skip any macro (e.g. storage class specifiers) after the - # 'class' keyword. - next_token = self._GetNextToken() - if next_token.token_type == tokenize.NAME: - self._AddBackToken(next_token) - else: - self._AddBackTokens([class_token, next_token]) - name_tokens, token = self.GetName() - class_name = ''.join([t.name for t in name_tokens]) - bases = None - if token.token_type == tokenize.SYNTAX: - if token.name == ';': - # Forward declaration. - return class_type(class_token.start, class_token.end, - class_name, None, templated_types, None, - self.namespace_stack) - if token.name in '*&': - # Inline forward declaration. Could be method or data. - name_token = self._GetNextToken() - next_token = self._GetNextToken() - if next_token.name == ';': - # Handle data - modifiers = ['class'] - return self._CreateVariable(class_token, name_token.name, - class_name, - modifiers, token.name, None) - else: - # Assume this is a method. 
- tokens = (class_token, token, name_token, next_token) - self._AddBackTokens(tokens) - return self.GetMethod(FUNCTION_NONE, None) - if token.name == ':': - bases, token = self._GetBases() - - body = None - if token.token_type == tokenize.SYNTAX and token.name == '{': - assert token.token_type == tokenize.SYNTAX, token - assert token.name == '{', token - - ast = AstBuilder(self.GetScope(), self.filename, class_name, - visibility, self.namespace_stack) - body = list(ast.Generate()) - - if not self._handling_typedef: - token = self._GetNextToken() - if token.token_type != tokenize.NAME: - assert token.token_type == tokenize.SYNTAX, token - assert token.name == ';', token - else: - new_class = class_type(class_token.start, class_token.end, - class_name, bases, None, - body, self.namespace_stack) - - modifiers = [] - return self._CreateVariable(class_token, - token.name, new_class, - modifiers, token.name, None) - else: - if not self._handling_typedef: - self.HandleError('non-typedef token', token) - self._AddBackToken(token) - - return class_type(class_token.start, class_token.end, class_name, - bases, templated_types, body, self.namespace_stack) - - def handle_namespace(self): - # Support anonymous namespaces. - name = None - name_tokens, token = self.GetName() - if name_tokens: - name = ''.join([t.name for t in name_tokens]) - self.namespace_stack.append(name) - assert token.token_type == tokenize.SYNTAX, token - # Create an internal token that denotes when the namespace is complete. - internal_token = tokenize.Token(_INTERNAL_TOKEN, _NAMESPACE_POP, - None, None) - internal_token.whence = token.whence - if token.name == '=': - # TODO(nnorwitz): handle aliasing namespaces. - name, next_token = self.GetName() - assert next_token.name == ';', next_token - self._AddBackToken(internal_token) - else: - assert token.name == '{', token - tokens = list(self.GetScope()) - # Replace the trailing } with the internal namespace pop token. - tokens[-1] = internal_token - # Handle namespace with nothing in it. - self._AddBackTokens(tokens) - return None - - def handle_using(self): - tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';') - assert tokens - return Using(tokens[0].start, tokens[0].end, tokens) - - def handle_explicit(self): - assert self.in_class - # Nothing much to do. - # TODO(nnorwitz): maybe verify the method name == class name. - # This must be a ctor. - return self.GetMethod(FUNCTION_CTOR, None) - - def handle_this(self): - pass # Nothing to do. - - def handle_operator(self): - # Pull off the next token(s?) and make that part of the method name. - pass - - def handle_sizeof(self): - pass - - def handle_case(self): - pass - - def handle_switch(self): - pass - - def handle_default(self): - token = self._GetNextToken() - assert token.token_type == tokenize.SYNTAX - assert token.name == ':' - - def handle_if(self): - pass - - def handle_else(self): - pass - - def handle_return(self): - tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';') - if not tokens: - return Return(self.current_token.start, self.current_token.end, None) - return Return(tokens[0].start, tokens[0].end, tokens) - - def handle_goto(self): - tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';') - assert len(tokens) == 1, str(tokens) - return Goto(tokens[0].start, tokens[0].end, tokens[0].name) - - def handle_try(self): - pass # Not needed yet. - - def handle_catch(self): - pass # Not needed yet. - - def handle_throw(self): - pass # Not needed yet. 
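-
- # Note on the handle_* family: _GenerateOne dispatches keyword tokens
- # by name via getattr(self, 'handle_' + token.name), so every keyword
- # needs a handler; keywords that contribute nothing to the AST (while,
- # do, for, ...) get no-op handlers so dispatch never raises
- # AttributeError.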
- - def handle_while(self): - pass - - def handle_do(self): - pass - - def handle_for(self): - pass - - def handle_break(self): - self._IgnoreUpTo(tokenize.SYNTAX, ';') - - def handle_continue(self): - self._IgnoreUpTo(tokenize.SYNTAX, ';') - - -def BuilderFromSource(source, filename): - """Utility method that returns an AstBuilder from source code. - - Args: - source: 'C++ source code' - filename: 'file1' - - Returns: - AstBuilder - """ - return AstBuilder(tokenize.GetTokens(source), filename) - - -def PrintIndentifiers(filename, should_print): - """Prints all identifiers for a C++ source file. - - Args: - filename: 'file1' - should_print: predicate with signature: bool Function(token) - """ - source = utils.ReadFile(filename, False) - if source is None: - sys.stderr.write('Unable to find: %s\n' % filename) - return - - #print('Processing %s' % actual_filename) - builder = BuilderFromSource(source, filename) - try: - for node in builder.Generate(): - if should_print(node): - print(node.name) - except KeyboardInterrupt: - return - except: - pass - - -def PrintAllIndentifiers(filenames, should_print): - """Prints all identifiers for each C++ source file in filenames. - - Args: - filenames: ['file1', 'file2', ...] - should_print: predicate with signature: bool Function(token) - """ - for path in filenames: - PrintIndentifiers(path, should_print) - - -def main(argv): - for filename in argv[1:]: - source = utils.ReadFile(filename) - if source is None: - continue - - print('Processing %s' % filename) - builder = BuilderFromSource(source, filename) - try: - entire_ast = filter(None, builder.Generate()) - except KeyboardInterrupt: - return - except: - # Already printed a warning, print the traceback and continue. - traceback.print_exc() - else: - if utils.DEBUG: - for ast in entire_ast: - print(ast) - - -if __name__ == '__main__': - main(sys.argv) diff --git a/MicroBenchmarks/libs/benchmark/googletest/googlemock/scripts/generator/cpp/gmock_class.py b/MicroBenchmarks/libs/benchmark/googletest/googlemock/scripts/generator/cpp/gmock_class.py deleted file mode 100755 --- a/MicroBenchmarks/libs/benchmark/googletest/googlemock/scripts/generator/cpp/gmock_class.py +++ /dev/null @@ -1,247 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2008 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Generate Google Mock classes from base classes. - -This program will read in a C++ source file and output the Google Mock -classes for the specified classes. If no class is specified, all -classes in the source file are emitted. - -Usage: - gmock_class.py header-file.h [ClassName]... - -Output is sent to stdout. -""" - -import os -import re -import sys - -from cpp import ast -from cpp import utils - -# Preserve compatibility with Python 2.3. -try: - _dummy = set -except NameError: - import sets - - set = sets.Set - -_VERSION = (1, 0, 1) # The version of this script. -# How many spaces to indent. Can set me with the INDENT environment variable. 
-_INDENT = 2 - - -def _RenderType(ast_type): - """Renders the potentially recursively templated type into a string. - - Args: - ast_type: The AST of the type. - - Returns: - Rendered string of the type. - """ - # Add modifiers like 'const'. - modifiers = '' - if ast_type.modifiers: - modifiers = ' '.join(ast_type.modifiers) + ' ' - return_type = modifiers + ast_type.name - if ast_type.templated_types: - # Collect template args. - template_args = [] - for arg in ast_type.templated_types: - rendered_arg = _RenderType(arg) - template_args.append(rendered_arg) - return_type += '<' + ', '.join(template_args) + '>' - if ast_type.pointer: - return_type += '*' - if ast_type.reference: - return_type += '&' - return return_type - - -def _GenerateArg(source): - """Strips out comments, default arguments, and redundant spaces from a single argument. - - Args: - source: A string for a single argument. - - Returns: - Rendered string of the argument. - """ - # Remove end of line comments before eliminating newlines. - arg = re.sub(r'//.*', '', source) - - # Remove c-style comments. - arg = re.sub(r'/\*.*\*/', '', arg) - - # Remove default arguments. - arg = re.sub(r'=.*', '', arg) - - # Collapse spaces and newlines into a single space. - arg = re.sub(r'\s+', ' ', arg) - return arg.strip() - - -def _EscapeForMacro(s): - """Escapes a string for use as an argument to a C++ macro.""" - paren_count = 0 - for c in s: - if c == '(': - paren_count += 1 - elif c == ')': - paren_count -= 1 - elif c == ',' and paren_count == 0: - return '(' + s + ')' - return s - - -def _GenerateMethods(output_lines, source, class_node): - function_type = ( - ast.FUNCTION_VIRTUAL | ast.FUNCTION_PURE_VIRTUAL | ast.FUNCTION_OVERRIDE) - ctor_or_dtor = ast.FUNCTION_CTOR | ast.FUNCTION_DTOR - indent = ' ' * _INDENT - - for node in class_node.body: - # We only care about virtual functions. - if (isinstance(node, ast.Function) and node.modifiers & function_type and - not node.modifiers & ctor_or_dtor): - # Pick out all the elements we need from the original function. - modifiers = 'override' - if node.modifiers & ast.FUNCTION_CONST: - modifiers = 'const, ' + modifiers - - return_type = 'void' - if node.return_type: - return_type = _EscapeForMacro(_RenderType(node.return_type)) - - args = [] - for p in node.parameters: - arg = _GenerateArg(source[p.start:p.end]) - if arg != 'void': - args.append(_EscapeForMacro(arg)) - - # Create the mock method definition. - output_lines.extend([ - '%sMOCK_METHOD(%s, %s, (%s), (%s));' % - (indent, return_type, node.name, ', '.join(args), modifiers) - ]) - - -def _GenerateMocks(filename, source, ast_list, desired_class_names): - processed_class_names = set() - lines = [] - for node in ast_list: - if (isinstance(node, ast.Class) and node.body and - # desired_class_names being None means that all classes are selected. - (not desired_class_names or node.name in desired_class_names)): - class_name = node.name - parent_name = class_name - processed_class_names.add(class_name) - class_node = node - # Add namespace before the class. - if class_node.namespace: - lines.extend(['namespace %s {' % n for n in class_node.namespace]) # } - lines.append('') - - # Add template args for templated classes. - if class_node.templated_types: - # TODO(paulchang): Handle non-type template arguments (e.g. - # template). - - # class_node.templated_types is an OrderedDict from strings to a tuples. - # The key is the name of the template, and the value is - # (type_name, default). Both type_name and default could be None. 
- template_args = class_node.templated_types.keys() - template_decls = ['typename ' + arg for arg in template_args] - lines.append('template <' + ', '.join(template_decls) + '>') - parent_name += '<' + ', '.join(template_args) + '>' - - # Add the class prolog. - lines.append('class Mock%s : public %s {' # } - % (class_name, parent_name)) - lines.append('%spublic:' % (' ' * (_INDENT // 2))) - - # Add all the methods. - _GenerateMethods(lines, source, class_node) - - # Close the class. - if lines: - # If there are no virtual methods, no need for a public label. - if len(lines) == 2: - del lines[-1] - - # Only close the class if there really is a class. - lines.append('};') - lines.append('') # Add an extra newline. - - # Close the namespace. - if class_node.namespace: - for i in range(len(class_node.namespace) - 1, -1, -1): - lines.append('} // namespace %s' % class_node.namespace[i]) - lines.append('') # Add an extra newline. - - if desired_class_names: - missing_class_name_list = list(desired_class_names - processed_class_names) - if missing_class_name_list: - missing_class_name_list.sort() - sys.stderr.write('Class(es) not found in %s: %s\n' % - (filename, ', '.join(missing_class_name_list))) - elif not processed_class_names: - sys.stderr.write('No class found in %s\n' % filename) - - return lines - - -def main(argv=sys.argv): - if len(argv) < 2: - sys.stderr.write('Google Mock Class Generator v%s\n\n' % - '.'.join(map(str, _VERSION))) - sys.stderr.write(__doc__) - return 1 - - global _INDENT - try: - _INDENT = int(os.environ['INDENT']) - except KeyError: - pass - except: - sys.stderr.write('Unable to use indent of %s\n' % os.environ.get('INDENT')) - - filename = argv[1] - desired_class_names = None # None means all classes in the source file. - if len(argv) >= 3: - desired_class_names = set(argv[2:]) - source = utils.ReadFile(filename) - if source is None: - return 1 - - builder = ast.BuilderFromSource(source, filename) - try: - entire_ast = filter(None, builder.Generate()) - except KeyboardInterrupt: - return - except: - # An error message was already printed since we couldn't parse. - sys.exit(1) - else: - lines = _GenerateMocks(filename, source, entire_ast, desired_class_names) - sys.stdout.write('\n'.join(lines)) - - -if __name__ == '__main__': - main(sys.argv) diff --git a/MicroBenchmarks/libs/benchmark/googletest/googlemock/scripts/generator/cpp/gmock_class_test.py b/MicroBenchmarks/libs/benchmark/googletest/googlemock/scripts/generator/cpp/gmock_class_test.py deleted file mode 100755 --- a/MicroBenchmarks/libs/benchmark/googletest/googlemock/scripts/generator/cpp/gmock_class_test.py +++ /dev/null @@ -1,570 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2009 Neal Norwitz All Rights Reserved. -# Portions Copyright 2009 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Tests for gmock.scripts.generator.cpp.gmock_class.""" - -import os -import sys -import unittest - -# Allow the cpp imports below to work when run as a standalone script. 
-sys.path.append(os.path.join(os.path.dirname(__file__), '..')) - -from cpp import ast -from cpp import gmock_class - - -class TestCase(unittest.TestCase): - """Helper class that adds assert methods.""" - - @staticmethod - def StripLeadingWhitespace(lines): - """Strip leading whitespace in each line in 'lines'.""" - return '\n'.join([s.lstrip() for s in lines.split('\n')]) - - def assertEqualIgnoreLeadingWhitespace(self, expected_lines, lines): - """Specialized assert that ignores the indent level.""" - self.assertEqual(expected_lines, self.StripLeadingWhitespace(lines)) - - -class GenerateMethodsTest(TestCase): - - @staticmethod - def GenerateMethodSource(cpp_source): - """Convert C++ source to Google Mock output source lines.""" - method_source_lines = [] - # is a pseudo-filename, it is not read or written. - builder = ast.BuilderFromSource(cpp_source, '') - ast_list = list(builder.Generate()) - gmock_class._GenerateMethods(method_source_lines, cpp_source, ast_list[0]) - return '\n'.join(method_source_lines) - - def testSimpleMethod(self): - source = """ -class Foo { - public: - virtual int Bar(); -}; -""" - self.assertEqualIgnoreLeadingWhitespace( - 'MOCK_METHOD(int, Bar, (), (override));', - self.GenerateMethodSource(source)) - - def testSimpleConstructorsAndDestructor(self): - source = """ -class Foo { - public: - Foo(); - Foo(int x); - Foo(const Foo& f); - Foo(Foo&& f); - ~Foo(); - virtual int Bar() = 0; -}; -""" - # The constructors and destructor should be ignored. - self.assertEqualIgnoreLeadingWhitespace( - 'MOCK_METHOD(int, Bar, (), (override));', - self.GenerateMethodSource(source)) - - def testVirtualDestructor(self): - source = """ -class Foo { - public: - virtual ~Foo(); - virtual int Bar() = 0; -}; -""" - # The destructor should be ignored. - self.assertEqualIgnoreLeadingWhitespace( - 'MOCK_METHOD(int, Bar, (), (override));', - self.GenerateMethodSource(source)) - - def testExplicitlyDefaultedConstructorsAndDestructor(self): - source = """ -class Foo { - public: - Foo() = default; - Foo(const Foo& f) = default; - Foo(Foo&& f) = default; - ~Foo() = default; - virtual int Bar() = 0; -}; -""" - # The constructors and destructor should be ignored. - self.assertEqualIgnoreLeadingWhitespace( - 'MOCK_METHOD(int, Bar, (), (override));', - self.GenerateMethodSource(source)) - - def testExplicitlyDeletedConstructorsAndDestructor(self): - source = """ -class Foo { - public: - Foo() = delete; - Foo(const Foo& f) = delete; - Foo(Foo&& f) = delete; - ~Foo() = delete; - virtual int Bar() = 0; -}; -""" - # The constructors and destructor should be ignored. 
- self.assertEqualIgnoreLeadingWhitespace( - 'MOCK_METHOD(int, Bar, (), (override));', - self.GenerateMethodSource(source)) - - def testSimpleOverrideMethod(self): - source = """ -class Foo { - public: - int Bar() override; -}; -""" - self.assertEqualIgnoreLeadingWhitespace( - 'MOCK_METHOD(int, Bar, (), (override));', - self.GenerateMethodSource(source)) - - def testSimpleConstMethod(self): - source = """ -class Foo { - public: - virtual void Bar(bool flag) const; -}; -""" - self.assertEqualIgnoreLeadingWhitespace( - 'MOCK_METHOD(void, Bar, (bool flag), (const, override));', - self.GenerateMethodSource(source)) - - def testExplicitVoid(self): - source = """ -class Foo { - public: - virtual int Bar(void); -}; -""" - self.assertEqualIgnoreLeadingWhitespace( - 'MOCK_METHOD(int, Bar, (), (override));', - self.GenerateMethodSource(source)) - - def testStrangeNewlineInParameter(self): - source = """ -class Foo { - public: - virtual void Bar(int -a) = 0; -}; -""" - self.assertEqualIgnoreLeadingWhitespace( - 'MOCK_METHOD(void, Bar, (int a), (override));', - self.GenerateMethodSource(source)) - - def testDefaultParameters(self): - source = """ -class Foo { - public: - virtual void Bar(int a, char c = 'x') = 0; -}; -""" - self.assertEqualIgnoreLeadingWhitespace( - 'MOCK_METHOD(void, Bar, (int a, char c), (override));', - self.GenerateMethodSource(source)) - - def testMultipleDefaultParameters(self): - source = """ -class Foo { - public: - virtual void Bar( - int a = 42, - char c = 'x', - const int* const p = nullptr, - const std::string& s = "42", - char tab[] = {'4','2'}, - int const *& rp = aDefaultPointer) = 0; -}; -""" - self.assertEqualIgnoreLeadingWhitespace( - 'MOCK_METHOD(void, Bar, ' - '(int a, char c, const int* const p, const std::string& s, char tab[], int const *& rp), ' - '(override));', self.GenerateMethodSource(source)) - - def testMultipleSingleLineDefaultParameters(self): - source = """ -class Foo { - public: - virtual void Bar(int a = 42, int b = 43, int c = 44) = 0; -}; -""" - self.assertEqualIgnoreLeadingWhitespace( - 'MOCK_METHOD(void, Bar, (int a, int b, int c), (override));', - self.GenerateMethodSource(source)) - - def testConstDefaultParameter(self): - source = """ -class Test { - public: - virtual bool Bar(const int test_arg = 42) = 0; -}; -""" - self.assertEqualIgnoreLeadingWhitespace( - 'MOCK_METHOD(bool, Bar, (const int test_arg), (override));', - self.GenerateMethodSource(source)) - - def testConstRefDefaultParameter(self): - source = """ -class Test { - public: - virtual bool Bar(const std::string& test_arg = "42" ) = 0; -}; -""" - self.assertEqualIgnoreLeadingWhitespace( - 'MOCK_METHOD(bool, Bar, (const std::string& test_arg), (override));', - self.GenerateMethodSource(source)) - - def testRemovesCommentsWhenDefaultsArePresent(self): - source = """ -class Foo { - public: - virtual void Bar(int a = 42 /* a comment */, - char /* other comment */ c= 'x') = 0; -}; -""" - self.assertEqualIgnoreLeadingWhitespace( - 'MOCK_METHOD(void, Bar, (int a, char c), (override));', - self.GenerateMethodSource(source)) - - def testDoubleSlashCommentsInParameterListAreRemoved(self): - source = """ -class Foo { - public: - virtual void Bar(int a, // inline comments should be elided. - int b // inline comments should be elided. 
- ) const = 0; -}; -""" - self.assertEqualIgnoreLeadingWhitespace( - 'MOCK_METHOD(void, Bar, (int a, int b), (const, override));', - self.GenerateMethodSource(source)) - - def testCStyleCommentsInParameterListAreNotRemoved(self): - # NOTE(nnorwitz): I'm not sure if it's the best behavior to keep these - # comments. Also note that C style comments after the last parameter - # are still elided. - source = """ -class Foo { - public: - virtual const string& Bar(int /* keeper */, int b); -}; -""" - self.assertEqualIgnoreLeadingWhitespace( - 'MOCK_METHOD(const string&, Bar, (int, int b), (override));', - self.GenerateMethodSource(source)) - - def testArgsOfTemplateTypes(self): - source = """ -class Foo { - public: - virtual int Bar(const vector& v, map* output); -};""" - self.assertEqualIgnoreLeadingWhitespace( - 'MOCK_METHOD(int, Bar, (const vector& v, (map* output)), (override));', - self.GenerateMethodSource(source)) - - def testReturnTypeWithOneTemplateArg(self): - source = """ -class Foo { - public: - virtual vector* Bar(int n); -};""" - self.assertEqualIgnoreLeadingWhitespace( - 'MOCK_METHOD(vector*, Bar, (int n), (override));', - self.GenerateMethodSource(source)) - - def testReturnTypeWithManyTemplateArgs(self): - source = """ -class Foo { - public: - virtual map Bar(); -};""" - self.assertEqualIgnoreLeadingWhitespace( - 'MOCK_METHOD((map), Bar, (), (override));', - self.GenerateMethodSource(source)) - - def testSimpleMethodInTemplatedClass(self): - source = """ -template -class Foo { - public: - virtual int Bar(); -}; -""" - self.assertEqualIgnoreLeadingWhitespace( - 'MOCK_METHOD(int, Bar, (), (override));', - self.GenerateMethodSource(source)) - - def testPointerArgWithoutNames(self): - source = """ -class Foo { - virtual int Bar(C*); -}; -""" - self.assertEqualIgnoreLeadingWhitespace( - 'MOCK_METHOD(int, Bar, (C*), (override));', - self.GenerateMethodSource(source)) - - def testReferenceArgWithoutNames(self): - source = """ -class Foo { - virtual int Bar(C&); -}; -""" - self.assertEqualIgnoreLeadingWhitespace( - 'MOCK_METHOD(int, Bar, (C&), (override));', - self.GenerateMethodSource(source)) - - def testArrayArgWithoutNames(self): - source = """ -class Foo { - virtual int Bar(C[]); -}; -""" - self.assertEqualIgnoreLeadingWhitespace( - 'MOCK_METHOD(int, Bar, (C[]), (override));', - self.GenerateMethodSource(source)) - - -class GenerateMocksTest(TestCase): - - @staticmethod - def GenerateMocks(cpp_source): - """Convert C++ source to complete Google Mock output source.""" - # is a pseudo-filename, it is not read or written. 
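The extra parentheses that `_EscapeForMacro` adds exist because `MOCK_METHOD` is a macro: a type spelled with a top-level comma would otherwise be split into two macro arguments by the preprocessor. A minimal hand-written illustration of the same convention (base class omitted, names hypothetical):

```cpp
#include <map>
#include <utility>
#include "gmock/gmock.h"

class MockFoo {
 public:
  // Comma-bearing return type wrapped in parentheses.
  MOCK_METHOD((std::pair<bool, int>), GetPair, ());
  // Comma-bearing parameter type wrapped the same way.
  MOCK_METHOD(bool, CheckMap, ((std::map<int, double>), bool));
};
```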
- filename = '' - builder = ast.BuilderFromSource(cpp_source, filename) - ast_list = list(builder.Generate()) - lines = gmock_class._GenerateMocks(filename, cpp_source, ast_list, None) - return '\n'.join(lines) - - def testNamespaces(self): - source = """ -namespace Foo { -namespace Bar { class Forward; } -namespace Baz::Qux { - -class Test { - public: - virtual void Foo(); -}; - -} // namespace Baz::Qux -} // namespace Foo -""" - expected = """\ -namespace Foo { -namespace Baz::Qux { - -class MockTest : public Test { -public: -MOCK_METHOD(void, Foo, (), (override)); -}; - -} // namespace Baz::Qux -} // namespace Foo -""" - self.assertEqualIgnoreLeadingWhitespace(expected, - self.GenerateMocks(source)) - - def testClassWithStorageSpecifierMacro(self): - source = """ -class STORAGE_SPECIFIER Test { - public: - virtual void Foo(); -}; -""" - expected = """\ -class MockTest : public Test { -public: -MOCK_METHOD(void, Foo, (), (override)); -}; -""" - self.assertEqualIgnoreLeadingWhitespace(expected, - self.GenerateMocks(source)) - - def testTemplatedForwardDeclaration(self): - source = """ -template class Forward; // Forward declaration should be ignored. -class Test { - public: - virtual void Foo(); -}; -""" - expected = """\ -class MockTest : public Test { -public: -MOCK_METHOD(void, Foo, (), (override)); -}; -""" - self.assertEqualIgnoreLeadingWhitespace(expected, - self.GenerateMocks(source)) - - def testTemplatedClass(self): - source = """ -template -class Test { - public: - virtual void Foo(); -}; -""" - expected = """\ -template -class MockTest : public Test { -public: -MOCK_METHOD(void, Foo, (), (override)); -}; -""" - self.assertEqualIgnoreLeadingWhitespace(expected, - self.GenerateMocks(source)) - - def testTemplateInATemplateTypedef(self): - source = """ -class Test { - public: - typedef std::vector> FooType; - virtual void Bar(const FooType& test_arg); -}; -""" - expected = """\ -class MockTest : public Test { -public: -MOCK_METHOD(void, Bar, (const FooType& test_arg), (override)); -}; -""" - self.assertEqualIgnoreLeadingWhitespace(expected, - self.GenerateMocks(source)) - - def testTemplatedClassWithTemplatedArguments(self): - source = """ -template -class Test { - public: - virtual U Foo(T some_arg); -}; -""" - expected = """\ -template -class MockTest : public Test { -public: -MOCK_METHOD(U, Foo, (T some_arg), (override)); -}; -""" - self.assertEqualIgnoreLeadingWhitespace(expected, - self.GenerateMocks(source)) - - def testTemplateInATemplateTypedefWithComma(self): - source = """ -class Test { - public: - typedef std::function>&, int> FooType; - virtual void Bar(const FooType& test_arg); -}; -""" - expected = """\ -class MockTest : public Test { -public: -MOCK_METHOD(void, Bar, (const FooType& test_arg), (override)); -}; -""" - self.assertEqualIgnoreLeadingWhitespace(expected, - self.GenerateMocks(source)) - - def testParenthesizedCommaInArg(self): - source = """ -class Test { - public: - virtual void Bar(std::function f); -}; -""" - expected = """\ -class MockTest : public Test { -public: -MOCK_METHOD(void, Bar, (std::function f), (override)); -}; -""" - self.assertEqualIgnoreLeadingWhitespace(expected, - self.GenerateMocks(source)) - - def testEnumType(self): - source = """ -class Test { - public: - enum Bar { - BAZ, QUX, QUUX, QUUUX - }; - virtual void Foo(); -}; -""" - expected = """\ -class MockTest : public Test { -public: -MOCK_METHOD(void, Foo, (), (override)); -}; -""" - self.assertEqualIgnoreLeadingWhitespace(expected, - self.GenerateMocks(source)) - - def 
testEnumClassType(self): - source = """ -class Test { - public: - enum class Bar { - BAZ, QUX, QUUX, QUUUX - }; - virtual void Foo(); -}; -""" - expected = """\ -class MockTest : public Test { -public: -MOCK_METHOD(void, Foo, (), (override)); -}; -""" - self.assertEqualIgnoreLeadingWhitespace(expected, - self.GenerateMocks(source)) - - def testStdFunction(self): - source = """ -class Test { - public: - Test(std::function foo) : foo_(foo) {} - - virtual std::function foo(); - - private: - std::function foo_; -}; -""" - expected = """\ -class MockTest : public Test { -public: -MOCK_METHOD(std::function, foo, (), (override)); -}; -""" - self.assertEqualIgnoreLeadingWhitespace(expected, - self.GenerateMocks(source)) - - -if __name__ == '__main__': - unittest.main() diff --git a/MicroBenchmarks/libs/benchmark/googletest/googlemock/scripts/generator/cpp/keywords.py b/MicroBenchmarks/libs/benchmark/googletest/googlemock/scripts/generator/cpp/keywords.py deleted file mode 100755 --- a/MicroBenchmarks/libs/benchmark/googletest/googlemock/scripts/generator/cpp/keywords.py +++ /dev/null @@ -1,56 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2007 Neal Norwitz -# Portions Copyright 2007 Google Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""C++ keywords and helper utilities for determining keywords.""" - -try: - # Python 3.x - import builtins -except ImportError: - # Python 2.x - import __builtin__ as builtins - - -if not hasattr(builtins, 'set'): - # Nominal support for Python 2.3. - from sets import Set as set - - -TYPES = set('bool char int long short double float void wchar_t unsigned signed'.split()) -TYPE_MODIFIERS = set('auto register const inline extern static virtual volatile mutable'.split()) -ACCESS = set('public protected private friend'.split()) - -CASTS = set('static_cast const_cast dynamic_cast reinterpret_cast'.split()) - -OTHERS = set('true false asm class namespace using explicit this operator sizeof'.split()) -OTHER_TYPES = set('new delete typedef struct union enum typeid typename template'.split()) - -CONTROL = set('case switch default if else return goto'.split()) -EXCEPTION = set('try catch throw'.split()) -LOOP = set('while do for break continue'.split()) - -ALL = TYPES | TYPE_MODIFIERS | ACCESS | CASTS | OTHERS | OTHER_TYPES | CONTROL | EXCEPTION | LOOP - - -def IsKeyword(token): - return token in ALL - -def IsBuiltinType(token): - if token in ('virtual', 'inline'): - # These only apply to methods, they can't be types by themselves. - return False - return token in TYPES or token in TYPE_MODIFIERS diff --git a/MicroBenchmarks/libs/benchmark/googletest/googlemock/scripts/generator/cpp/tokenize.py b/MicroBenchmarks/libs/benchmark/googletest/googlemock/scripts/generator/cpp/tokenize.py deleted file mode 100755 --- a/MicroBenchmarks/libs/benchmark/googletest/googlemock/scripts/generator/cpp/tokenize.py +++ /dev/null @@ -1,284 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2007 Neal Norwitz -# Portions Copyright 2007 Google Inc. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Tokenize C++ source code.""" - -try: - # Python 3.x - import builtins -except ImportError: - # Python 2.x - import __builtin__ as builtins - - -import sys - -from cpp import utils - - -if not hasattr(builtins, 'set'): - # Nominal support for Python 2.3. - from sets import Set as set - - -# Add $ as a valid identifier char since so much code uses it. -_letters = 'abcdefghijklmnopqrstuvwxyz' -VALID_IDENTIFIER_CHARS = set(_letters + _letters.upper() + '_0123456789$') -HEX_DIGITS = set('0123456789abcdefABCDEF') -INT_OR_FLOAT_DIGITS = set('01234567890eE-+') - - -# C++0x string preffixes. -_STR_PREFIXES = set(('R', 'u8', 'u8R', 'u', 'uR', 'U', 'UR', 'L', 'LR')) - - -# Token types. -UNKNOWN = 'UNKNOWN' -SYNTAX = 'SYNTAX' -CONSTANT = 'CONSTANT' -NAME = 'NAME' -PREPROCESSOR = 'PREPROCESSOR' - -# Where the token originated from. This can be used for backtracking. -# It is always set to WHENCE_STREAM in this code. -WHENCE_STREAM, WHENCE_QUEUE = range(2) - - -class Token(object): - """Data container to represent a C++ token. - - Tokens can be identifiers, syntax char(s), constants, or - pre-processor directives. - - start contains the index of the first char of the token in the source - end contains the index of the last char of the token in the source - """ - - def __init__(self, token_type, name, start, end): - self.token_type = token_type - self.name = name - self.start = start - self.end = end - self.whence = WHENCE_STREAM - - def __str__(self): - if not utils.DEBUG: - return 'Token(%r)' % self.name - return 'Token(%r, %s, %s)' % (self.name, self.start, self.end) - - __repr__ = __str__ - - -def _GetString(source, start, i): - i = source.find('"', i+1) - while source[i-1] == '\\': - # Count the trailing backslashes. - backslash_count = 1 - j = i - 2 - while source[j] == '\\': - backslash_count += 1 - j -= 1 - # When trailing backslashes are even, they escape each other. - if (backslash_count % 2) == 0: - break - i = source.find('"', i+1) - return i + 1 - - -def _GetChar(source, start, i): - # NOTE(nnorwitz): may not be quite correct, should be good enough. - i = source.find("'", i+1) - while source[i-1] == '\\': - # Need to special case '\\'. - if (i - 2) > start and source[i-2] == '\\': - break - i = source.find("'", i+1) - # Try to handle unterminated single quotes (in a #if 0 block). - if i < 0: - i = start - return i + 1 - - -def GetTokens(source): - """Returns a sequence of Tokens. - - Args: - source: string of C++ source code. - - Yields: - Token that represents the next token in the source. - """ - # Cache various valid character sets for speed. - valid_identifier_chars = VALID_IDENTIFIER_CHARS - hex_digits = HEX_DIGITS - int_or_float_digits = INT_OR_FLOAT_DIGITS - int_or_float_digits2 = int_or_float_digits | set('.') - - # Only ignore errors while in a #if 0 block. - ignore_errors = False - count_ifs = 0 - - i = 0 - end = len(source) - while i < end: - # Skip whitespace. 
- while i < end and source[i].isspace(): - i += 1 - if i >= end: - return - - token_type = UNKNOWN - start = i - c = source[i] - if c.isalpha() or c == '_': # Find a string token. - token_type = NAME - while source[i] in valid_identifier_chars: - i += 1 - # String and character constants can look like a name if - # they are something like L"". - if (source[i] == "'" and (i - start) == 1 and - source[start:i] in 'uUL'): - # u, U, and L are valid C++0x character preffixes. - token_type = CONSTANT - i = _GetChar(source, start, i) - elif source[i] == "'" and source[start:i] in _STR_PREFIXES: - token_type = CONSTANT - i = _GetString(source, start, i) - elif c == '/' and source[i+1] == '/': # Find // comments. - i = source.find('\n', i) - if i == -1: # Handle EOF. - i = end - continue - elif c == '/' and source[i+1] == '*': # Find /* comments. */ - i = source.find('*/', i) + 2 - continue - elif c in ':+-<>&|*=': # : or :: (plus other chars). - token_type = SYNTAX - i += 1 - new_ch = source[i] - if new_ch == c and c != '>': # Treat ">>" as two tokens. - i += 1 - elif c == '-' and new_ch == '>': - i += 1 - elif new_ch == '=': - i += 1 - elif c in '()[]{}~!?^%;/.,': # Handle single char tokens. - token_type = SYNTAX - i += 1 - if c == '.' and source[i].isdigit(): - token_type = CONSTANT - i += 1 - while source[i] in int_or_float_digits: - i += 1 - # Handle float suffixes. - for suffix in ('l', 'f'): - if suffix == source[i:i+1].lower(): - i += 1 - break - elif c.isdigit(): # Find integer. - token_type = CONSTANT - if c == '0' and source[i+1] in 'xX': - # Handle hex digits. - i += 2 - while source[i] in hex_digits: - i += 1 - else: - while source[i] in int_or_float_digits2: - i += 1 - # Handle integer (and float) suffixes. - for suffix in ('ull', 'll', 'ul', 'l', 'f', 'u'): - size = len(suffix) - if suffix == source[i:i+size].lower(): - i += size - break - elif c == '"': # Find string. - token_type = CONSTANT - i = _GetString(source, start, i) - elif c == "'": # Find char. - token_type = CONSTANT - i = _GetChar(source, start, i) - elif c == '#': # Find pre-processor command. - token_type = PREPROCESSOR - got_if = source[i:i+3] == '#if' and source[i+3:i+4].isspace() - if got_if: - count_ifs += 1 - elif source[i:i+6] == '#endif': - count_ifs -= 1 - if count_ifs == 0: - ignore_errors = False - - # TODO(nnorwitz): handle preprocessor statements (\ continuations). - while 1: - i1 = source.find('\n', i) - i2 = source.find('//', i) - i3 = source.find('/*', i) - i4 = source.find('"', i) - # NOTE(nnorwitz): doesn't handle comments in #define macros. - # Get the first important symbol (newline, comment, EOF/end). - i = min([x for x in (i1, i2, i3, i4, end) if x != -1]) - - # Handle #include "dir//foo.h" properly. - if source[i] == '"': - i = source.find('"', i+1) + 1 - assert i > 0 - continue - # Keep going if end of the line and the line ends with \. - if not (i == i1 and source[i-1] == '\\'): - if got_if: - condition = source[start+4:i].lstrip() - if (condition.startswith('0') or - condition.startswith('(0)')): - ignore_errors = True - break - i += 1 - elif c == '\\': # Handle \ in code. - # This is different from the pre-processor \ handling. - i += 1 - continue - elif ignore_errors: - # The tokenizer seems to be in pretty good shape. This - # raise is conditionally disabled so that bogus code - # in an #if 0 block can be handled. Since we will ignore - # it anyways, this is probably fine. So disable the - # exception and return the bogus char. 
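To make the `ignore_errors` escape hatch concrete, here is a hypothetical input that exercises it: the stray backtick is not a valid C++ token, but because `#if 0` sets `ignore_errors`, the tokenizer yields it as an UNKNOWN token instead of raising.

```cpp
#if 0
  stray ` character that would otherwise trigger the invalid-token error
#endif
int answer = 42;  // tokenized normally: NAME NAME SYNTAX CONSTANT SYNTAX
```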
- i += 1 - else: - sys.stderr.write('Got invalid token in %s @ %d token:%s: %r\n' % - ('?', i, c, source[i-10:i+10])) - raise RuntimeError('unexpected token') - - if i <= 0: - print('Invalid index, exiting now.') - return - yield Token(token_type, source[start:i], start, i) - - -if __name__ == '__main__': - def main(argv): - """Driver mostly for testing purposes.""" - for filename in argv[1:]: - source = utils.ReadFile(filename) - if source is None: - continue - - for token in GetTokens(source): - print('%-12s: %s' % (token.token_type, token.name)) - # print('\r%6.2f%%' % (100.0 * index / token.end),) - sys.stdout.write('\n') - - - main(sys.argv) diff --git a/MicroBenchmarks/libs/benchmark/googletest/googlemock/scripts/generator/cpp/utils.py b/MicroBenchmarks/libs/benchmark/googletest/googlemock/scripts/generator/cpp/utils.py deleted file mode 100755 --- a/MicroBenchmarks/libs/benchmark/googletest/googlemock/scripts/generator/cpp/utils.py +++ /dev/null @@ -1,37 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2007 Neal Norwitz -# Portions Copyright 2007 Google Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Generic utilities for C++ parsing.""" - -import sys - -# Set to True to see the start/end token indices. -DEBUG = True - - -def ReadFile(filename, print_error=True): - """Returns the contents of a file.""" - try: - fp = open(filename) - try: - return fp.read() - finally: - fp.close() - except IOError: - if print_error: - print('Error reading %s: %s' % (filename, sys.exc_info()[1])) - return None diff --git a/MicroBenchmarks/libs/benchmark/googletest/googlemock/scripts/generator/gmock_gen.py b/MicroBenchmarks/libs/benchmark/googletest/googlemock/scripts/generator/gmock_gen.py deleted file mode 100755 --- a/MicroBenchmarks/libs/benchmark/googletest/googlemock/scripts/generator/gmock_gen.py +++ /dev/null @@ -1,30 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2008 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Driver for starting up Google Mock class generator.""" - - -import os -import sys - -if __name__ == '__main__': - # Add the directory of this script to the path so we can import gmock_class. - sys.path.append(os.path.dirname(__file__)) - - from cpp import gmock_class - # Fix the docstring in case they require the usage. 
- gmock_class.__doc__ = gmock_class.__doc__.replace('gmock_class.py', __file__) - gmock_class.main() diff --git a/MicroBenchmarks/libs/benchmark/googletest/googlemock/src/gmock-internal-utils.cc b/MicroBenchmarks/libs/benchmark/googletest/googlemock/src/gmock-internal-utils.cc --- a/MicroBenchmarks/libs/benchmark/googletest/googlemock/src/gmock-internal-utils.cc +++ b/MicroBenchmarks/libs/benchmark/googletest/googlemock/src/gmock-internal-utils.cc @@ -37,8 +37,15 @@ #include "gmock/internal/gmock-internal-utils.h" #include + +#include +#include +#include +#include #include // NOLINT #include +#include + #include "gmock/gmock.h" #include "gmock/internal/gmock-port.h" #include "gtest/gtest.h" @@ -48,21 +55,22 @@ // Joins a vector of strings as if they are fields of a tuple; returns // the joined string. -GTEST_API_ std::string JoinAsTuple(const Strings& fields) { - switch (fields.size()) { - case 0: - return ""; - case 1: - return fields[0]; - default: - std::string result = "(" + fields[0]; - for (size_t i = 1; i < fields.size(); i++) { - result += ", "; - result += fields[i]; - } - result += ")"; - return result; +GTEST_API_ std::string JoinAsKeyValueTuple( + const std::vector& names, const Strings& values) { + GTEST_CHECK_(names.size() == values.size()); + if (values.empty()) { + return ""; + } + const auto build_one = [&](const size_t i) { + return std::string(names[i]) + ": " + values[i]; + }; + std::string result = "(" + build_one(0); + for (size_t i = 1; i < values.size(); i++) { + result += ", "; + result += build_one(i); } + result += ")"; + return result; } // Converts an identifier name to a space-separated list of lower-case @@ -126,10 +134,10 @@ // Returns true if and only if a log with the given severity is visible // according to the --gmock_verbose flag. GTEST_API_ bool LogIsVisible(LogSeverity severity) { - if (GMOCK_FLAG(verbose) == kInfoVerbosity) { + if (GMOCK_FLAG_GET(verbose) == kInfoVerbosity) { // Always show the log if --gmock_verbose=info. return true; - } else if (GMOCK_FLAG(verbose) == kErrorVerbosity) { + } else if (GMOCK_FLAG_GET(verbose) == kErrorVerbosity) { // Always hide it if --gmock_verbose=error. return false; } else { @@ -196,5 +204,53 @@ "the variable in various places."); } +constexpr char UnBase64Impl(char c, const char* const base64, char carry) { + return *base64 == 0 ? static_cast(65) + : *base64 == c ? 
carry + : UnBase64Impl(c, base64 + 1, carry + 1); +} + +template +constexpr std::array UnBase64Impl(IndexSequence, + const char* const base64) { + return {{UnBase64Impl(static_cast(I), base64, 0)...}}; +} + +constexpr std::array UnBase64(const char* const base64) { + return UnBase64Impl(MakeIndexSequence<256>{}, base64); +} + +static constexpr char kBase64[] = + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"; +static constexpr std::array kUnBase64 = UnBase64(kBase64); + +bool Base64Unescape(const std::string& encoded, std::string* decoded) { + decoded->clear(); + size_t encoded_len = encoded.size(); + decoded->reserve(3 * (encoded_len / 4) + (encoded_len % 4)); + int bit_pos = 0; + char dst = 0; + for (int src : encoded) { + if (std::isspace(src) || src == '=') { + continue; + } + char src_bin = kUnBase64[static_cast(src)]; + if (src_bin >= 64) { + decoded->clear(); + return false; + } + if (bit_pos == 0) { + dst |= src_bin << 2; + bit_pos = 6; + } else { + dst |= static_cast(src_bin >> (bit_pos - 2)); + decoded->push_back(dst); + dst = static_cast(src_bin << (10 - bit_pos)); + bit_pos = (bit_pos + 6) % 8; + } + } + return true; +} + } // namespace internal } // namespace testing diff --git a/MicroBenchmarks/libs/benchmark/googletest/googlemock/src/gmock-matchers.cc b/MicroBenchmarks/libs/benchmark/googletest/googlemock/src/gmock-matchers.cc --- a/MicroBenchmarks/libs/benchmark/googletest/googlemock/src/gmock-matchers.cc +++ b/MicroBenchmarks/libs/benchmark/googletest/googlemock/src/gmock-matchers.cc @@ -36,9 +36,11 @@ #include "gmock/gmock-matchers.h" #include + #include #include #include +#include namespace testing { namespace internal { @@ -48,11 +50,13 @@ // 'negation' is false; otherwise returns the description of the // negation of the matcher. 'param_values' contains a list of strings // that are the print-out of the matcher's parameters. -GTEST_API_ std::string FormatMatcherDescription(bool negation, - const char* matcher_name, - const Strings& param_values) { +GTEST_API_ std::string FormatMatcherDescription( + bool negation, const char* matcher_name, + const std::vector& param_names, const Strings& param_values) { std::string result = ConvertIdentifierNameToWords(matcher_name); - if (param_values.size() >= 1) result += " " + JoinAsTuple(param_values); + if (param_values.size() >= 1) { + result += " " + JoinAsKeyValueTuple(param_names, param_values); + } return negation ? "not (" + result + ")" : result; } diff --git a/MicroBenchmarks/libs/benchmark/googletest/googlemock/src/gmock-spec-builders.cc b/MicroBenchmarks/libs/benchmark/googletest/googlemock/src/gmock-spec-builders.cc --- a/MicroBenchmarks/libs/benchmark/googletest/googlemock/src/gmock-spec-builders.cc +++ b/MicroBenchmarks/libs/benchmark/googletest/googlemock/src/gmock-spec-builders.cc @@ -264,10 +264,10 @@ ".Times() cannot appear " "more than once in an EXPECT_CALL()."); } else { - ExpectSpecProperty(last_clause_ < kTimes, - ".Times() cannot appear after " - ".InSequence(), .WillOnce(), .WillRepeatedly(), " - "or .RetiresOnSaturation()."); + ExpectSpecProperty( + last_clause_ < kTimes, + ".Times() may only appear *before* .InSequence(), .WillOnce(), " + ".WillRepeatedly(), or .RetiresOnSaturation(), not after."); } last_clause_ = kTimes; @@ -283,7 +283,7 @@ void ReportUninterestingCall(CallReaction reaction, const std::string& msg) { // Include a stack trace only if --gmock_verbose=info is specified. const int stack_frames_to_skip = - GMOCK_FLAG(verbose) == kInfoVerbosity ? 
3 : -1; + GMOCK_FLAG_GET(verbose) == kInfoVerbosity ? 3 : -1; switch (reaction) { case kAllow: Log(kInfo, msg, stack_frames_to_skip); @@ -613,8 +613,7 @@ // object alive. Therefore we report any living object as test // failure, unless the user explicitly asked us to ignore it. ~MockObjectRegistry() { - if (!GMOCK_FLAG(catch_leaked_mocks)) - return; + if (!GMOCK_FLAG_GET(catch_leaked_mocks)) return; int leaked_count = 0; for (StateMap::const_iterator it = states_.begin(); it != states_.end(); @@ -716,9 +715,10 @@ const void* mock_obj) GTEST_LOCK_EXCLUDED_(internal::g_gmock_mutex) { internal::MutexLock l(&internal::g_gmock_mutex); - return (g_uninteresting_call_reaction.count(mock_obj) == 0) ? - internal::intToCallReaction(GMOCK_FLAG(default_mock_behavior)) : - g_uninteresting_call_reaction[mock_obj]; + return (g_uninteresting_call_reaction.count(mock_obj) == 0) + ? internal::intToCallReaction( + GMOCK_FLAG_GET(default_mock_behavior)) + : g_uninteresting_call_reaction[mock_obj]; } // Tells Google Mock to ignore mock_obj when checking for leaked mock diff --git a/MicroBenchmarks/libs/benchmark/googletest/googlemock/src/gmock.cc b/MicroBenchmarks/libs/benchmark/googletest/googlemock/src/gmock.cc --- a/MicroBenchmarks/libs/benchmark/googletest/googlemock/src/gmock.cc +++ b/MicroBenchmarks/libs/benchmark/googletest/googlemock/src/gmock.cc @@ -31,13 +31,11 @@ #include "gmock/gmock.h" #include "gmock/internal/gmock-port.h" -namespace testing { - GMOCK_DEFINE_bool_(catch_leaked_mocks, true, "true if and only if Google Mock should report leaked " "mock objects as failures."); -GMOCK_DEFINE_string_(verbose, internal::kWarningVerbosity, +GMOCK_DEFINE_string_(verbose, testing::internal::kWarningVerbosity, "Controls how verbose Google Mock's output is." " Valid values:\n" " info - prints all messages.\n" @@ -51,6 +49,7 @@ " 1 - by default, mocks act as NaggyMocks.\n" " 2 - by default, mocks act as StrictMocks."); +namespace testing { namespace internal { // Parses a string as a command line flag. The string should have the @@ -59,18 +58,18 @@ // // Returns the value of the flag, or NULL if the parsing failed. static const char* ParseGoogleMockFlagValue(const char* str, - const char* flag, + const char* flag_name, bool def_optional) { // str and flag must not be NULL. - if (str == nullptr || flag == nullptr) return nullptr; + if (str == nullptr || flag_name == nullptr) return nullptr; // The flag must start with "--gmock_". - const std::string flag_str = std::string("--gmock_") + flag; - const size_t flag_len = flag_str.length(); - if (strncmp(str, flag_str.c_str(), flag_len) != 0) return nullptr; + const std::string flag_name_str = std::string("--gmock_") + flag_name; + const size_t flag_name_len = flag_name_str.length(); + if (strncmp(str, flag_name_str.c_str(), flag_name_len) != 0) return nullptr; // Skips the flag name. - const char* flag_end = str + flag_len; + const char* flag_end = str + flag_name_len; // When def_optional is true, it's OK to not have a "=value" part. if (def_optional && (flag_end[0] == '\0')) { @@ -91,10 +90,10 @@ // // On success, stores the value of the flag in *value, and returns // true. On failure, returns false without changing *value. -static bool ParseGoogleMockBoolFlag(const char* str, const char* flag, - bool* value) { +static bool ParseGoogleMockFlag(const char* str, const char* flag_name, + bool* value) { // Gets the value of the flag as a string. 
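The flag spellings this parser accepts follow the usual gtest conventions; a sketch of the observable behavior, with scaffolding `main()` added only for the example:

```cpp
#include "gmock/gmock.h"

int main(int argc, char** argv) {
  // InitGoogleMock consumes recognized --gmock_* arguments:
  //   --gmock_verbose=info          -> string flag set to "info"
  //   --gmock_catch_leaked_mocks    -> bool flag; the bare form means true
  //   --gmock_catch_leaked_mocks=0  -> explicit false
  //   --gmock_verbose               -> not recognized; string flags
  //                                    require an "=value" part
  testing::InitGoogleMock(&argc, argv);
  return RUN_ALL_TESTS();
}
```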
- const char* const value_str = ParseGoogleMockFlagValue(str, flag, true); + const char* const value_str = ParseGoogleMockFlagValue(str, flag_name, true); // Aborts if the parsing failed. if (value_str == nullptr) return false; @@ -110,10 +109,10 @@ // On success, stores the value of the flag in *value, and returns // true. On failure, returns false without changing *value. template -static bool ParseGoogleMockStringFlag(const char* str, const char* flag, - String* value) { +static bool ParseGoogleMockFlag(const char* str, const char* flag_name, + String* value) { // Gets the value of the flag as a string. - const char* const value_str = ParseGoogleMockFlagValue(str, flag, false); + const char* const value_str = ParseGoogleMockFlagValue(str, flag_name, false); // Aborts if the parsing failed. if (value_str == nullptr) return false; @@ -123,17 +122,17 @@ return true; } -static bool ParseGoogleMockIntFlag(const char* str, const char* flag, - int32_t* value) { +static bool ParseGoogleMockFlag(const char* str, const char* flag_name, + int32_t* value) { // Gets the value of the flag as a string. - const char* const value_str = ParseGoogleMockFlagValue(str, flag, true); + const char* const value_str = ParseGoogleMockFlagValue(str, flag_name, true); // Aborts if the parsing failed. if (value_str == nullptr) return false; // Sets *value to the value of the flag. - return ParseInt32(Message() << "The value of flag --" << flag, - value_str, value); + return ParseInt32(Message() << "The value of flag --" << flag_name, value_str, + value); } // The internal implementation of InitGoogleMock(). @@ -152,11 +151,22 @@ const char* const arg = arg_string.c_str(); // Do we see a Google Mock flag? - if (ParseGoogleMockBoolFlag(arg, "catch_leaked_mocks", - &GMOCK_FLAG(catch_leaked_mocks)) || - ParseGoogleMockStringFlag(arg, "verbose", &GMOCK_FLAG(verbose)) || - ParseGoogleMockIntFlag(arg, "default_mock_behavior", - &GMOCK_FLAG(default_mock_behavior))) { + bool found_gmock_flag = false; + +#define GMOCK_INTERNAL_PARSE_FLAG(flag_name) \ + if (!found_gmock_flag) { \ + auto value = GMOCK_FLAG_GET(flag_name); \ + if (ParseGoogleMockFlag(arg, #flag_name, &value)) { \ + GMOCK_FLAG_SET(flag_name, value); \ + found_gmock_flag = true; \ + } \ + } + + GMOCK_INTERNAL_PARSE_FLAG(catch_leaked_mocks) + GMOCK_INTERNAL_PARSE_FLAG(verbose) + GMOCK_INTERNAL_PARSE_FLAG(default_mock_behavior) + + if (found_gmock_flag) { // Yes. Shift the remainder of the argv list left by one. Note // that argv has (*argc + 1) elements, the last one always being // NULL. 
The following loop moves the trailing NULL element as diff --git a/MicroBenchmarks/libs/benchmark/googletest/googlemock/test/BUILD.bazel b/MicroBenchmarks/libs/benchmark/googletest/googlemock/test/BUILD.bazel --- a/MicroBenchmarks/libs/benchmark/googletest/googlemock/test/BUILD.bazel +++ b/MicroBenchmarks/libs/benchmark/googletest/googlemock/test/BUILD.bazel @@ -30,7 +30,6 @@ # # Bazel Build for Google C++ Testing Framework(Google Test)-googlemock -load("@rules_cc//cc:defs.bzl", "cc_binary", "cc_test") load("@rules_python//python:defs.bzl", "py_library", "py_test") licenses(["notice"]) diff --git a/MicroBenchmarks/libs/benchmark/googletest/googlemock/test/gmock-actions_test.cc b/MicroBenchmarks/libs/benchmark/googletest/googlemock/test/gmock-actions_test.cc --- a/MicroBenchmarks/libs/benchmark/googletest/googlemock/test/gmock-actions_test.cc +++ b/MicroBenchmarks/libs/benchmark/googletest/googlemock/test/gmock-actions_test.cc @@ -384,7 +384,7 @@ EXPECT_EQ(5, action.Perform(std::make_tuple(true, 5))); } -// Tests that Action can be contructed from a pointer to +// Tests that Action can be constructed from a pointer to // ActionInterface. TEST(ActionTest, CanBeConstructedFromActionInterface) { Action action(new MyActionImpl); diff --git a/MicroBenchmarks/libs/benchmark/googletest/googlemock/test/gmock-internal-utils_test.cc b/MicroBenchmarks/libs/benchmark/googletest/googlemock/test/gmock-internal-utils_test.cc --- a/MicroBenchmarks/libs/benchmark/googletest/googlemock/test/gmock-internal-utils_test.cc +++ b/MicroBenchmarks/libs/benchmark/googletest/googlemock/test/gmock-internal-utils_test.cc @@ -70,24 +70,23 @@ namespace { -TEST(JoinAsTupleTest, JoinsEmptyTuple) { - EXPECT_EQ("", JoinAsTuple(Strings())); +TEST(JoinAsKeyValueTupleTest, JoinsEmptyTuple) { + EXPECT_EQ("", JoinAsKeyValueTuple({}, Strings())); } -TEST(JoinAsTupleTest, JoinsOneTuple) { - const char* fields[] = {"1"}; - EXPECT_EQ("1", JoinAsTuple(Strings(fields, fields + 1))); +TEST(JoinAsKeyValueTupleTest, JoinsOneTuple) { + EXPECT_EQ("(a: 1)", JoinAsKeyValueTuple({"a"}, {"1"})); } -TEST(JoinAsTupleTest, JoinsTwoTuple) { - const char* fields[] = {"1", "a"}; - EXPECT_EQ("(1, a)", JoinAsTuple(Strings(fields, fields + 2))); +TEST(JoinAsKeyValueTupleTest, JoinsTwoTuple) { + EXPECT_EQ("(a: 1, b: 2)", JoinAsKeyValueTuple({"a", "b"}, {"1", "2"})); } -TEST(JoinAsTupleTest, JoinsTenTuple) { - const char* fields[] = {"1", "2", "3", "4", "5", "6", "7", "8", "9", "10"}; - EXPECT_EQ("(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)", - JoinAsTuple(Strings(fields, fields + 10))); +TEST(JoinAsKeyValueTupleTest, JoinsTenTuple) { + EXPECT_EQ( + "(a: 1, b: 2, c: 3, d: 4, e: 5, f: 6, g: 7, h: 8, i: 9, j: 10)", + JoinAsKeyValueTuple({"a", "b", "c", "d", "e", "f", "g", "h", "i", "j"}, + {"1", "2", "3", "4", "5", "6", "7", "8", "9", "10"})); } TEST(ConvertIdentifierNameToWordsTest, WorksWhenNameContainsNoWord) { @@ -140,6 +139,12 @@ EXPECT_EQ(&n, GetRawPointer(&n)); } +TEST(GetRawPointerTest, WorksForStdReferenceWrapper) { + int n = 1; + EXPECT_EQ(&n, GetRawPointer(std::ref(n))); + EXPECT_EQ(&n, GetRawPointer(std::cref(n))); +} + // Tests KindOf. 
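The `JoinAsKeyValueTuple` change is visible wherever matcher descriptions render their parameters: values are now labeled with the parameter names. A small sketch using a hypothetical matcher:

```cpp
#include "gmock/gmock.h"

MATCHER_P2(IsBetween, low, hi, "") { return low <= arg && arg <= hi; }

// Describing IsBetween(5, 8) now yields "is between (low: 5, hi: 8)",
// where the old JoinAsTuple formatting produced "is between (5, 8)".
```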
class Base {}; @@ -361,27 +366,27 @@ class LogIsVisibleTest : public ::testing::Test { protected: - void SetUp() override { original_verbose_ = GMOCK_FLAG(verbose); } + void SetUp() override { original_verbose_ = GMOCK_FLAG_GET(verbose); } - void TearDown() override { GMOCK_FLAG(verbose) = original_verbose_; } + void TearDown() override { GMOCK_FLAG_SET(verbose, original_verbose_); } std::string original_verbose_; }; TEST_F(LogIsVisibleTest, AlwaysReturnsTrueIfVerbosityIsInfo) { - GMOCK_FLAG(verbose) = kInfoVerbosity; + GMOCK_FLAG_SET(verbose, kInfoVerbosity); EXPECT_TRUE(LogIsVisible(kInfo)); EXPECT_TRUE(LogIsVisible(kWarning)); } TEST_F(LogIsVisibleTest, AlwaysReturnsFalseIfVerbosityIsError) { - GMOCK_FLAG(verbose) = kErrorVerbosity; + GMOCK_FLAG_SET(verbose, kErrorVerbosity); EXPECT_FALSE(LogIsVisible(kInfo)); EXPECT_FALSE(LogIsVisible(kWarning)); } TEST_F(LogIsVisibleTest, WorksWhenVerbosityIsWarning) { - GMOCK_FLAG(verbose) = kWarningVerbosity; + GMOCK_FLAG_SET(verbose, kWarningVerbosity); EXPECT_FALSE(LogIsVisible(kInfo)); EXPECT_TRUE(LogIsVisible(kWarning)); } @@ -394,8 +399,8 @@ // and log severity. void TestLogWithSeverity(const std::string& verbosity, LogSeverity severity, bool should_print) { - const std::string old_flag = GMOCK_FLAG(verbose); - GMOCK_FLAG(verbose) = verbosity; + const std::string old_flag = GMOCK_FLAG_GET(verbose); + GMOCK_FLAG_SET(verbose, verbosity); CaptureStdout(); Log(severity, "Test log.\n", 0); if (should_print) { @@ -407,18 +412,18 @@ } else { EXPECT_STREQ("", GetCapturedStdout().c_str()); } - GMOCK_FLAG(verbose) = old_flag; + GMOCK_FLAG_SET(verbose, old_flag); } // Tests that when the stack_frames_to_skip parameter is negative, // Log() doesn't include the stack trace in the output. TEST(LogTest, NoStackTraceWhenStackFramesToSkipIsNegative) { - const std::string saved_flag = GMOCK_FLAG(verbose); - GMOCK_FLAG(verbose) = kInfoVerbosity; + const std::string saved_flag = GMOCK_FLAG_GET(verbose); + GMOCK_FLAG_SET(verbose, kInfoVerbosity); CaptureStdout(); Log(kInfo, "Test log.\n", -1); EXPECT_STREQ("\nTest log.\n", GetCapturedStdout().c_str()); - GMOCK_FLAG(verbose) = saved_flag; + GMOCK_FLAG_SET(verbose, saved_flag); } struct MockStackTraceGetter : testing::internal::OsStackTraceGetterInterface { @@ -499,11 +504,11 @@ // Verifies that Log() behaves correctly for the given verbosity level // and log severity. 
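These fixtures repeat a save/set/restore dance around the verbosity flag by hand. The same pattern could be expressed as an RAII helper; this `ScopedVerbosity` class is a hypothetical sketch, not part of gMock:

```cpp
#include <string>
#include "gmock/gmock.h"

// Saves the current --gmock_verbose value, overrides it for the scope,
// and restores it on destruction (exception-safe, unlike manual restore).
class ScopedVerbosity {
 public:
  explicit ScopedVerbosity(const std::string& level)
      : saved_(GMOCK_FLAG_GET(verbose)) {
    GMOCK_FLAG_SET(verbose, level);
  }
  ~ScopedVerbosity() { GMOCK_FLAG_SET(verbose, saved_); }

 private:
  std::string saved_;
};
```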
std::string GrabOutput(void(*logger)(), const char* verbosity) { - const std::string saved_flag = GMOCK_FLAG(verbose); - GMOCK_FLAG(verbose) = verbosity; + const std::string saved_flag = GMOCK_FLAG_GET(verbose); + GMOCK_FLAG_SET(verbose, verbosity); CaptureStdout(); logger(); - GMOCK_FLAG(verbose) = saved_flag; + GMOCK_FLAG_SET(verbose, saved_flag); return GetCapturedStdout(); } @@ -716,6 +721,46 @@ F::MakeResultIgnoredValue>::value)); } +TEST(Base64Unescape, InvalidString) { + std::string unescaped; + EXPECT_FALSE(Base64Unescape("(invalid)", &unescaped)); +} + +TEST(Base64Unescape, ShortString) { + std::string unescaped; + EXPECT_TRUE(Base64Unescape("SGVsbG8gd29ybGQh", &unescaped)); + EXPECT_EQ("Hello world!", unescaped); +} + +TEST(Base64Unescape, ShortStringWithPadding) { + std::string unescaped; + EXPECT_TRUE(Base64Unescape("SGVsbG8gd29ybGQ=", &unescaped)); + EXPECT_EQ("Hello world", unescaped); +} + +TEST(Base64Unescape, ShortStringWithoutPadding) { + std::string unescaped; + EXPECT_TRUE(Base64Unescape("SGVsbG8gd29ybGQ", &unescaped)); + EXPECT_EQ("Hello world", unescaped); +} + +TEST(Base64Unescape, LongStringWithWhiteSpaces) { + std::string escaped = + R"(TWFuIGlzIGRpc3Rpbmd1aXNoZWQsIG5vdCBvbmx5IGJ5IGhpcyByZWFzb24sIGJ1dCBieSB0aGlz + IHNpbmd1bGFyIHBhc3Npb24gZnJvbSBvdGhlciBhbmltYWxzLCB3aGljaCBpcyBhIGx1c3Qgb2Yg + dGhlIG1pbmQsIHRoYXQgYnkgYSBwZXJzZXZlcmFuY2Ugb2YgZGVsaWdodCBpbiB0aGUgY29udGlu + dWVkIGFuZCBpbmRlZmF0aWdhYmxlIGdlbmVyYXRpb24gb2Yga25vd2xlZGdlLCBleGNlZWRzIHRo + ZSBzaG9ydCB2ZWhlbWVuY2Ugb2YgYW55IGNhcm5hbCBwbGVhc3VyZS4=)"; + std::string expected = + "Man is distinguished, not only by his reason, but by this singular " + "passion from other animals, which is a lust of the mind, that by a " + "perseverance of delight in the continued and indefatigable generation " + "of knowledge, exceeds the short vehemence of any carnal pleasure."; + std::string unescaped; + EXPECT_TRUE(Base64Unescape(escaped, &unescaped)); + EXPECT_EQ(expected, unescaped); +} + } // namespace } // namespace internal } // namespace testing diff --git a/MicroBenchmarks/libs/benchmark/googletest/googlemock/test/gmock-matchers_test.cc b/MicroBenchmarks/libs/benchmark/googletest/googlemock/test/gmock-matchers_test.cc --- a/MicroBenchmarks/libs/benchmark/googletest/googlemock/test/gmock-matchers_test.cc +++ b/MicroBenchmarks/libs/benchmark/googletest/googlemock/test/gmock-matchers_test.cc @@ -1866,6 +1866,33 @@ EXPECT_EQ("ends with \"Hi\"", Describe(m)); } +// Tests WhenBase64Unescaped. + +TEST(WhenBase64UnescapedTest, MatchesUnescapedBase64Strings) { + const Matcher m1 = WhenBase64Unescaped(EndsWith("!")); + EXPECT_FALSE(m1.Matches("invalid base64")); + EXPECT_FALSE(m1.Matches("aGVsbG8gd29ybGQ=")); // hello world + EXPECT_TRUE(m1.Matches("aGVsbG8gd29ybGQh")); // hello world! + + const Matcher m2 = WhenBase64Unescaped(EndsWith("!")); + EXPECT_FALSE(m2.Matches("invalid base64")); + EXPECT_FALSE(m2.Matches("aGVsbG8gd29ybGQ=")); // hello world + EXPECT_TRUE(m2.Matches("aGVsbG8gd29ybGQh")); // hello world! + +#if GTEST_INTERNAL_HAS_STRING_VIEW + const Matcher m3 = + WhenBase64Unescaped(EndsWith("!")); + EXPECT_FALSE(m3.Matches("invalid base64")); + EXPECT_FALSE(m3.Matches("aGVsbG8gd29ybGQ=")); // hello world + EXPECT_TRUE(m3.Matches("aGVsbG8gd29ybGQh")); // hello world! +#endif // GTEST_INTERNAL_HAS_STRING_VIEW +} + +TEST(WhenBase64UnescapedTest, CanDescribeSelf) { + const Matcher m = WhenBase64Unescaped(EndsWith("!")); + EXPECT_EQ("matches after Base64Unescape ends with \"!\"", Describe(m)); +} + // Tests MatchesRegex(). 
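The `WhenBase64Unescaped` matcher exercised above composes naturally with any string matcher: the inner matcher runs against the decoded payload, and strings that are not valid base64 simply fail to match. A usage sketch (test and variable names are illustrative):

```cpp
#include <string>
#include "gmock/gmock.h"

using ::testing::HasSubstr;
using ::testing::WhenBase64Unescaped;

// "aGVsbG8gd29ybGQh" is base64 for "hello world!", so the inner
// HasSubstr matcher sees the decoded text.
TEST(TokenTest, PayloadDecodesToGreeting) {
  const std::string token = "aGVsbG8gd29ybGQh";
  EXPECT_THAT(token, WhenBase64Unescaped(HasSubstr("hello")));
}
```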
TEST(MatchesRegexTest, MatchesStringMatchingGivenRegex) { @@ -4616,6 +4643,16 @@ "isn't equal to \"foo\"", DescribeNegation(matcher)); } +// Tests that ResultOf() can describe itself when provided a result description. +TEST(ResultOfTest, CanDescribeItselfWithResultDescription) { + Matcher matcher = + ResultOf("string conversion", &IntToStringFunction, StrEq("foo")); + + EXPECT_EQ("whose string conversion is equal to \"foo\"", Describe(matcher)); + EXPECT_EQ("whose string conversion isn't equal to \"foo\"", + DescribeNegation(matcher)); +} + // Tests that ResultOf() can explain the match result. int IntFunction(int input) { return input == 42 ? 80 : 90; } @@ -5397,12 +5434,14 @@ } private: - class ConstIter : public std::iterator { + class ConstIter { public: + using iterator_category = std::input_iterator_tag; + using value_type = T; + using difference_type = ptrdiff_t; + using pointer = const value_type*; + using reference = const value_type&; + ConstIter(const Streamlike* s, typename std::list::iterator pos) : s_(s), pos_(pos) {} @@ -6410,19 +6449,16 @@ TEST(FormatMatcherDescriptionTest, WorksForEmptyDescription) { EXPECT_EQ("is even", - FormatMatcherDescription(false, "IsEven", Strings())); + FormatMatcherDescription(false, "IsEven", {}, Strings())); EXPECT_EQ("not (is even)", - FormatMatcherDescription(true, "IsEven", Strings())); + FormatMatcherDescription(true, "IsEven", {}, Strings())); - const char* params[] = {"5"}; - EXPECT_EQ("equals 5", - FormatMatcherDescription(false, "Equals", - Strings(params, params + 1))); + EXPECT_EQ("equals (a: 5)", + FormatMatcherDescription(false, "Equals", {"a"}, {"5"})); - const char* params2[] = {"5", "8"}; - EXPECT_EQ("is in range (5, 8)", - FormatMatcherDescription(false, "IsInRange", - Strings(params2, params2 + 2))); + EXPECT_EQ( + "is in range (a: 5, b: 8)", + FormatMatcherDescription(false, "IsInRange", {"a", "b"}, {"5", "8"})); } // Tests PolymorphicMatcher::mutable_impl(). @@ -7260,7 +7296,7 @@ EXPECT_EQ("isn't empty", DescribeNegation(m)); } -TEST(ElementsAreTest, CanDescribeNegationOfExpectingOneElment) { +TEST(ElementsAreTest, CanDescribeNegationOfExpectingOneElement) { Matcher&> m = ElementsAre(Gt(5)); EXPECT_EQ( "doesn't have 1 element, or\n" @@ -7781,8 +7817,8 @@ EXPECT_TRUE(m.Matches(36)); EXPECT_FALSE(m.Matches(5)); - EXPECT_EQ("is greater than 32 and 5", Describe(m)); - EXPECT_EQ("not (is greater than 32 and 5)", DescribeNegation(m)); + EXPECT_EQ("is greater than 32 and (n: 5)", Describe(m)); + EXPECT_EQ("not (is greater than 32 and (n: 5))", DescribeNegation(m)); EXPECT_EQ("", Explain(m, 36)); EXPECT_EQ("", Explain(m, 5)); } @@ -7793,8 +7829,8 @@ TEST(MatcherPMacroTest, GeneratesCorrectDescription) { const Matcher m = _is_Greater_Than32and_(5); - EXPECT_EQ("is greater than 32 and 5", Describe(m)); - EXPECT_EQ("not (is greater than 32 and 5)", DescribeNegation(m)); + EXPECT_EQ("is greater than 32 and (n: 5)", Describe(m)); + EXPECT_EQ("not (is greater than 32 and (n: 5))", DescribeNegation(m)); EXPECT_EQ("", Explain(m, 36)); EXPECT_EQ("", Explain(m, 5)); } @@ -7827,7 +7863,8 @@ // likely it will just annoy the user. If the address is // interesting, the user should consider passing the parameter by // pointer instead. - EXPECT_EQ("references uncopyable 1-byte object <31>", Describe(m)); + EXPECT_EQ("references uncopyable (variable: 1-byte object <31>)", + Describe(m)); } // Tests that the body of MATCHER_Pn() can reference the parameter @@ -7878,8 +7915,10 @@ // likely they will just annoy the user. 
If the addresses are // interesting, the user should consider passing the parameters by // pointers instead. - EXPECT_EQ("references any of (1-byte object <31>, 1-byte object <32>)", - Describe(m)); + EXPECT_EQ( + "references any of (variable1: 1-byte object <31>, variable2: 1-byte " + "object <32>)", + Describe(m)); } // Tests that a simple MATCHER_P2() definition works. @@ -7891,8 +7930,9 @@ EXPECT_TRUE(m.Matches(36L)); EXPECT_FALSE(m.Matches(15L)); - EXPECT_EQ("is not in closed range (10, 20)", Describe(m)); - EXPECT_EQ("not (is not in closed range (10, 20))", DescribeNegation(m)); + EXPECT_EQ("is not in closed range (low: 10, hi: 20)", Describe(m)); + EXPECT_EQ("not (is not in closed range (low: 10, hi: 20))", + DescribeNegation(m)); EXPECT_EQ("", Explain(m, 36L)); EXPECT_EQ("", Explain(m, 15L)); } @@ -8378,7 +8418,7 @@ // Explain with matchers const Matcher g1 = AnyOfArray({GreaterThan(1)}); const Matcher g2 = AnyOfArray({GreaterThan(1), GreaterThan(2)}); - // Explains the first positiv match and all prior negative matches... + // Explains the first positive match and all prior negative matches... EXPECT_EQ("which is 1 less than 1", Explain(g1, 0)); EXPECT_EQ("which is the same as 1", Explain(g1, 1)); EXPECT_EQ("which is 1 more than 1", Explain(g1, 2)); @@ -8488,6 +8528,12 @@ ThrowsMessage(HasSubstr("message"))); } +TEST(ThrowsTest, PrintsExceptionWhat) { + EXPECT_THAT( + std::function([]() { throw std::runtime_error("ABC123XYZ"); }), + ThrowsMessage(HasSubstr("ABC123XYZ"))); +} + TEST(ThrowsTest, DoesNotGenerateDuplicateCatchClauseWarning) { EXPECT_THAT(std::function([]() { throw std::exception(); }), Throws()); @@ -8603,15 +8649,6 @@ HasSubstr("throws an exception of an unknown type")); } -TEST_P(ThrowsPredicateTest, FailWrongMessage) { - Matcher> matcher = GetParam(); - StringMatchResultListener listener; - EXPECT_FALSE(matcher.MatchAndExplain( - []() { throw std::runtime_error("wrong message"); }, &listener)); - EXPECT_THAT(listener.str(), HasSubstr("std::runtime_error")); - EXPECT_THAT(listener.str(), Not(HasSubstr("wrong message"))); -} - TEST_P(ThrowsPredicateTest, FailNoThrow) { Matcher> matcher = GetParam(); StringMatchResultListener listener; diff --git a/MicroBenchmarks/libs/benchmark/googletest/googlemock/test/gmock-more-actions_test.cc b/MicroBenchmarks/libs/benchmark/googletest/googlemock/test/gmock-more-actions_test.cc --- a/MicroBenchmarks/libs/benchmark/googletest/googlemock/test/gmock-more-actions_test.cc +++ b/MicroBenchmarks/libs/benchmark/googletest/googlemock/test/gmock-more-actions_test.cc @@ -462,6 +462,12 @@ EXPECT_EQ("seven", a.Perform(std::make_tuple(5, 6, std::string("seven"), 8))); } +TEST(ReturnArgActionTest, WorksForNonConstRefArg0) { + const Action a = ReturnArg<0>(); + std::string s = "12345"; + EXPECT_EQ(&s, &a.Perform(std::forward_as_tuple(s))); +} + TEST(SaveArgActionTest, WorksForSameType) { int result = 0; const Action a1 = SaveArg<0>(&result); diff --git a/MicroBenchmarks/libs/benchmark/googletest/googlemock/test/gmock-nice-strict_test.cc b/MicroBenchmarks/libs/benchmark/googletest/googlemock/test/gmock-nice-strict_test.cc --- a/MicroBenchmarks/libs/benchmark/googletest/googlemock/test/gmock-nice-strict_test.cc +++ b/MicroBenchmarks/libs/benchmark/googletest/googlemock/test/gmock-nice-strict_test.cc @@ -50,7 +50,6 @@ namespace testing { namespace gmock_nice_strict_test { -using testing::GMOCK_FLAG(verbose); using testing::HasSubstr; using testing::NaggyMock; using testing::NiceMock; @@ -140,8 +139,8 @@ // Tests that a raw mock generates warnings 
for uninteresting calls. TEST(RawMockTest, WarningForUninterestingCall) { - const std::string saved_flag = GMOCK_FLAG(verbose); - GMOCK_FLAG(verbose) = "warning"; + const std::string saved_flag = GMOCK_FLAG_GET(verbose); + GMOCK_FLAG_SET(verbose, "warning"); MockFoo raw_foo; @@ -151,14 +150,14 @@ EXPECT_THAT(GetCapturedStdout(), HasSubstr("Uninteresting mock function call")); - GMOCK_FLAG(verbose) = saved_flag; + GMOCK_FLAG_SET(verbose, saved_flag); } // Tests that a raw mock generates warnings for uninteresting calls // that delete the mock object. TEST(RawMockTest, WarningForUninterestingCallAfterDeath) { - const std::string saved_flag = GMOCK_FLAG(verbose); - GMOCK_FLAG(verbose) = "warning"; + const std::string saved_flag = GMOCK_FLAG_GET(verbose); + GMOCK_FLAG_SET(verbose, "warning"); MockFoo* const raw_foo = new MockFoo; @@ -170,7 +169,7 @@ EXPECT_THAT(GetCapturedStdout(), HasSubstr("Uninteresting mock function call")); - GMOCK_FLAG(verbose) = saved_flag; + GMOCK_FLAG_SET(verbose, saved_flag); } // Tests that a raw mock generates informational logs for @@ -178,14 +177,14 @@ TEST(RawMockTest, InfoForUninterestingCall) { MockFoo raw_foo; - const std::string saved_flag = GMOCK_FLAG(verbose); - GMOCK_FLAG(verbose) = "info"; + const std::string saved_flag = GMOCK_FLAG_GET(verbose); + GMOCK_FLAG_SET(verbose, "info"); CaptureStdout(); raw_foo.DoThis(); EXPECT_THAT(GetCapturedStdout(), HasSubstr("Uninteresting mock function call")); - GMOCK_FLAG(verbose) = saved_flag; + GMOCK_FLAG_SET(verbose, saved_flag); } TEST(RawMockTest, IsNaggy_IsNice_IsStrict) { @@ -223,14 +222,14 @@ TEST(NiceMockTest, InfoForUninterestingCall) { NiceMock nice_foo; - const std::string saved_flag = GMOCK_FLAG(verbose); - GMOCK_FLAG(verbose) = "info"; + const std::string saved_flag = GMOCK_FLAG_GET(verbose); + GMOCK_FLAG_SET(verbose, "info"); CaptureStdout(); nice_foo.DoThis(); EXPECT_THAT(GetCapturedStdout(), HasSubstr("Uninteresting mock function call")); - GMOCK_FLAG(verbose) = saved_flag; + GMOCK_FLAG_SET(verbose, saved_flag); } #endif // GTEST_HAS_STREAM_REDIRECTION @@ -326,8 +325,8 @@ // Tests that a naggy mock generates warnings for uninteresting calls. TEST(NaggyMockTest, WarningForUninterestingCall) { - const std::string saved_flag = GMOCK_FLAG(verbose); - GMOCK_FLAG(verbose) = "warning"; + const std::string saved_flag = GMOCK_FLAG_GET(verbose); + GMOCK_FLAG_SET(verbose, "warning"); NaggyMock naggy_foo; @@ -337,14 +336,14 @@ EXPECT_THAT(GetCapturedStdout(), HasSubstr("Uninteresting mock function call")); - GMOCK_FLAG(verbose) = saved_flag; + GMOCK_FLAG_SET(verbose, saved_flag); } // Tests that a naggy mock generates a warning for an uninteresting call // that deletes the mock object. 
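For reference, the three wrappers under test differ only in how they react to a call that has no matching EXPECT_CALL. A minimal sketch, where `MockFoo` stands in for the mock class defined in this test file:

```cpp
#include "gmock/gmock.h"

void Demo() {
  // Uninteresting call handling, per wrapper:
  testing::NiceMock<MockFoo> nice;      // allowed silently
  testing::NaggyMock<MockFoo> naggy;    // allowed, prints a GMOCK WARNING
  testing::StrictMock<MockFoo> strict;  // reported as a test failure
}
```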
TEST(NaggyMockTest, WarningForUninterestingCallAfterDeath) { - const std::string saved_flag = GMOCK_FLAG(verbose); - GMOCK_FLAG(verbose) = "warning"; + const std::string saved_flag = GMOCK_FLAG_GET(verbose); + GMOCK_FLAG_SET(verbose, "warning"); NaggyMock* const naggy_foo = new NaggyMock; @@ -356,7 +355,7 @@ EXPECT_THAT(GetCapturedStdout(), HasSubstr("Uninteresting mock function call")); - GMOCK_FLAG(verbose) = saved_flag; + GMOCK_FLAG_SET(verbose, saved_flag); } #endif // GTEST_HAS_STREAM_REDIRECTION @@ -419,8 +418,8 @@ } TEST(NaggyMockTest, IsNaggyInDestructor) { - const std::string saved_flag = GMOCK_FLAG(verbose); - GMOCK_FLAG(verbose) = "warning"; + const std::string saved_flag = GMOCK_FLAG_GET(verbose); + GMOCK_FLAG_SET(verbose, "warning"); CaptureStdout(); { @@ -431,7 +430,7 @@ EXPECT_THAT(GetCapturedStdout(), HasSubstr("Uninteresting mock function call")); - GMOCK_FLAG(verbose) = saved_flag; + GMOCK_FLAG_SET(verbose, saved_flag); } TEST(NaggyMockTest, IsNaggy_IsNice_IsStrict) { diff --git a/MicroBenchmarks/libs/benchmark/googletest/googlemock/test/gmock-spec-builders_test.cc b/MicroBenchmarks/libs/benchmark/googletest/googlemock/test/gmock-spec-builders_test.cc --- a/MicroBenchmarks/libs/benchmark/googletest/googlemock/test/gmock-spec-builders_test.cc +++ b/MicroBenchmarks/libs/benchmark/googletest/googlemock/test/gmock-spec-builders_test.cc @@ -76,7 +76,6 @@ using testing::Eq; using testing::Expectation; using testing::ExpectationSet; -using testing::GMOCK_FLAG(verbose); using testing::Gt; using testing::IgnoreResult; using testing::InSequence; @@ -391,7 +390,7 @@ EXPECT_CALL(a, DoA(1)) .InSequence(s) .Times(1); - }, ".Times() cannot appear after "); + }, ".Times() may only appear *before* "); a.DoA(1); } @@ -696,9 +695,9 @@ } TEST(ExpectCallSyntaxTest, WarningIsErrorWithFlag) { - int original_behavior = testing::GMOCK_FLAG(default_mock_behavior); + int original_behavior = GMOCK_FLAG_GET(default_mock_behavior); - testing::GMOCK_FLAG(default_mock_behavior) = kAllow; + GMOCK_FLAG_SET(default_mock_behavior, kAllow); CaptureStdout(); { MockA a; @@ -707,7 +706,7 @@ std::string output = GetCapturedStdout(); EXPECT_TRUE(output.empty()) << output; - testing::GMOCK_FLAG(default_mock_behavior) = kWarn; + GMOCK_FLAG_SET(default_mock_behavior, kWarn); CaptureStdout(); { MockA a; @@ -718,14 +717,14 @@ EXPECT_PRED_FORMAT2(IsSubstring, "Uninteresting mock function call", warning_output); - testing::GMOCK_FLAG(default_mock_behavior) = kFail; + GMOCK_FLAG_SET(default_mock_behavior, kFail); EXPECT_NONFATAL_FAILURE({ MockA a; a.DoA(0); }, "Uninteresting mock function call"); // Out of bounds values are converted to kWarn - testing::GMOCK_FLAG(default_mock_behavior) = -1; + GMOCK_FLAG_SET(default_mock_behavior, -1); CaptureStdout(); { MockA a; @@ -735,7 +734,7 @@ EXPECT_PRED_FORMAT2(IsSubstring, "GMOCK WARNING", warning_output); EXPECT_PRED_FORMAT2(IsSubstring, "Uninteresting mock function call", warning_output); - testing::GMOCK_FLAG(default_mock_behavior) = 3; + GMOCK_FLAG_SET(default_mock_behavior, 3); CaptureStdout(); { MockA a; @@ -746,7 +745,7 @@ EXPECT_PRED_FORMAT2(IsSubstring, "Uninteresting mock function call", warning_output); - testing::GMOCK_FLAG(default_mock_behavior) = original_behavior; + GMOCK_FLAG_SET(default_mock_behavior, original_behavior); } #endif // GTEST_HAS_STREAM_REDIRECTION @@ -2024,10 +2023,10 @@ class VerboseFlagPreservingFixture : public testing::Test { protected: VerboseFlagPreservingFixture() - : saved_verbose_flag_(GMOCK_FLAG(verbose)) {} + : 
saved_verbose_flag_(GMOCK_FLAG_GET(verbose)) {} ~VerboseFlagPreservingFixture() override { - GMOCK_FLAG(verbose) = saved_verbose_flag_; + GMOCK_FLAG_SET(verbose, saved_verbose_flag_); } private: @@ -2043,7 +2042,7 @@ // --gmock_verbose=warning is specified. TEST(FunctionCallMessageTest, UninterestingCallOnNaggyMockGeneratesNoStackTraceWhenVerboseWarning) { - GMOCK_FLAG(verbose) = kWarningVerbosity; + GMOCK_FLAG_SET(verbose, kWarningVerbosity); NaggyMock c; CaptureStdout(); c.VoidMethod(false, 5, "Hi", nullptr, Printable(), Unprintable()); @@ -2057,7 +2056,7 @@ // --gmock_verbose=info is specified. TEST(FunctionCallMessageTest, UninterestingCallOnNaggyMockGeneratesFyiWithStackTraceWhenVerboseInfo) { - GMOCK_FLAG(verbose) = kInfoVerbosity; + GMOCK_FLAG_SET(verbose, kInfoVerbosity); NaggyMock c; CaptureStdout(); c.VoidMethod(false, 5, "Hi", nullptr, Printable(), Unprintable()); @@ -2213,7 +2212,7 @@ // Tests that --gmock_verbose=info causes both expected and // uninteresting calls to be reported. TEST_F(GMockVerboseFlagTest, Info) { - GMOCK_FLAG(verbose) = kInfoVerbosity; + GMOCK_FLAG_SET(verbose, kInfoVerbosity); TestExpectedCall(true); TestUninterestingCallOnNaggyMock(true); } @@ -2221,7 +2220,7 @@ // Tests that --gmock_verbose=warning causes uninteresting calls to be // reported. TEST_F(GMockVerboseFlagTest, Warning) { - GMOCK_FLAG(verbose) = kWarningVerbosity; + GMOCK_FLAG_SET(verbose, kWarningVerbosity); TestExpectedCall(false); TestUninterestingCallOnNaggyMock(true); } @@ -2229,7 +2228,7 @@ // Tests that --gmock_verbose=warning causes neither expected nor // uninteresting calls to be reported. TEST_F(GMockVerboseFlagTest, Error) { - GMOCK_FLAG(verbose) = kErrorVerbosity; + GMOCK_FLAG_SET(verbose, kErrorVerbosity); TestExpectedCall(false); TestUninterestingCallOnNaggyMock(false); } @@ -2237,7 +2236,7 @@ // Tests that --gmock_verbose=SOME_INVALID_VALUE has the same effect // as --gmock_verbose=warning. TEST_F(GMockVerboseFlagTest, InvalidFlagIsTreatedAsWarning) { - GMOCK_FLAG(verbose) = "invalid"; // Treated as "warning". + GMOCK_FLAG_SET(verbose, "invalid"); // Treated as "warning". TestExpectedCall(false); TestUninterestingCallOnNaggyMock(true); } @@ -2270,21 +2269,21 @@ }; TEST_F(GMockLogTest, DoesNotPrintGoodCallInternallyIfVerbosityIsWarning) { - GMOCK_FLAG(verbose) = kWarningVerbosity; + GMOCK_FLAG_SET(verbose, kWarningVerbosity); EXPECT_CALL(helper_, Foo(_)) .WillOnce(Return(PrintMeNot())); helper_.Foo(PrintMeNot()); // This is an expected call. } TEST_F(GMockLogTest, DoesNotPrintGoodCallInternallyIfVerbosityIsError) { - GMOCK_FLAG(verbose) = kErrorVerbosity; + GMOCK_FLAG_SET(verbose, kErrorVerbosity); EXPECT_CALL(helper_, Foo(_)) .WillOnce(Return(PrintMeNot())); helper_.Foo(PrintMeNot()); // This is an expected call. } TEST_F(GMockLogTest, DoesNotPrintWarningInternallyIfVerbosityIsError) { - GMOCK_FLAG(verbose) = kErrorVerbosity; + GMOCK_FLAG_SET(verbose, kErrorVerbosity); ON_CALL(helper_, Foo(_)) .WillByDefault(Return(PrintMeNot())); helper_.Foo(PrintMeNot()); // This should generate a warning. @@ -2768,8 +2767,8 @@ testing::InitGoogleMock(&argc, argv); // Ensures that the tests pass no matter what value of // --gmock_catch_leaked_mocks and --gmock_verbose the user specifies. 
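The VerboseFlagPreservingFixture above saves the verbose flag in its constructor and restores it in its destructor, keeping each test hermetic; the main() fix-up the comment introduces continues just below. The same RAII idea in miniature, as a guard for a single override (this class is illustrative, not part of the patch):

    #include <string>

    #include "gmock/gmock.h"

    // Illustrative RAII guard: overrides gmock verbosity for one scope,
    // then restores the previous value even on early return.
    class VerboseGuard {
     public:
      explicit VerboseGuard(const char* level)
          : saved_(GMOCK_FLAG_GET(verbose)) {
        GMOCK_FLAG_SET(verbose, level);
      }
      ~VerboseGuard() { GMOCK_FLAG_SET(verbose, saved_); }

     private:
      const std::string saved_;
    };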
-  testing::GMOCK_FLAG(catch_leaked_mocks) = true;
-  testing::GMOCK_FLAG(verbose) = testing::internal::kWarningVerbosity;
+  GMOCK_FLAG_SET(catch_leaked_mocks, true);
+  GMOCK_FLAG_SET(verbose, testing::internal::kWarningVerbosity);
 
   return RUN_ALL_TESTS();
 }
diff --git a/MicroBenchmarks/libs/benchmark/googletest/googlemock/test/gmock_leak_test.py b/MicroBenchmarks/libs/benchmark/googletest/googlemock/test/gmock_leak_test.py
--- a/MicroBenchmarks/libs/benchmark/googletest/googlemock/test/gmock_leak_test.py
+++ b/MicroBenchmarks/libs/benchmark/googletest/googlemock/test/gmock_leak_test.py
@@ -31,7 +31,7 @@
 
 """Tests that leaked mock objects can be caught be Google Mock."""
 
-import gmock_test_utils
+from googlemock.test import gmock_test_utils
 
 PROGRAM_PATH = gmock_test_utils.GetTestExecutablePath('gmock_leak_test_')
 
 TEST_WITH_EXPECT_CALL = [PROGRAM_PATH, '--gtest_filter=*ExpectCall*']
diff --git a/MicroBenchmarks/libs/benchmark/googletest/googlemock/test/gmock_output_test.py b/MicroBenchmarks/libs/benchmark/googletest/googlemock/test/gmock_output_test.py
--- a/MicroBenchmarks/libs/benchmark/googletest/googlemock/test/gmock_output_test.py
+++ b/MicroBenchmarks/libs/benchmark/googletest/googlemock/test/gmock_output_test.py
@@ -43,7 +43,7 @@
 import os
 import re
 import sys
-import gmock_test_utils
+from googlemock.test import gmock_test_utils
 
 # The flag for generating the golden file
diff --git a/MicroBenchmarks/libs/benchmark/googletest/googlemock/test/gmock_output_test_.cc b/MicroBenchmarks/libs/benchmark/googletest/googlemock/test/gmock_output_test_.cc
--- a/MicroBenchmarks/libs/benchmark/googletest/googlemock/test/gmock_output_test_.cc
+++ b/MicroBenchmarks/libs/benchmark/googletest/googlemock/test/gmock_output_test_.cc
@@ -72,21 +72,21 @@
 };
 
 TEST_F(GMockOutputTest, ExpectedCall) {
-  testing::GMOCK_FLAG(verbose) = "info";
+  GMOCK_FLAG_SET(verbose, "info");
 
   EXPECT_CALL(foo_, Bar2(0, _));
   foo_.Bar2(0, 0);  // Expected call
 
-  testing::GMOCK_FLAG(verbose) = "warning";
+  GMOCK_FLAG_SET(verbose, "warning");
 }
 
 TEST_F(GMockOutputTest, ExpectedCallToVoidFunction) {
-  testing::GMOCK_FLAG(verbose) = "info";
+  GMOCK_FLAG_SET(verbose, "info");
 
   EXPECT_CALL(foo_, Bar3(0, _));
   foo_.Bar3(0, 0);  // Expected call
 
-  testing::GMOCK_FLAG(verbose) = "warning";
+  GMOCK_FLAG_SET(verbose, "warning");
 }
 
 TEST_F(GMockOutputTest, ExplicitActionsRunOut) {
@@ -297,8 +297,8 @@
   testing::InitGoogleMock(&argc, argv);
 
   // Ensures that the tests pass no matter what value of
   // --gmock_catch_leaked_mocks and --gmock_verbose the user specifies.
-  testing::GMOCK_FLAG(catch_leaked_mocks) = true;
-  testing::GMOCK_FLAG(verbose) = "warning";
+  GMOCK_FLAG_SET(catch_leaked_mocks, true);
+  GMOCK_FLAG_SET(verbose, "warning");
 
   TestCatchesLeakedMocksInAdHocTests();
   return RUN_ALL_TESTS();
diff --git a/MicroBenchmarks/libs/benchmark/googletest/googlemock/test/gmock_output_test_golden.txt b/MicroBenchmarks/libs/benchmark/googletest/googlemock/test/gmock_output_test_golden.txt
--- a/MicroBenchmarks/libs/benchmark/googletest/googlemock/test/gmock_output_test_golden.txt
+++ b/MicroBenchmarks/libs/benchmark/googletest/googlemock/test/gmock_output_test_golden.txt
@@ -291,7 +291,7 @@
 [ RUN      ] GMockOutputTest.PrintsMatcher
 FILE:#: Failure
 Value of: (std::pair<int, bool>(42, true))
-Expected: is pair (is >= 48, true)
+Expected: is pair (first: is >= 48, second: true)
 Actual: (42, true) (of type std::pair<int,bool>)
 [  FAILED  ] GMockOutputTest.PrintsMatcher
 [  FAILED  ] GMockOutputTest.UnexpectedCall
diff --git a/MicroBenchmarks/libs/benchmark/googletest/googlemock/test/gmock_test.cc b/MicroBenchmarks/libs/benchmark/googletest/googlemock/test/gmock_test.cc
--- a/MicroBenchmarks/libs/benchmark/googletest/googlemock/test/gmock_test.cc
+++ b/MicroBenchmarks/libs/benchmark/googletest/googlemock/test/gmock_test.cc
@@ -40,8 +40,6 @@
 
 #if !defined(GTEST_CUSTOM_INIT_GOOGLE_TEST_FUNCTION_)
 
-using testing::GMOCK_FLAG(default_mock_behavior);
-using testing::GMOCK_FLAG(verbose);
 using testing::InitGoogleMock;
 
 // Verifies that calling InitGoogleMock() on argv results in new_argv,
@@ -49,7 +47,7 @@
 template <typename Char, int M, int N>
 void TestInitGoogleMock(const Char* (&argv)[M], const Char* (&new_argv)[N],
                         const ::std::string& expected_gmock_verbose) {
-  const ::std::string old_verbose = GMOCK_FLAG(verbose);
+  const ::std::string old_verbose = GMOCK_FLAG_GET(verbose);
 
   int argc = M - 1;
   InitGoogleMock(&argc, const_cast<Char**>(argv));
@@ -59,8 +57,8 @@
     EXPECT_STREQ(new_argv[i], argv[i]);
   }
 
-  EXPECT_EQ(expected_gmock_verbose, GMOCK_FLAG(verbose).c_str());
-  GMOCK_FLAG(verbose) = old_verbose;  // Restores the gmock_verbose flag.
+  EXPECT_EQ(expected_gmock_verbose, GMOCK_FLAG_GET(verbose));
+  GMOCK_FLAG_SET(verbose, old_verbose);  // Restores the gmock_verbose flag.
} TEST(InitGoogleMockTest, ParsesInvalidCommandLine) { @@ -68,7 +66,7 @@ const char* new_argv[] = {nullptr}; - TestInitGoogleMock(argv, new_argv, GMOCK_FLAG(verbose)); + TestInitGoogleMock(argv, new_argv, GMOCK_FLAG_GET(verbose)); } TEST(InitGoogleMockTest, ParsesEmptyCommandLine) { @@ -76,7 +74,7 @@ const char* new_argv[] = {"foo.exe", nullptr}; - TestInitGoogleMock(argv, new_argv, GMOCK_FLAG(verbose)); + TestInitGoogleMock(argv, new_argv, GMOCK_FLAG_GET(verbose)); } TEST(InitGoogleMockTest, ParsesSingleFlag) { @@ -88,16 +86,16 @@ } TEST(InitGoogleMockTest, ParsesMultipleFlags) { - int old_default_behavior = GMOCK_FLAG(default_mock_behavior); + int old_default_behavior = GMOCK_FLAG_GET(default_mock_behavior); const wchar_t* argv[] = {L"foo.exe", L"--gmock_verbose=info", L"--gmock_default_mock_behavior=2", nullptr}; const wchar_t* new_argv[] = {L"foo.exe", nullptr}; TestInitGoogleMock(argv, new_argv, "info"); - EXPECT_EQ(2, GMOCK_FLAG(default_mock_behavior)); + EXPECT_EQ(2, GMOCK_FLAG_GET(default_mock_behavior)); EXPECT_NE(2, old_default_behavior); - GMOCK_FLAG(default_mock_behavior) = old_default_behavior; + GMOCK_FLAG_SET(default_mock_behavior, old_default_behavior); } TEST(InitGoogleMockTest, ParsesUnrecognizedFlag) { @@ -105,7 +103,7 @@ const char* new_argv[] = {"foo.exe", "--non_gmock_flag=blah", nullptr}; - TestInitGoogleMock(argv, new_argv, GMOCK_FLAG(verbose)); + TestInitGoogleMock(argv, new_argv, GMOCK_FLAG_GET(verbose)); } TEST(InitGoogleMockTest, ParsesGoogleMockFlagAndUnrecognizedFlag) { @@ -122,7 +120,7 @@ const wchar_t* new_argv[] = {nullptr}; - TestInitGoogleMock(argv, new_argv, GMOCK_FLAG(verbose)); + TestInitGoogleMock(argv, new_argv, GMOCK_FLAG_GET(verbose)); } TEST(WideInitGoogleMockTest, ParsesEmptyCommandLine) { @@ -130,7 +128,7 @@ const wchar_t* new_argv[] = {L"foo.exe", nullptr}; - TestInitGoogleMock(argv, new_argv, GMOCK_FLAG(verbose)); + TestInitGoogleMock(argv, new_argv, GMOCK_FLAG_GET(verbose)); } TEST(WideInitGoogleMockTest, ParsesSingleFlag) { @@ -142,16 +140,16 @@ } TEST(WideInitGoogleMockTest, ParsesMultipleFlags) { - int old_default_behavior = GMOCK_FLAG(default_mock_behavior); + int old_default_behavior = GMOCK_FLAG_GET(default_mock_behavior); const wchar_t* argv[] = {L"foo.exe", L"--gmock_verbose=info", L"--gmock_default_mock_behavior=2", nullptr}; const wchar_t* new_argv[] = {L"foo.exe", nullptr}; TestInitGoogleMock(argv, new_argv, "info"); - EXPECT_EQ(2, GMOCK_FLAG(default_mock_behavior)); + EXPECT_EQ(2, GMOCK_FLAG_GET(default_mock_behavior)); EXPECT_NE(2, old_default_behavior); - GMOCK_FLAG(default_mock_behavior) = old_default_behavior; + GMOCK_FLAG_SET(default_mock_behavior, old_default_behavior); } TEST(WideInitGoogleMockTest, ParsesUnrecognizedFlag) { @@ -159,7 +157,7 @@ const wchar_t* new_argv[] = {L"foo.exe", L"--non_gmock_flag=blah", nullptr}; - TestInitGoogleMock(argv, new_argv, GMOCK_FLAG(verbose)); + TestInitGoogleMock(argv, new_argv, GMOCK_FLAG_GET(verbose)); } TEST(WideInitGoogleMockTest, ParsesGoogleMockFlagAndUnrecognizedFlag) { @@ -175,7 +173,7 @@ // Makes sure Google Mock flags can be accessed in code. TEST(FlagTest, IsAccessibleInCode) { - bool dummy = testing::GMOCK_FLAG(catch_leaked_mocks) && - testing::GMOCK_FLAG(verbose) == ""; + bool dummy = + GMOCK_FLAG_GET(catch_leaked_mocks) && GMOCK_FLAG_GET(verbose) == ""; (void)dummy; // Avoids the "unused local variable" warning. 
 }
diff --git a/MicroBenchmarks/libs/benchmark/googletest/googlemock/test/gmock_test_utils.py b/MicroBenchmarks/libs/benchmark/googletest/googlemock/test/gmock_test_utils.py
--- a/MicroBenchmarks/libs/benchmark/googletest/googlemock/test/gmock_test_utils.py
+++ b/MicroBenchmarks/libs/benchmark/googletest/googlemock/test/gmock_test_utils.py
@@ -30,21 +30,9 @@
 
 """Unit test utilities for Google C++ Mocking Framework."""
 
 import os
-import sys
-
-# Determines path to gtest_test_utils and imports it.
-SCRIPT_DIR = os.path.dirname(__file__) or '.'
-
-# isdir resolves symbolic links.
-gtest_tests_util_dir = os.path.join(SCRIPT_DIR, '../../googletest/test')
-if os.path.isdir(gtest_tests_util_dir):
-  GTEST_TESTS_UTIL_DIR = gtest_tests_util_dir
-else:
-  GTEST_TESTS_UTIL_DIR = os.path.join(SCRIPT_DIR, '../../googletest/test')
-sys.path.append(GTEST_TESTS_UTIL_DIR)  # pylint: disable=C6204
-import gtest_test_utils
+from googletest.test import gtest_test_utils
 
 
 def GetSourceDir():
diff --git a/MicroBenchmarks/libs/benchmark/googletest/googletest/CMakeLists.txt b/MicroBenchmarks/libs/benchmark/googletest/googletest/CMakeLists.txt
--- a/MicroBenchmarks/libs/benchmark/googletest/googletest/CMakeLists.txt
+++ b/MicroBenchmarks/libs/benchmark/googletest/googletest/CMakeLists.txt
@@ -46,14 +46,9 @@
 
 # Project version:
 
-if (CMAKE_VERSION VERSION_LESS 3.0)
-  project(gtest CXX C)
-  set(PROJECT_VERSION ${GOOGLETEST_VERSION})
-else()
-  cmake_policy(SET CMP0048 NEW)
-  project(gtest VERSION ${GOOGLETEST_VERSION} LANGUAGES CXX C)
-endif()
-cmake_minimum_required(VERSION 2.8.12)
+cmake_minimum_required(VERSION 3.5)
+cmake_policy(SET CMP0048 NEW)
+project(gtest VERSION ${GOOGLETEST_VERSION} LANGUAGES CXX C)
 
 if (POLICY CMP0063) # Visibility
   cmake_policy(SET CMP0063 NEW)
@@ -136,13 +131,17 @@
 # to the targets for when we are part of a parent build (ie being pulled
 # in via add_subdirectory() rather than being a standalone build).
 if (DEFINED CMAKE_VERSION AND NOT "${CMAKE_VERSION}" VERSION_LESS "2.8.11")
+  string(REPLACE ";" "$<SEMICOLON>" dirs "${gtest_build_include_dirs}")
   target_include_directories(gtest SYSTEM INTERFACE
-    "$<BUILD_INTERFACE:${gtest_build_include_dirs}>"
+    "$<BUILD_INTERFACE:${dirs}>"
     "$<INSTALL_INTERFACE:$<INSTALL_PREFIX>/${CMAKE_INSTALL_INCLUDEDIR}>")
   target_include_directories(gtest_main SYSTEM INTERFACE
-    "$<BUILD_INTERFACE:${gtest_build_include_dirs}>"
+    "$<BUILD_INTERFACE:${dirs}>"
     "$<INSTALL_INTERFACE:$<INSTALL_PREFIX>/${CMAKE_INSTALL_INCLUDEDIR}>")
 endif()
+if(CMAKE_SYSTEM_NAME MATCHES "QNX")
+  target_link_libraries(gtest PUBLIC regex)
+endif()
 target_link_libraries(gtest_main PUBLIC gtest)
 
 ########################################################################
diff --git a/MicroBenchmarks/libs/benchmark/googletest/googletest/README.md b/MicroBenchmarks/libs/benchmark/googletest/googletest/README.md
--- a/MicroBenchmarks/libs/benchmark/googletest/googletest/README.md
+++ b/MicroBenchmarks/libs/benchmark/googletest/googletest/README.md
@@ -94,7 +94,7 @@
 FetchContent_Declare(
   googletest
   # Specify the commit you depend on and update it regularly.
-  URL https://github.com/google/googletest/archive/609281088cfefc76f9d0ce82e1ff6c30cc3591e5.zip
+  URL https://github.com/google/googletest/archive/e2239ee6043f73722e7aa812a459f54a28552929.zip
 )
 # For Windows: Prevent overriding the parent project's compiler/linker settings
 set(gtest_force_shared_crt ON CACHE BOOL "" FORCE)
@@ -203,7 +203,9 @@
     -DGTEST_DONT_DEFINE_FOO=1
 
 to the compiler flags to tell GoogleTest to change the macro's name from `FOO`
-to `GTEST_FOO`. Currently `FOO` can be `FAIL`, `SUCCEED`, or `TEST`. For
+to `GTEST_FOO`. Currently `FOO` can be `ASSERT_EQ`, `ASSERT_FALSE`, `ASSERT_GE`,
+`ASSERT_GT`, `ASSERT_LE`, `ASSERT_LT`, `ASSERT_NE`, `ASSERT_TRUE`,
+`EXPECT_FALSE`, `EXPECT_TRUE`, `FAIL`, `SUCCEED`, `TEST`, or `TEST_F`. For
 example, with `-DGTEST_DONT_DEFINE_TEST=1`, you'll need to write
 
     GTEST_TEST(SomeTest, DoesThis) { ... }
diff --git a/MicroBenchmarks/libs/benchmark/googletest/googletest/cmake/internal_utils.cmake b/MicroBenchmarks/libs/benchmark/googletest/googletest/cmake/internal_utils.cmake
--- a/MicroBenchmarks/libs/benchmark/googletest/googletest/cmake/internal_utils.cmake
+++ b/MicroBenchmarks/libs/benchmark/googletest/googletest/cmake/internal_utils.cmake
@@ -154,10 +154,6 @@
     set_target_properties(${name}
       PROPERTIES
       COMPILE_FLAGS "${cxx_flags}")
-    # Generate debug library name with a postfix.
-    set_target_properties(${name}
-      PROPERTIES
-      DEBUG_POSTFIX "d")
     # Set the output directory for build artifacts
     set_target_properties(${name}
       PROPERTIES
@@ -304,6 +300,8 @@
         COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/test/${name}.py
           --build_dir=${CMAKE_CURRENT_BINARY_DIR}/\${CTEST_CONFIGURATION_TYPE} ${ARGN})
     endif()
+    # Make the Python import path consistent between Bazel and CMake.
+    set_tests_properties(${name} PROPERTIES ENVIRONMENT PYTHONPATH=${CMAKE_SOURCE_DIR})
   endif(PYTHONINTERP_FOUND)
 endfunction()
diff --git a/MicroBenchmarks/libs/benchmark/googletest/googletest/include/gtest/gtest-assertion-result.h b/MicroBenchmarks/libs/benchmark/googletest/googletest/include/gtest/gtest-assertion-result.h
new file mode 100644
--- /dev/null
+++ b/MicroBenchmarks/libs/benchmark/googletest/googletest/include/gtest/gtest-assertion-result.h
@@ -0,0 +1,232 @@
+// Copyright 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The Google C++ Testing and Mocking Framework (Google Test)
+//
+// This file implements the AssertionResult type.
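gtest-assertion-result.h is a new file: AssertionResult moves out of gtest.h (the matching removal appears further down), and the IWYU pragmas that follow mark it private, so user code keeps including the umbrella header. A short usage sketch; the IsEven predicate comes from this header's own documentation:

    #include "gtest/gtest.h"  // umbrella header; the new file is private

    testing::AssertionResult IsEven(int n) {
      if ((n % 2) == 0) return testing::AssertionSuccess();
      return testing::AssertionFailure() << n << " is odd";
    }

    TEST(AssertionResultExample, RichFailureMessage) {
      EXPECT_TRUE(IsEven(2));  // a failure would print "... is odd"
    }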
+ +// IWYU pragma: private, include "gtest/gtest.h" +// IWYU pragma: friend gtest/.* +// IWYU pragma: friend gmock/.* + +#ifndef GOOGLETEST_INCLUDE_GTEST_GTEST_ASSERTION_RESULT_H_ +#define GOOGLETEST_INCLUDE_GTEST_GTEST_ASSERTION_RESULT_H_ + +#include +#include +#include +#include + +#include "gtest/gtest-message.h" +#include "gtest/internal/gtest-port.h" + +namespace testing { + +// A class for indicating whether an assertion was successful. When +// the assertion wasn't successful, the AssertionResult object +// remembers a non-empty message that describes how it failed. +// +// To create an instance of this class, use one of the factory functions +// (AssertionSuccess() and AssertionFailure()). +// +// This class is useful for two purposes: +// 1. Defining predicate functions to be used with Boolean test assertions +// EXPECT_TRUE/EXPECT_FALSE and their ASSERT_ counterparts +// 2. Defining predicate-format functions to be +// used with predicate assertions (ASSERT_PRED_FORMAT*, etc). +// +// For example, if you define IsEven predicate: +// +// testing::AssertionResult IsEven(int n) { +// if ((n % 2) == 0) +// return testing::AssertionSuccess(); +// else +// return testing::AssertionFailure() << n << " is odd"; +// } +// +// Then the failed expectation EXPECT_TRUE(IsEven(Fib(5))) +// will print the message +// +// Value of: IsEven(Fib(5)) +// Actual: false (5 is odd) +// Expected: true +// +// instead of a more opaque +// +// Value of: IsEven(Fib(5)) +// Actual: false +// Expected: true +// +// in case IsEven is a simple Boolean predicate. +// +// If you expect your predicate to be reused and want to support informative +// messages in EXPECT_FALSE and ASSERT_FALSE (negative assertions show up +// about half as often as positive ones in our tests), supply messages for +// both success and failure cases: +// +// testing::AssertionResult IsEven(int n) { +// if ((n % 2) == 0) +// return testing::AssertionSuccess() << n << " is even"; +// else +// return testing::AssertionFailure() << n << " is odd"; +// } +// +// Then a statement EXPECT_FALSE(IsEven(Fib(6))) will print +// +// Value of: IsEven(Fib(6)) +// Actual: true (8 is even) +// Expected: false +// +// NB: Predicates that support negative Boolean assertions have reduced +// performance in positive ones so be careful not to use them in tests +// that have lots (tens of thousands) of positive Boolean assertions. +// +// To use this class with EXPECT_PRED_FORMAT assertions such as: +// +// // Verifies that Foo() returns an even number. +// EXPECT_PRED_FORMAT1(IsEven, Foo()); +// +// you need to define: +// +// testing::AssertionResult IsEven(const char* expr, int n) { +// if ((n % 2) == 0) +// return testing::AssertionSuccess(); +// else +// return testing::AssertionFailure() +// << "Expected: " << expr << " is even\n Actual: it's " << n; +// } +// +// If Foo() returns 5, you will see the following message: +// +// Expected: Foo() is even +// Actual: it's 5 +// +class GTEST_API_ AssertionResult { + public: + // Copy constructor. + // Used in EXPECT_TRUE/FALSE(assertion_result). + AssertionResult(const AssertionResult& other); + +// C4800 is a level 3 warning in Visual Studio 2015 and earlier. +// This warning is not emitted in Visual Studio 2017. +// This warning is off by default starting in Visual Studio 2019 but can be +// enabled with command-line options. 
+#if defined(_MSC_VER) && (_MSC_VER < 1910 || _MSC_VER >= 1920) + GTEST_DISABLE_MSC_WARNINGS_PUSH_(4800 /* forcing value to bool */) +#endif + + // Used in the EXPECT_TRUE/FALSE(bool_expression). + // + // T must be contextually convertible to bool. + // + // The second parameter prevents this overload from being considered if + // the argument is implicitly convertible to AssertionResult. In that case + // we want AssertionResult's copy constructor to be used. + template + explicit AssertionResult( + const T& success, + typename std::enable_if< + !std::is_convertible::value>::type* + /*enabler*/ + = nullptr) + : success_(success) {} + +#if defined(_MSC_VER) && (_MSC_VER < 1910 || _MSC_VER >= 1920) + GTEST_DISABLE_MSC_WARNINGS_POP_() +#endif + + // Assignment operator. + AssertionResult& operator=(AssertionResult other) { + swap(other); + return *this; + } + + // Returns true if and only if the assertion succeeded. + operator bool() const { return success_; } // NOLINT + + // Returns the assertion's negation. Used with EXPECT/ASSERT_FALSE. + AssertionResult operator!() const; + + // Returns the text streamed into this AssertionResult. Test assertions + // use it when they fail (i.e., the predicate's outcome doesn't match the + // assertion's expectation). When nothing has been streamed into the + // object, returns an empty string. + const char* message() const { + return message_.get() != nullptr ? message_->c_str() : ""; + } + // Deprecated; please use message() instead. + const char* failure_message() const { return message(); } + + // Streams a custom failure message into this object. + template + AssertionResult& operator<<(const T& value) { + AppendMessage(Message() << value); + return *this; + } + + // Allows streaming basic output manipulators such as endl or flush into + // this object. + AssertionResult& operator<<( + ::std::ostream& (*basic_manipulator)(::std::ostream& stream)) { + AppendMessage(Message() << basic_manipulator); + return *this; + } + + private: + // Appends the contents of message to message_. + void AppendMessage(const Message& a_message) { + if (message_.get() == nullptr) message_.reset(new ::std::string); + message_->append(a_message.GetString().c_str()); + } + + // Swap the contents of this AssertionResult with other. + void swap(AssertionResult& other); + + // Stores result of the assertion predicate. + bool success_; + // Stores the message describing the condition in case the expectation + // construct is not satisfied with the predicate's outcome. + // Referenced via a pointer to avoid taking too much stack frame space + // with test assertions. + std::unique_ptr< ::std::string> message_; +}; + +// Makes a successful assertion result. +GTEST_API_ AssertionResult AssertionSuccess(); + +// Makes a failed assertion result. +GTEST_API_ AssertionResult AssertionFailure(); + +// Makes a failed assertion result with the given failure message. +// Deprecated; use AssertionFailure() << msg. 
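Since AssertionResult converts contextually to bool and carries the streamed text, it can also be inspected outside an assertion macro; the factory declarations of the header resume below. A hedged sketch (predicate and values are illustrative):

    #include <cstdio>

    #include "gtest/gtest.h"

    testing::AssertionResult CheckPositive(int n) {
      if (n > 0) return testing::AssertionSuccess() << n << " is positive";
      return testing::AssertionFailure() << n << " is not positive";
    }

    TEST(AssertionResultExample, InspectDirectly) {
      const testing::AssertionResult result = CheckPositive(-3);
      if (!result) {                             // operator bool()
        std::printf("%s\n", result.message());  // "" when nothing streamed
      }
    }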
+GTEST_API_ AssertionResult AssertionFailure(const Message& msg); + +} // namespace testing + +#endif // GOOGLETEST_INCLUDE_GTEST_GTEST_ASSERTION_RESULT_H_ diff --git a/MicroBenchmarks/libs/benchmark/googletest/googletest/include/gtest/gtest-death-test.h b/MicroBenchmarks/libs/benchmark/googletest/googletest/include/gtest/gtest-death-test.h --- a/MicroBenchmarks/libs/benchmark/googletest/googletest/include/gtest/gtest-death-test.h +++ b/MicroBenchmarks/libs/benchmark/googletest/googletest/include/gtest/gtest-death-test.h @@ -27,13 +27,15 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// // The Google C++ Testing and Mocking Framework (Google Test) // // This header file defines the public API for death tests. It is // #included by gtest.h so a user doesn't need to include this // directly. -// GOOGLETEST_CM0001 DO NOT DELETE + +// IWYU pragma: private, include "gtest/gtest.h" +// IWYU pragma: friend gtest/.* +// IWYU pragma: friend gmock/.* #ifndef GOOGLETEST_INCLUDE_GTEST_GTEST_DEATH_TEST_H_ #define GOOGLETEST_INCLUDE_GTEST_GTEST_DEATH_TEST_H_ @@ -103,7 +105,6 @@ // // On the regular expressions used in death tests: // -// GOOGLETEST_CM0005 DO NOT DELETE // On POSIX-compliant systems (*nix), we use the library, // which uses the POSIX extended regex syntax. // @@ -204,7 +205,6 @@ # if !GTEST_OS_WINDOWS && !GTEST_OS_FUCHSIA // Tests that an exit code describes an exit due to termination by a // given signal. -// GOOGLETEST_CM0006 DO NOT DELETE class GTEST_API_ KilledBySignal { public: explicit KilledBySignal(int signum); diff --git a/MicroBenchmarks/libs/benchmark/googletest/googletest/include/gtest/gtest-matchers.h b/MicroBenchmarks/libs/benchmark/googletest/googletest/include/gtest/gtest-matchers.h --- a/MicroBenchmarks/libs/benchmark/googletest/googletest/include/gtest/gtest-matchers.h +++ b/MicroBenchmarks/libs/benchmark/googletest/googletest/include/gtest/gtest-matchers.h @@ -32,6 +32,10 @@ // This file implements just enough of the matcher interface to allow // EXPECT_DEATH and friends to accept a matcher argument. +// IWYU pragma: private, include "gtest/gtest.h" +// IWYU pragma: friend gtest/.* +// IWYU pragma: friend gmock/.* + #ifndef GOOGLETEST_INCLUDE_GTEST_GTEST_MATCHERS_H_ #define GOOGLETEST_INCLUDE_GTEST_GTEST_MATCHERS_H_ diff --git a/MicroBenchmarks/libs/benchmark/googletest/googletest/include/gtest/gtest-message.h b/MicroBenchmarks/libs/benchmark/googletest/googletest/include/gtest/gtest-message.h --- a/MicroBenchmarks/libs/benchmark/googletest/googletest/include/gtest/gtest-message.h +++ b/MicroBenchmarks/libs/benchmark/googletest/googletest/include/gtest/gtest-message.h @@ -27,7 +27,6 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// // The Google C++ Testing and Mocking Framework (Google Test) // // This header file defines the Message class. @@ -42,7 +41,9 @@ // to CHANGE WITHOUT NOTICE. Therefore DO NOT DEPEND ON IT in a user // program! 
-// GOOGLETEST_CM0001 DO NOT DELETE +// IWYU pragma: private, include "gtest/gtest.h" +// IWYU pragma: friend gtest/.* +// IWYU pragma: friend gmock/.* #ifndef GOOGLETEST_INCLUDE_GTEST_GTEST_MESSAGE_H_ #define GOOGLETEST_INCLUDE_GTEST_GTEST_MESSAGE_H_ diff --git a/MicroBenchmarks/libs/benchmark/googletest/googletest/include/gtest/gtest-param-test.h b/MicroBenchmarks/libs/benchmark/googletest/googletest/include/gtest/gtest-param-test.h --- a/MicroBenchmarks/libs/benchmark/googletest/googletest/include/gtest/gtest-param-test.h +++ b/MicroBenchmarks/libs/benchmark/googletest/googletest/include/gtest/gtest-param-test.h @@ -26,11 +26,14 @@ // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// + // Macros and functions for implementing parameterized tests // in Google C++ Testing and Mocking Framework (Google Test) -// -// GOOGLETEST_CM0001 DO NOT DELETE + +// IWYU pragma: private, include "gtest/gtest.h" +// IWYU pragma: friend gtest/.* +// IWYU pragma: friend gmock/.* + #ifndef GOOGLETEST_INCLUDE_GTEST_GTEST_PARAM_TEST_H_ #define GOOGLETEST_INCLUDE_GTEST_GTEST_PARAM_TEST_H_ diff --git a/MicroBenchmarks/libs/benchmark/googletest/googletest/include/gtest/gtest-printers.h b/MicroBenchmarks/libs/benchmark/googletest/googletest/include/gtest/gtest-printers.h --- a/MicroBenchmarks/libs/benchmark/googletest/googletest/include/gtest/gtest-printers.h +++ b/MicroBenchmarks/libs/benchmark/googletest/googletest/include/gtest/gtest-printers.h @@ -27,7 +27,6 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - // Google Test - The Google C++ Testing and Mocking Framework // // This file implements a universal value printer that can print a @@ -95,7 +94,9 @@ // being defined as many user-defined container types don't have // value_type. -// GOOGLETEST_CM0001 DO NOT DELETE +// IWYU pragma: private, include "gtest/gtest.h" +// IWYU pragma: friend gtest/.* +// IWYU pragma: friend gmock/.* #ifndef GOOGLETEST_INCLUDE_GTEST_GTEST_PRINTERS_H_ #define GOOGLETEST_INCLUDE_GTEST_GTEST_PRINTERS_H_ @@ -479,6 +480,12 @@ } #endif +// gcc/clang __{u,}int128_t +#if defined(__SIZEOF_INT128__) +GTEST_API_ void PrintTo(__uint128_t v, ::std::ostream* os); +GTEST_API_ void PrintTo(__int128_t v, ::std::ostream* os); +#endif // __SIZEOF_INT128__ + // Overloads for C strings. 
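The __uint128_t/__int128_t and std::type_info printers added above hook into the same extension point available to user code: an ADL-visible PrintTo overload next to the type. The C-string overloads continue below. A sketch of that mechanism (the namespace and struct are made up for illustration):

    #include <ostream>

    #include "gtest/gtest.h"

    namespace myapp {  // hypothetical user namespace
    struct Point { int x, y; };

    // Found by argument-dependent lookup when gtest formats a value.
    void PrintTo(const Point& p, std::ostream* os) {
      *os << "Point(" << p.x << ", " << p.y << ")";
    }
    }  // namespace myapp

    TEST(PrintToExample, PrintToString) {
      const myapp::Point p{1, 2};
      EXPECT_EQ("Point(1, 2)", testing::PrintToString(p));
    }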
GTEST_API_ void PrintTo(const char* s, ::std::ostream* os); inline void PrintTo(char* s, ::std::ostream* os) { @@ -587,6 +594,12 @@ inline void PrintTo(std::nullptr_t, ::std::ostream* os) { *os << "(nullptr)"; } +#if GTEST_HAS_RTTI +inline void PrintTo(const std::type_info& info, std::ostream* os) { + *os << internal::GetTypeName(info); +} +#endif // GTEST_HAS_RTTI + template void PrintTo(std::reference_wrapper ref, ::std::ostream* os) { UniversalPrinter::Print(ref.get(), os); @@ -744,6 +757,14 @@ } }; +template <> +class UniversalPrinter { + public: + static void Print(decltype(Nullopt()), ::std::ostream* os) { + *os << "(nullopt)"; + } +}; + #endif // GTEST_INTERNAL_HAS_OPTIONAL #if GTEST_INTERNAL_HAS_VARIANT diff --git a/MicroBenchmarks/libs/benchmark/googletest/googletest/include/gtest/gtest-spi.h b/MicroBenchmarks/libs/benchmark/googletest/googletest/include/gtest/gtest-spi.h --- a/MicroBenchmarks/libs/benchmark/googletest/googletest/include/gtest/gtest-spi.h +++ b/MicroBenchmarks/libs/benchmark/googletest/googletest/include/gtest/gtest-spi.h @@ -27,12 +27,9 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// // Utilities for testing Google Test itself and code that uses Google Test // (e.g. frameworks built on top of Google Test). -// GOOGLETEST_CM0004 DO NOT DELETE - #ifndef GOOGLETEST_INCLUDE_GTEST_GTEST_SPI_H_ #define GOOGLETEST_INCLUDE_GTEST_GTEST_SPI_H_ diff --git a/MicroBenchmarks/libs/benchmark/googletest/googletest/include/gtest/gtest-test-part.h b/MicroBenchmarks/libs/benchmark/googletest/googletest/include/gtest/gtest-test-part.h --- a/MicroBenchmarks/libs/benchmark/googletest/googletest/include/gtest/gtest-test-part.h +++ b/MicroBenchmarks/libs/benchmark/googletest/googletest/include/gtest/gtest-test-part.h @@ -26,8 +26,10 @@ // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// -// GOOGLETEST_CM0001 DO NOT DELETE + +// IWYU pragma: private, include "gtest/gtest.h" +// IWYU pragma: friend gtest/.* +// IWYU pragma: friend gmock/.* #ifndef GOOGLETEST_INCLUDE_GTEST_GTEST_TEST_PART_H_ #define GOOGLETEST_INCLUDE_GTEST_GTEST_TEST_PART_H_ diff --git a/MicroBenchmarks/libs/benchmark/googletest/googletest/include/gtest/gtest-typed-test.h b/MicroBenchmarks/libs/benchmark/googletest/googletest/include/gtest/gtest-typed-test.h --- a/MicroBenchmarks/libs/benchmark/googletest/googletest/include/gtest/gtest-typed-test.h +++ b/MicroBenchmarks/libs/benchmark/googletest/googletest/include/gtest/gtest-typed-test.h @@ -27,7 +27,9 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-// GOOGLETEST_CM0001 DO NOT DELETE +// IWYU pragma: private, include "gtest/gtest.h" +// IWYU pragma: friend gtest/.* +// IWYU pragma: friend gmock/.* #ifndef GOOGLETEST_INCLUDE_GTEST_GTEST_TYPED_TEST_H_ #define GOOGLETEST_INCLUDE_GTEST_GTEST_TYPED_TEST_H_ diff --git a/MicroBenchmarks/libs/benchmark/googletest/googletest/include/gtest/gtest.h b/MicroBenchmarks/libs/benchmark/googletest/googletest/include/gtest/gtest.h --- a/MicroBenchmarks/libs/benchmark/googletest/googletest/include/gtest/gtest.h +++ b/MicroBenchmarks/libs/benchmark/googletest/googletest/include/gtest/gtest.h @@ -27,7 +27,6 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// // The Google C++ Testing and Mocking Framework (Google Test) // // This header file defines the public API for Google Test. It should be @@ -47,8 +46,6 @@ // registration from Barthelemy Dagenais' (barthelemy@prologique.com) // easyUnit framework. -// GOOGLETEST_CM0001 DO NOT DELETE - #ifndef GOOGLETEST_INCLUDE_GTEST_GTEST_H_ #define GOOGLETEST_INCLUDE_GTEST_GTEST_H_ @@ -59,16 +56,18 @@ #include #include -#include "gtest/internal/gtest-internal.h" -#include "gtest/internal/gtest-string.h" +#include "gtest/gtest-assertion-result.h" #include "gtest/gtest-death-test.h" #include "gtest/gtest-matchers.h" #include "gtest/gtest-message.h" #include "gtest/gtest-param-test.h" #include "gtest/gtest-printers.h" -#include "gtest/gtest_prod.h" #include "gtest/gtest-test-part.h" #include "gtest/gtest-typed-test.h" +#include "gtest/gtest_pred_impl.h" +#include "gtest/gtest_prod.h" +#include "gtest/internal/gtest-internal.h" +#include "gtest/internal/gtest-string.h" GTEST_DISABLE_MSC_WARNINGS_PUSH_(4251 \ /* class A needs to have dll-interface to be used by clients of class B */) @@ -206,193 +205,6 @@ class TestInfo; class UnitTest; -// A class for indicating whether an assertion was successful. When -// the assertion wasn't successful, the AssertionResult object -// remembers a non-empty message that describes how it failed. -// -// To create an instance of this class, use one of the factory functions -// (AssertionSuccess() and AssertionFailure()). -// -// This class is useful for two purposes: -// 1. Defining predicate functions to be used with Boolean test assertions -// EXPECT_TRUE/EXPECT_FALSE and their ASSERT_ counterparts -// 2. Defining predicate-format functions to be -// used with predicate assertions (ASSERT_PRED_FORMAT*, etc). -// -// For example, if you define IsEven predicate: -// -// testing::AssertionResult IsEven(int n) { -// if ((n % 2) == 0) -// return testing::AssertionSuccess(); -// else -// return testing::AssertionFailure() << n << " is odd"; -// } -// -// Then the failed expectation EXPECT_TRUE(IsEven(Fib(5))) -// will print the message -// -// Value of: IsEven(Fib(5)) -// Actual: false (5 is odd) -// Expected: true -// -// instead of a more opaque -// -// Value of: IsEven(Fib(5)) -// Actual: false -// Expected: true -// -// in case IsEven is a simple Boolean predicate. 
-// -// If you expect your predicate to be reused and want to support informative -// messages in EXPECT_FALSE and ASSERT_FALSE (negative assertions show up -// about half as often as positive ones in our tests), supply messages for -// both success and failure cases: -// -// testing::AssertionResult IsEven(int n) { -// if ((n % 2) == 0) -// return testing::AssertionSuccess() << n << " is even"; -// else -// return testing::AssertionFailure() << n << " is odd"; -// } -// -// Then a statement EXPECT_FALSE(IsEven(Fib(6))) will print -// -// Value of: IsEven(Fib(6)) -// Actual: true (8 is even) -// Expected: false -// -// NB: Predicates that support negative Boolean assertions have reduced -// performance in positive ones so be careful not to use them in tests -// that have lots (tens of thousands) of positive Boolean assertions. -// -// To use this class with EXPECT_PRED_FORMAT assertions such as: -// -// // Verifies that Foo() returns an even number. -// EXPECT_PRED_FORMAT1(IsEven, Foo()); -// -// you need to define: -// -// testing::AssertionResult IsEven(const char* expr, int n) { -// if ((n % 2) == 0) -// return testing::AssertionSuccess(); -// else -// return testing::AssertionFailure() -// << "Expected: " << expr << " is even\n Actual: it's " << n; -// } -// -// If Foo() returns 5, you will see the following message: -// -// Expected: Foo() is even -// Actual: it's 5 -// -class GTEST_API_ AssertionResult { - public: - // Copy constructor. - // Used in EXPECT_TRUE/FALSE(assertion_result). - AssertionResult(const AssertionResult& other); - -// C4800 is a level 3 warning in Visual Studio 2015 and earlier. -// This warning is not emitted in Visual Studio 2017. -// This warning is off by default starting in Visual Studio 2019 but can be -// enabled with command-line options. -#if defined(_MSC_VER) && (_MSC_VER < 1910 || _MSC_VER >= 1920) - GTEST_DISABLE_MSC_WARNINGS_PUSH_(4800 /* forcing value to bool */) -#endif - - // Used in the EXPECT_TRUE/FALSE(bool_expression). - // - // T must be contextually convertible to bool. - // - // The second parameter prevents this overload from being considered if - // the argument is implicitly convertible to AssertionResult. In that case - // we want AssertionResult's copy constructor to be used. - template - explicit AssertionResult( - const T& success, - typename std::enable_if< - !std::is_convertible::value>::type* - /*enabler*/ - = nullptr) - : success_(success) {} - -#if defined(_MSC_VER) && (_MSC_VER < 1910 || _MSC_VER >= 1920) - GTEST_DISABLE_MSC_WARNINGS_POP_() -#endif - - // Assignment operator. - AssertionResult& operator=(AssertionResult other) { - swap(other); - return *this; - } - - // Returns true if and only if the assertion succeeded. - operator bool() const { return success_; } // NOLINT - - // Returns the assertion's negation. Used with EXPECT/ASSERT_FALSE. - AssertionResult operator!() const; - - // Returns the text streamed into this AssertionResult. Test assertions - // use it when they fail (i.e., the predicate's outcome doesn't match the - // assertion's expectation). When nothing has been streamed into the - // object, returns an empty string. - const char* message() const { - return message_.get() != nullptr ? message_->c_str() : ""; - } - // Deprecated; please use message() instead. - const char* failure_message() const { return message(); } - - // Streams a custom failure message into this object. 
- template AssertionResult& operator<<(const T& value) { - AppendMessage(Message() << value); - return *this; - } - - // Allows streaming basic output manipulators such as endl or flush into - // this object. - AssertionResult& operator<<( - ::std::ostream& (*basic_manipulator)(::std::ostream& stream)) { - AppendMessage(Message() << basic_manipulator); - return *this; - } - - private: - // Appends the contents of message to message_. - void AppendMessage(const Message& a_message) { - if (message_.get() == nullptr) message_.reset(new ::std::string); - message_->append(a_message.GetString().c_str()); - } - - // Swap the contents of this AssertionResult with other. - void swap(AssertionResult& other); - - // Stores result of the assertion predicate. - bool success_; - // Stores the message describing the condition in case the expectation - // construct is not satisfied with the predicate's outcome. - // Referenced via a pointer to avoid taking too much stack frame space - // with test assertions. - std::unique_ptr< ::std::string> message_; -}; - -// Makes a successful assertion result. -GTEST_API_ AssertionResult AssertionSuccess(); - -// Makes a failed assertion result. -GTEST_API_ AssertionResult AssertionFailure(); - -// Makes a failed assertion result with the given failure message. -// Deprecated; use AssertionFailure() << msg. -GTEST_API_ AssertionResult AssertionFailure(const Message& msg); - -} // namespace testing - -// Includes the auto-generated header that implements a family of generic -// predicate assertion macros. This include comes late because it relies on -// APIs declared above. -#include "gtest/gtest_pred_impl.h" - -namespace testing { - // The abstract class that all tests inherit from. // // In Google Test, a unit test program contains one or many TestSuites, and @@ -1125,6 +937,9 @@ // Fired before the test starts. virtual void OnTestStart(const TestInfo& test_info) = 0; + // Fired when a test is disabled + virtual void OnTestDisabled(const TestInfo& /*test_info*/) {} + // Fired after a failed assertion or a SUCCEED() invocation. // If you want to throw an exception from this function to skip to the next // TEST, it must be AssertionException defined above, or inherited from it. @@ -1174,6 +989,7 @@ #endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_ void OnTestStart(const TestInfo& /*test_info*/) override {} + void OnTestDisabled(const TestInfo& /*test_info*/) override {} void OnTestPartResult(const TestPartResult& /*test_part_result*/) override {} void OnTestEnd(const TestInfo& /*test_info*/) override {} void OnTestSuiteEnd(const TestSuite& /*test_suite*/) override {} @@ -2383,13 +2199,12 @@ // EXPECT_EQ(a_.size(), 0); // EXPECT_EQ(b_.size(), 1); // } -// -// GOOGLETEST_CM0011 DO NOT DELETE -#if !GTEST_DONT_DEFINE_TEST -#define TEST_F(test_fixture, test_name)\ +#define GTEST_TEST_F(test_fixture, test_name)\ GTEST_TEST_(test_fixture, test_name, test_fixture, \ ::testing::internal::GetTypeId()) -#endif // !GTEST_DONT_DEFINE_TEST +#if !GTEST_DONT_DEFINE_TEST_F +#define TEST_F(test_fixture, test_name) GTEST_TEST_F(test_fixture, test_name) +#endif // Returns a path to temporary directory. // Tries to determine an appropriate directory for the platform. @@ -2450,6 +2265,7 @@ // } // ... // int main(int argc, char** argv) { +// ::testing::InitGoogleTest(&argc, argv); // std::vector values_to_test = LoadValuesFromConfig(); // RegisterMyTests(values_to_test); // ... 
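Two user-visible changes sit in the gtest.h hunk above: TEST_F now forwards to an unconditional GTEST_TEST_F so that -DGTEST_DONT_DEFINE_TEST_F=1 can free the short name, and the RegisterTest example gains the InitGoogleTest call it previously omitted. A sketch of the former (the fixture is illustrative):

    // Built with -DGTEST_DONT_DEFINE_TEST_F=1, e.g. when another framework
    // already claims TEST_F; the prefixed spelling keeps working.
    #include "gtest/gtest.h"

    class MyFixture : public testing::Test {};

    GTEST_TEST_F(MyFixture, StillCompiles) { EXPECT_TRUE(true); }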
diff --git a/MicroBenchmarks/libs/benchmark/googletest/googletest/include/gtest/gtest_pred_impl.h b/MicroBenchmarks/libs/benchmark/googletest/googletest/include/gtest/gtest_pred_impl.h --- a/MicroBenchmarks/libs/benchmark/googletest/googletest/include/gtest/gtest_pred_impl.h +++ b/MicroBenchmarks/libs/benchmark/googletest/googletest/include/gtest/gtest_pred_impl.h @@ -27,16 +27,21 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// This file is AUTOMATICALLY GENERATED on 01/02/2019 by command +// This file is AUTOMATICALLY GENERATED on 07/21/2021 by command // 'gen_gtest_pred_impl.py 5'. DO NOT EDIT BY HAND! // // Implements a family of generic predicate assertion macros. -// GOOGLETEST_CM0001 DO NOT DELETE + +// IWYU pragma: private, include "gtest/gtest.h" +// IWYU pragma: friend gtest/.* +// IWYU pragma: friend gmock/.* #ifndef GOOGLETEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_ #define GOOGLETEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_ -#include "gtest/gtest.h" +#include "gtest/gtest-assertion-result.h" +#include "gtest/internal/gtest-internal.h" +#include "gtest/internal/gtest-port.h" namespace testing { diff --git a/MicroBenchmarks/libs/benchmark/googletest/googletest/include/gtest/gtest_prod.h b/MicroBenchmarks/libs/benchmark/googletest/googletest/include/gtest/gtest_prod.h --- a/MicroBenchmarks/libs/benchmark/googletest/googletest/include/gtest/gtest_prod.h +++ b/MicroBenchmarks/libs/benchmark/googletest/googletest/include/gtest/gtest_prod.h @@ -27,9 +27,8 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// -// Google C++ Testing and Mocking Framework definitions useful in production code. -// GOOGLETEST_CM0003 DO NOT DELETE +// Google C++ Testing and Mocking Framework definitions useful in production +// code. #ifndef GOOGLETEST_INCLUDE_GTEST_GTEST_PROD_H_ #define GOOGLETEST_INCLUDE_GTEST_GTEST_PROD_H_ diff --git a/MicroBenchmarks/libs/benchmark/googletest/googletest/include/gtest/internal/gtest-death-test-internal.h b/MicroBenchmarks/libs/benchmark/googletest/googletest/include/gtest/internal/gtest-death-test-internal.h --- a/MicroBenchmarks/libs/benchmark/googletest/googletest/include/gtest/internal/gtest-death-test-internal.h +++ b/MicroBenchmarks/libs/benchmark/googletest/googletest/include/gtest/internal/gtest-death-test-internal.h @@ -26,12 +26,15 @@ // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// + // The Google C++ Testing and Mocking Framework (Google Test) // // This header file defines internal utilities needed for implementing // death tests. They are subject to change without notice. 
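The gtest_pred_impl.h hunk above breaks an include cycle: the generated header used to include gtest.h, which now includes it, so it depends only on the assertion-result, internal, and port headers. The macros it implements are the generic predicate assertions; a small usage sketch (the predicate is illustrative):

    #include "gtest/gtest.h"

    bool IsMultipleOf(int n, int divisor) { return n % divisor == 0; }

    TEST(PredAssertExample, TwoArgPredicate) {
      // On failure, prints the predicate expression and each argument.
      EXPECT_PRED2(IsMultipleOf, 12, 4);
    }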
-// GOOGLETEST_CM0001 DO NOT DELETE + +// IWYU pragma: private, include "gtest/gtest.h" +// IWYU pragma: friend gtest/.* +// IWYU pragma: friend gmock/.* #ifndef GOOGLETEST_INCLUDE_GTEST_INTERNAL_GTEST_DEATH_TEST_INTERNAL_H_ #define GOOGLETEST_INCLUDE_GTEST_INTERNAL_GTEST_DEATH_TEST_INTERNAL_H_ diff --git a/MicroBenchmarks/libs/benchmark/googletest/googletest/include/gtest/internal/gtest-filepath.h b/MicroBenchmarks/libs/benchmark/googletest/googletest/include/gtest/internal/gtest-filepath.h --- a/MicroBenchmarks/libs/benchmark/googletest/googletest/include/gtest/internal/gtest-filepath.h +++ b/MicroBenchmarks/libs/benchmark/googletest/googletest/include/gtest/internal/gtest-filepath.h @@ -26,7 +26,7 @@ // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// + // Google Test filepath utilities // // This header file declares classes and functions used internally by @@ -35,7 +35,9 @@ // This file is #included in gtest/internal/gtest-internal.h. // Do not include this header file separately! -// GOOGLETEST_CM0001 DO NOT DELETE +// IWYU pragma: private, include "gtest/gtest.h" +// IWYU pragma: friend gtest/.* +// IWYU pragma: friend gmock/.* #ifndef GOOGLETEST_INCLUDE_GTEST_INTERNAL_GTEST_FILEPATH_H_ #define GOOGLETEST_INCLUDE_GTEST_INTERNAL_GTEST_FILEPATH_H_ diff --git a/MicroBenchmarks/libs/benchmark/googletest/googletest/include/gtest/internal/gtest-internal.h b/MicroBenchmarks/libs/benchmark/googletest/googletest/include/gtest/internal/gtest-internal.h --- a/MicroBenchmarks/libs/benchmark/googletest/googletest/include/gtest/internal/gtest-internal.h +++ b/MicroBenchmarks/libs/benchmark/googletest/googletest/include/gtest/internal/gtest-internal.h @@ -26,13 +26,15 @@ // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// + // The Google C++ Testing and Mocking Framework (Google Test) // // This header file declares functions and macros used internally by // Google Test. They are subject to change without notice. -// GOOGLETEST_CM0001 DO NOT DELETE +// IWYU pragma: private, include "gtest/gtest.h" +// IWYU pragma: friend gtest/.* +// IWYU pragma: friend gmock/.* #ifndef GOOGLETEST_INCLUDE_GTEST_INTERNAL_GTEST_INTERNAL_H_ #define GOOGLETEST_INCLUDE_GTEST_INTERNAL_GTEST_INTERNAL_H_ @@ -510,11 +512,11 @@ template // Note that SuiteApiResolver inherits from T because -// SetUpTestSuite()/TearDownTestSuite() could be protected. Ths way +// SetUpTestSuite()/TearDownTestSuite() could be protected. This way // SuiteApiResolver can access them. struct SuiteApiResolver : T { // testing::Test is only forward declared at this point. So we make it a - // dependend class for the compiler to be OK with it. + // dependent class for the compiler to be OK with it. 
using Test = typename std::conditional::type; diff --git a/MicroBenchmarks/libs/benchmark/googletest/googletest/include/gtest/internal/gtest-param-util.h b/MicroBenchmarks/libs/benchmark/googletest/googletest/include/gtest/internal/gtest-param-util.h --- a/MicroBenchmarks/libs/benchmark/googletest/googletest/include/gtest/internal/gtest-param-util.h +++ b/MicroBenchmarks/libs/benchmark/googletest/googletest/include/gtest/internal/gtest-param-util.h @@ -27,10 +27,11 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - // Type and function utilities for implementing parameterized tests. -// GOOGLETEST_CM0001 DO NOT DELETE +// IWYU pragma: private, include "gtest/gtest.h" +// IWYU pragma: friend gtest/.* +// IWYU pragma: friend gmock/.* #ifndef GOOGLETEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_H_ #define GOOGLETEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_H_ diff --git a/MicroBenchmarks/libs/benchmark/googletest/googletest/include/gtest/internal/gtest-port-arch.h b/MicroBenchmarks/libs/benchmark/googletest/googletest/include/gtest/internal/gtest-port-arch.h --- a/MicroBenchmarks/libs/benchmark/googletest/googletest/include/gtest/internal/gtest-port-arch.h +++ b/MicroBenchmarks/libs/benchmark/googletest/googletest/include/gtest/internal/gtest-port-arch.h @@ -26,7 +26,7 @@ // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// + // The Google C++ Testing and Mocking Framework (Google Test) // // This header file defines the GTEST_OS_* macro. diff --git a/MicroBenchmarks/libs/benchmark/googletest/googletest/include/gtest/internal/gtest-port.h b/MicroBenchmarks/libs/benchmark/googletest/googletest/include/gtest/internal/gtest-port.h --- a/MicroBenchmarks/libs/benchmark/googletest/googletest/include/gtest/internal/gtest-port.h +++ b/MicroBenchmarks/libs/benchmark/googletest/googletest/include/gtest/internal/gtest-port.h @@ -26,7 +26,7 @@ // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// + // Low-level types and utilities for porting Google Test to various // platforms. All macros ending with _ and symbols defined in an // internal namespace are subject to change without notice. Code @@ -38,7 +38,9 @@ // files are expected to #include this. Therefore, it cannot #include // any other Google Test header. -// GOOGLETEST_CM0001 DO NOT DELETE +// IWYU pragma: private, include "gtest/gtest.h" +// IWYU pragma: friend gtest/.* +// IWYU pragma: friend gmock/.* #ifndef GOOGLETEST_INCLUDE_GTEST_INTERNAL_GTEST_PORT_H_ #define GOOGLETEST_INCLUDE_GTEST_INTERNAL_GTEST_PORT_H_ @@ -168,7 +170,6 @@ // GTEST_HAS_TYPED_TEST - typed tests // GTEST_HAS_TYPED_TEST_P - type-parameterized tests // GTEST_IS_THREADSAFE - Google Test is thread-safe. -// GOOGLETEST_CM0007 DO NOT DELETE // GTEST_USES_POSIX_RE - enhanced POSIX regex is used. Do not confuse with // GTEST_HAS_POSIX_RE (see above) which users can // define themselves. @@ -191,9 +192,7 @@ // GTEST_AMBIGUOUS_ELSE_BLOCKER_ - for disabling a gcc warning. // GTEST_ATTRIBUTE_UNUSED_ - declares that a class' instances or a // variable don't have to be used. -// GTEST_DISALLOW_ASSIGN_ - disables copy operator=. 
// GTEST_DISALLOW_COPY_AND_ASSIGN_ - disables copy ctor and operator=. -// GTEST_DISALLOW_MOVE_ASSIGN_ - disables move operator=. // GTEST_DISALLOW_MOVE_AND_ASSIGN_ - disables move ctor and operator=. // GTEST_MUST_USE_RESULT_ - declares that a function's result must be used. // GTEST_INTENTIONAL_CONST_COND_PUSH_ - start code section where MSVC C4127 is @@ -220,7 +219,6 @@ // Regular expressions: // RE - a simple regular expression class using the POSIX // Extended Regular Expression syntax on UNIX-like platforms -// GOOGLETEST_CM0008 DO NOT DELETE // or a reduced regular exception syntax on other // platforms, including Windows. // Logging: @@ -264,9 +262,17 @@ #include #include +// #include // Guarded by GTEST_IS_THREADSAFE below #include +#include #include +#include +#include +#include +// #include // Guarded by GTEST_IS_THREADSAFE below +#include #include +#include #ifndef _WIN32_WCE # include @@ -278,13 +284,6 @@ # include #endif -#include // NOLINT -#include -#include -#include // NOLINT -#include -#include // NOLINT - #include "gtest/internal/custom/gtest-port.h" #include "gtest/internal/gtest-port-arch.h" @@ -689,28 +688,19 @@ # define GTEST_ATTRIBUTE_PRINTF_(string_index, first_to_check) #endif - -// A macro to disallow copy operator= -// This should be used in the private: declarations for a class. -#define GTEST_DISALLOW_ASSIGN_(type) \ - type& operator=(type const &) = delete - // A macro to disallow copy constructor and operator= // This should be used in the private: declarations for a class. +// NOLINT is for modernize-use-trailing-return-type in macro uses. #define GTEST_DISALLOW_COPY_AND_ASSIGN_(type) \ type(type const&) = delete; \ - type& operator=(type const&) = delete - -// A macro to disallow move operator= -// This should be used in the private: declarations for a class. -#define GTEST_DISALLOW_MOVE_ASSIGN_(type) \ - type& operator=(type &&) noexcept = delete + type& operator=(type const&) = delete /* NOLINT */ // A macro to disallow move constructor and operator= // This should be used in the private: declarations for a class. +// NOLINT is for modernize-use-trailing-return-type in macro uses. #define GTEST_DISALLOW_MOVE_AND_ASSIGN_(type) \ type(type&&) noexcept = delete; \ - type& operator=(type&&) noexcept = delete + type& operator=(type&&) noexcept = delete /* NOLINT */ // Tell the compiler to warn about unused return values for functions declared // with this macro. The macro should be used on function declarations @@ -761,6 +751,12 @@ #endif // GTEST_IS_THREADSAFE +#if GTEST_IS_THREADSAFE +// Some platforms don't support including these threading related headers. +#include // NOLINT +#include // NOLINT +#endif // GTEST_IS_THREADSAFE + // GTEST_API_ qualifies all symbols that must be exported. The definitions below // are guarded by #ifndef to give embedders a chance to define GTEST_API_ in // gtest/internal/custom/gtest-port.h @@ -793,6 +789,20 @@ # define GTEST_NO_INLINE_ #endif +#if defined(__clang__) +// Nested ifs to avoid triggering MSVC warning. +#if __has_attribute(disable_tail_calls) +// Ask the compiler not to perform tail call optimization inside +// the marked function. +#define GTEST_NO_TAIL_CALL_ __attribute__((disable_tail_calls)) +#endif +#elif __GNUC__ +#define GTEST_NO_TAIL_CALL_ \ + __attribute__((optimize("no-optimize-sibling-calls"))) +#else +#define GTEST_NO_TAIL_CALL_ +#endif + // _LIBCPP_VERSION is defined by the libc++ library from the LLVM project. 
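The port.h cleanup above drops the assign-only GTEST_DISALLOW_ASSIGN_ and GTEST_DISALLOW_MOVE_ASSIGN_ macros and keeps the combined forms, now with NOLINT markers; the libc++ detection block continues below. Roughly what a use of the surviving macro expands to (class name illustrative):

    // GTEST_DISALLOW_COPY_AND_ASSIGN_(Widget) inside class Widget is
    // approximately equivalent to declaring:
    class Widget {
     private:
      Widget(Widget const&) = delete;             // no copy construction
      Widget& operator=(Widget const&) = delete;  // no copy assignment
    };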
#if !defined(GTEST_HAS_CXXABI_H_) # if defined(__GLIBCXX__) || (defined(_LIBCPP_VERSION) && !defined(_MSC_VER)) @@ -998,7 +1008,7 @@ // // GTEST_CHECK_ is an all-mode assert. It aborts the program if the condition // is not satisfied. -// Synopsys: +// Synopsis: // GTEST_CHECK_(boolean_condition); // or // GTEST_CHECK_(boolean_condition) << "Additional message"; @@ -1053,7 +1063,7 @@ // const Foo*). When you use ImplicitCast_, the compiler checks that // the cast is safe. Such explicit ImplicitCast_s are necessary in // surprisingly many situations where C++ demands an exact type match -// instead of an argument type convertable to a target type. +// instead of an argument type convertible to a target type. // // The syntax for using ImplicitCast_ is the same as for static_cast: // @@ -1165,71 +1175,8 @@ // Defines synchronization primitives. #if GTEST_IS_THREADSAFE -# if GTEST_HAS_PTHREAD -// Sleeps for (roughly) n milliseconds. This function is only for testing -// Google Test's own constructs. Don't use it in user tests, either -// directly or indirectly. -inline void SleepMilliseconds(int n) { - const timespec time = { - 0, // 0 seconds. - n * 1000L * 1000L, // And n ms. - }; - nanosleep(&time, nullptr); -} -# endif // GTEST_HAS_PTHREAD - -# if GTEST_HAS_NOTIFICATION_ -// Notification has already been imported into the namespace. -// Nothing to do here. - -# elif GTEST_HAS_PTHREAD -// Allows a controller thread to pause execution of newly created -// threads until notified. Instances of this class must be created -// and destroyed in the controller thread. -// -// This class is only for testing Google Test's own constructs. Do not -// use it in user tests, either directly or indirectly. -class Notification { - public: - Notification() : notified_(false) { - GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_init(&mutex_, nullptr)); - } - ~Notification() { - pthread_mutex_destroy(&mutex_); - } - - // Notifies all threads created with this notification to start. Must - // be called from the controller thread. - void Notify() { - pthread_mutex_lock(&mutex_); - notified_ = true; - pthread_mutex_unlock(&mutex_); - } - - // Blocks until the controller thread notifies. Must be called from a test - // thread. - void WaitForNotification() { - for (;;) { - pthread_mutex_lock(&mutex_); - const bool notified = notified_; - pthread_mutex_unlock(&mutex_); - if (notified) - break; - SleepMilliseconds(10); - } - } - - private: - pthread_mutex_t mutex_; - bool notified_; - - GTEST_DISALLOW_COPY_AND_ASSIGN_(Notification); -}; - -# elif GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_PHONE && !GTEST_OS_WINDOWS_RT - -GTEST_API_ void SleepMilliseconds(int n); +# if GTEST_OS_WINDOWS // Provides leak-safe Windows kernel handle ownership. // Used in death tests and in threading support. class GTEST_API_ AutoHandle { @@ -1258,23 +1205,45 @@ GTEST_DISALLOW_COPY_AND_ASSIGN_(AutoHandle); }; +# endif + +# if GTEST_HAS_NOTIFICATION_ +// Notification has already been imported into the namespace. +// Nothing to do here. +# else // Allows a controller thread to pause execution of newly created // threads until notified. Instances of this class must be created // and destroyed in the controller thread. // // This class is only for testing Google Test's own constructs. Do not // use it in user tests, either directly or indirectly. +// TODO(b/203539622): Replace unconditionally with absl::Notification. 
class GTEST_API_ Notification { public: - Notification(); - void Notify(); - void WaitForNotification(); + Notification() : notified_(false) {} + Notification(const Notification&) = delete; + Notification& operator=(const Notification&) = delete; - private: - AutoHandle event_; + // Notifies all threads created with this notification to start. Must + // be called from the controller thread. + void Notify() { + std::lock_guard<std::mutex> lock(mu_); + notified_ = true; + cv_.notify_all(); + } - GTEST_DISALLOW_COPY_AND_ASSIGN_(Notification); + // Blocks until the controller thread notifies. Must be called from a test + // thread. + void WaitForNotification() { + std::unique_lock<std::mutex> lock(mu_); + cv_.wait(lock, [this]() { return notified_; }); + } + + private: + std::mutex mu_; + std::condition_variable cv_; + bool notified_; }; # endif // GTEST_HAS_NOTIFICATION_ @@ -2219,29 +2188,29 @@ #define GTEST_DECLARE_bool_(name) \ namespace testing { \ GTEST_API_ extern bool GTEST_FLAG(name); \ - } + } static_assert(true, "no-op to require trailing semicolon") #define GTEST_DECLARE_int32_(name) \ namespace testing { \ GTEST_API_ extern std::int32_t GTEST_FLAG(name); \ - } + } static_assert(true, "no-op to require trailing semicolon") #define GTEST_DECLARE_string_(name) \ namespace testing { \ GTEST_API_ extern ::std::string GTEST_FLAG(name); \ - } + } static_assert(true, "no-op to require trailing semicolon") // Macros for defining flags. #define GTEST_DEFINE_bool_(name, default_val, doc) \ namespace testing { \ GTEST_API_ bool GTEST_FLAG(name) = (default_val); \ - } + } static_assert(true, "no-op to require trailing semicolon") #define GTEST_DEFINE_int32_(name, default_val, doc) \ namespace testing { \ GTEST_API_ std::int32_t GTEST_FLAG(name) = (default_val); \ - } + } static_assert(true, "no-op to require trailing semicolon") #define GTEST_DEFINE_string_(name, default_val, doc) \ namespace testing { \ GTEST_API_ ::std::string GTEST_FLAG(name) = (default_val); \ - } + } static_assert(true, "no-op to require trailing semicolon") #endif // !defined(GTEST_DECLARE_bool_) @@ -2329,6 +2298,7 @@ namespace internal { template <typename T> using Optional = ::absl::optional<T>; +inline ::absl::nullopt_t Nullopt() { return ::absl::nullopt; } } // namespace internal } // namespace testing #else @@ -2342,6 +2312,7 @@ namespace internal { template <typename T> using Optional = ::std::optional<T>; +inline ::std::nullopt_t Nullopt() { return ::std::nullopt; } } // namespace internal } // namespace testing // The case where absl is configured NOT to alias std::optional is not diff --git a/MicroBenchmarks/libs/benchmark/googletest/googletest/include/gtest/internal/gtest-string.h b/MicroBenchmarks/libs/benchmark/googletest/googletest/include/gtest/internal/gtest-string.h --- a/MicroBenchmarks/libs/benchmark/googletest/googletest/include/gtest/internal/gtest-string.h +++ b/MicroBenchmarks/libs/benchmark/googletest/googletest/include/gtest/internal/gtest-string.h @@ -26,7 +26,7 @@ // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// + // The Google C++ Testing and Mocking Framework (Google Test) // // This header file declares the String class and functions used internally by @@ -36,7 +36,9 @@ // This header file is #included by gtest-internal.h. // It should not be #included by other files.
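The rewritten Notification above replaces the deleted pthread version, which polled notified_ every 10 ms, with a blocking condition-variable wait. A hedged usage sketch with a self-contained stand-in class (the real one is gtest-internal, not public API):

```cpp
#include <condition_variable>
#include <mutex>
#include <thread>

class Notification {
 public:
  void Notify() {
    std::lock_guard<std::mutex> lock(mu_);
    notified_ = true;
    cv_.notify_all();  // Wake every waiter; no periodic polling needed.
  }
  void WaitForNotification() {
    std::unique_lock<std::mutex> lock(mu_);
    cv_.wait(lock, [this] { return notified_; });  // Predicate guards spurious wakeups.
  }

 private:
  std::mutex mu_;
  std::condition_variable cv_;
  bool notified_ = false;
};

int main() {
  Notification start;
  std::thread worker([&] { start.WaitForNotification(); /* ...work... */ });
  start.Notify();  // Controller thread releases the worker immediately.
  worker.join();
}
```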
-// GOOGLETEST_CM0001 DO NOT DELETE +// IWYU pragma: private, include "gtest/gtest.h" +// IWYU pragma: friend gtest/.* +// IWYU pragma: friend gmock/.* #ifndef GOOGLETEST_INCLUDE_GTEST_INTERNAL_GTEST_STRING_H_ #define GOOGLETEST_INCLUDE_GTEST_INTERNAL_GTEST_STRING_H_ diff --git a/MicroBenchmarks/libs/benchmark/googletest/googletest/include/gtest/internal/gtest-type-util.h b/MicroBenchmarks/libs/benchmark/googletest/googletest/include/gtest/internal/gtest-type-util.h --- a/MicroBenchmarks/libs/benchmark/googletest/googletest/include/gtest/internal/gtest-type-util.h +++ b/MicroBenchmarks/libs/benchmark/googletest/googletest/include/gtest/internal/gtest-type-util.h @@ -30,7 +30,9 @@ // Type utilities needed for implementing typed and type-parameterized // tests. -// GOOGLETEST_CM0001 DO NOT DELETE +// IWYU pragma: private, include "gtest/gtest.h" +// IWYU pragma: friend gtest/.* +// IWYU pragma: friend gmock/.* #ifndef GOOGLETEST_INCLUDE_GTEST_INTERNAL_GTEST_TYPE_UTIL_H_ #define GOOGLETEST_INCLUDE_GTEST_INTERNAL_GTEST_TYPE_UTIL_H_ diff --git a/MicroBenchmarks/libs/benchmark/googletest/googletest/samples/sample9_unittest.cc b/MicroBenchmarks/libs/benchmark/googletest/googletest/samples/sample9_unittest.cc --- a/MicroBenchmarks/libs/benchmark/googletest/googletest/samples/sample9_unittest.cc +++ b/MicroBenchmarks/libs/benchmark/googletest/googletest/samples/sample9_unittest.cc @@ -29,7 +29,7 @@ // This sample shows how to use Google Test listener API to implement // an alternative console output and how to use the UnitTest reflection API -// to enumerate test cases and tests and to inspect their results. +// to enumerate test suites and tests and to inspect their results. #include @@ -38,7 +38,7 @@ using ::testing::EmptyTestEventListener; using ::testing::InitGoogleTest; using ::testing::Test; -using ::testing::TestCase; +using ::testing::TestSuite; using ::testing::TestEventListeners; using ::testing::TestInfo; using ::testing::TestPartResult; @@ -61,7 +61,7 @@ void OnTestStart(const TestInfo& test_info) override { fprintf(stdout, "*** Test %s.%s starting.\n", - test_info.test_case_name(), + test_info.test_suite_name(), test_info.name()); fflush(stdout); } @@ -81,7 +81,7 @@ void OnTestEnd(const TestInfo& test_info) override { fprintf(stdout, "*** Test %s.%s ending.\n", - test_info.test_case_name(), + test_info.test_suite_name(), test_info.name()); fflush(stdout); } diff --git a/MicroBenchmarks/libs/benchmark/googletest/googletest/scripts/README.md b/MicroBenchmarks/libs/benchmark/googletest/googletest/scripts/README.md deleted file mode 100644 --- a/MicroBenchmarks/libs/benchmark/googletest/googletest/scripts/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# Please Note: - -Files in this directory are no longer supported by the maintainers. They -represent mosty historical artifacts and supported by the community only. There -is no guarantee whatsoever that these scripts still work. diff --git a/MicroBenchmarks/libs/benchmark/googletest/googletest/scripts/common.py b/MicroBenchmarks/libs/benchmark/googletest/googletest/scripts/common.py deleted file mode 100644 --- a/MicroBenchmarks/libs/benchmark/googletest/googletest/scripts/common.py +++ /dev/null @@ -1,83 +0,0 @@ -# Copyright 2013 Google Inc. All Rights Reserved. 
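sample9's listener now reports test_suite_name() in place of the deprecated test_case_name(). A condensed sketch of the same listener pattern; TersePrinter is a hypothetical name:

```cpp
#include <cstdio>
#include "gtest/gtest.h"

class TersePrinter : public ::testing::EmptyTestEventListener {
  void OnTestStart(const ::testing::TestInfo& info) override {
    std::printf("*** Test %s.%s starting.\n", info.test_suite_name(), info.name());
  }
};

TEST(Demo, Passes) { EXPECT_TRUE(true); }

int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
  ::testing::UnitTest::GetInstance()->listeners().Append(
      new TersePrinter);  // The framework takes ownership of the listener.
  return RUN_ALL_TESTS();
}
```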
-# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""Shared utilities for writing scripts for Google Test/Mock.""" - -__author__ = 'wan@google.com (Zhanyong Wan)' - - -import os -import re - - -# Matches the line from 'svn info .' output that describes what SVN -# path the current local directory corresponds to. For example, in -# a googletest SVN workspace's trunk/test directory, the output will be: -# -# URL: https://googletest.googlecode.com/svn/trunk/test -_SVN_INFO_URL_RE = re.compile(r'^URL: https://(\w+)\.googlecode\.com/svn(.*)') - - -def GetCommandOutput(command): - """Runs the shell command and returns its stdout as a list of lines.""" - - f = os.popen(command, 'r') - lines = [line.strip() for line in f.readlines()] - f.close() - return lines - - -def GetSvnInfo(): - """Returns the project name and the current SVN workspace's root path.""" - - for line in GetCommandOutput('svn info .'): - m = _SVN_INFO_URL_RE.match(line) - if m: - project = m.group(1) # googletest or googlemock - rel_path = m.group(2) - root = os.path.realpath(rel_path.count('/') * '../') - return project, root - - return None, None - - -def GetSvnTrunk(): - """Returns the current SVN workspace's trunk root path.""" - - _, root = GetSvnInfo() - return root + '/trunk' if root else None - - -def IsInGTestSvn(): - project, _ = GetSvnInfo() - return project == 'googletest' - - -def IsInGMockSvn(): - project, _ = GetSvnInfo() - return project == 'googlemock' diff --git a/MicroBenchmarks/libs/benchmark/googletest/googletest/scripts/fuse_gtest_files.py b/MicroBenchmarks/libs/benchmark/googletest/googletest/scripts/fuse_gtest_files.py deleted file mode 100755 --- a/MicroBenchmarks/libs/benchmark/googletest/googletest/scripts/fuse_gtest_files.py +++ /dev/null @@ -1,253 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2009, Google Inc. -# All rights reserved. 
-# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""fuse_gtest_files.py v0.2.0 -Fuses Google Test source code into a .h file and a .cc file. - -SYNOPSIS - fuse_gtest_files.py [GTEST_ROOT_DIR] OUTPUT_DIR - - Scans GTEST_ROOT_DIR for Google Test source code, and generates - two files: OUTPUT_DIR/gtest/gtest.h and OUTPUT_DIR/gtest/gtest-all.cc. - Then you can build your tests by adding OUTPUT_DIR to the include - search path and linking with OUTPUT_DIR/gtest/gtest-all.cc. These - two files contain everything you need to use Google Test. Hence - you can "install" Google Test by copying them to wherever you want. - - GTEST_ROOT_DIR can be omitted and defaults to the parent - directory of the directory holding this script. - -EXAMPLES - ./fuse_gtest_files.py fused_gtest - ./fuse_gtest_files.py path/to/unpacked/gtest fused_gtest - -This tool is experimental. In particular, it assumes that there is no -conditional inclusion of Google Test headers. Please report any -problems to googletestframework@googlegroups.com. You can read -https://github.com/google/googletest/blob/master/googletest/docs/advanced.md for -more information. -""" - -__author__ = 'wan@google.com (Zhanyong Wan)' - -import os -import re -try: - from sets import Set as set # For Python 2.3 compatibility -except ImportError: - pass -import sys - -# We assume that this file is in the scripts/ directory in the Google -# Test root directory. -DEFAULT_GTEST_ROOT_DIR = os.path.join(os.path.dirname(__file__), '..') - -# Regex for matching '#include "gtest/..."'. -INCLUDE_GTEST_FILE_REGEX = re.compile(r'^\s*#\s*include\s*"(gtest/.+)"') - -# Regex for matching '#include "src/..."'. -INCLUDE_SRC_FILE_REGEX = re.compile(r'^\s*#\s*include\s*"(src/.+)"') - -# Where to find the source seed files. -GTEST_H_SEED = 'include/gtest/gtest.h' -GTEST_SPI_H_SEED = 'include/gtest/gtest-spi.h' -GTEST_ALL_CC_SEED = 'src/gtest-all.cc' - -# Where to put the generated files. 
-GTEST_H_OUTPUT = 'gtest/gtest.h' -GTEST_ALL_CC_OUTPUT = 'gtest/gtest-all.cc' - - -def VerifyFileExists(directory, relative_path): - """Verifies that the given file exists; aborts on failure. - - relative_path is the file path relative to the given directory. - """ - - if not os.path.isfile(os.path.join(directory, relative_path)): - print('ERROR: Cannot find %s in directory %s.' % (relative_path, - directory)) - print('Please either specify a valid project root directory ' - 'or omit it on the command line.') - sys.exit(1) - - -def ValidateGTestRootDir(gtest_root): - """Makes sure gtest_root points to a valid gtest root directory. - - The function aborts the program on failure. - """ - - VerifyFileExists(gtest_root, GTEST_H_SEED) - VerifyFileExists(gtest_root, GTEST_ALL_CC_SEED) - - -def VerifyOutputFile(output_dir, relative_path): - """Verifies that the given output file path is valid. - - relative_path is relative to the output_dir directory. - """ - - # Makes sure the output file either doesn't exist or can be overwritten. - output_file = os.path.join(output_dir, relative_path) - if os.path.exists(output_file): - # TODO(wan@google.com): The following user-interaction doesn't - # work with automated processes. We should provide a way for the - # Makefile to force overwriting the files. - print('%s already exists in directory %s - overwrite it? (y/N) ' % - (relative_path, output_dir)) - answer = sys.stdin.readline().strip() - if answer not in ['y', 'Y']: - print('ABORTED.') - sys.exit(1) - - # Makes sure the directory holding the output file exists; creates - # it and all its ancestors if necessary. - parent_directory = os.path.dirname(output_file) - if not os.path.isdir(parent_directory): - os.makedirs(parent_directory) - - -def ValidateOutputDir(output_dir): - """Makes sure output_dir points to a valid output directory. - - The function aborts the program on failure. - """ - - VerifyOutputFile(output_dir, GTEST_H_OUTPUT) - VerifyOutputFile(output_dir, GTEST_ALL_CC_OUTPUT) - - -def FuseGTestH(gtest_root, output_dir): - """Scans folder gtest_root to generate gtest/gtest.h in output_dir.""" - - output_file = open(os.path.join(output_dir, GTEST_H_OUTPUT), 'w') - processed_files = set() # Holds all gtest headers we've processed. - - def ProcessFile(gtest_header_path): - """Processes the given gtest header file.""" - - # We don't process the same header twice. - if gtest_header_path in processed_files: - return - - processed_files.add(gtest_header_path) - - # Reads each line in the given gtest header. - for line in open(os.path.join(gtest_root, gtest_header_path), 'r'): - m = INCLUDE_GTEST_FILE_REGEX.match(line) - if m: - # It's '#include "gtest/..."' - let's process it recursively. - ProcessFile('include/' + m.group(1)) - else: - # Otherwise we copy the line unchanged to the output file. - output_file.write(line) - - ProcessFile(GTEST_H_SEED) - output_file.close() - - -def FuseGTestAllCcToFile(gtest_root, output_file): - """Scans folder gtest_root to generate gtest/gtest-all.cc in output_file.""" - - processed_files = set() - - def ProcessFile(gtest_source_file): - """Processes the given gtest source file.""" - - # We don't process the same #included file twice. - if gtest_source_file in processed_files: - return - - processed_files.add(gtest_source_file) - - # Reads each line in the given gtest source file. 
- for line in open(os.path.join(gtest_root, gtest_source_file), 'r'): - m = INCLUDE_GTEST_FILE_REGEX.match(line) - if m: - if 'include/' + m.group(1) == GTEST_SPI_H_SEED: - # It's '#include "gtest/gtest-spi.h"'. This file is not - # #included by "gtest/gtest.h", so we need to process it. - ProcessFile(GTEST_SPI_H_SEED) - else: - # It's '#include "gtest/foo.h"' where foo is not gtest-spi. - # We treat it as '#include "gtest/gtest.h"', as all other - # gtest headers are being fused into gtest.h and cannot be - # #included directly. - - # There is no need to #include "gtest/gtest.h" more than once. - if not GTEST_H_SEED in processed_files: - processed_files.add(GTEST_H_SEED) - output_file.write('#include "%s"\n' % (GTEST_H_OUTPUT,)) - else: - m = INCLUDE_SRC_FILE_REGEX.match(line) - if m: - # It's '#include "src/foo"' - let's process it recursively. - ProcessFile(m.group(1)) - else: - output_file.write(line) - - ProcessFile(GTEST_ALL_CC_SEED) - - -def FuseGTestAllCc(gtest_root, output_dir): - """Scans folder gtest_root to generate gtest/gtest-all.cc in output_dir.""" - - output_file = open(os.path.join(output_dir, GTEST_ALL_CC_OUTPUT), 'w') - FuseGTestAllCcToFile(gtest_root, output_file) - output_file.close() - - -def FuseGTest(gtest_root, output_dir): - """Fuses gtest.h and gtest-all.cc.""" - - ValidateGTestRootDir(gtest_root) - ValidateOutputDir(output_dir) - - FuseGTestH(gtest_root, output_dir) - FuseGTestAllCc(gtest_root, output_dir) - - -def main(): - argc = len(sys.argv) - if argc == 2: - # fuse_gtest_files.py OUTPUT_DIR - FuseGTest(DEFAULT_GTEST_ROOT_DIR, sys.argv[1]) - elif argc == 3: - # fuse_gtest_files.py GTEST_ROOT_DIR OUTPUT_DIR - FuseGTest(sys.argv[1], sys.argv[2]) - else: - print(__doc__) - sys.exit(1) - - -if __name__ == '__main__': - main() diff --git a/MicroBenchmarks/libs/benchmark/googletest/googletest/scripts/gen_gtest_pred_impl.py b/MicroBenchmarks/libs/benchmark/googletest/googletest/scripts/gen_gtest_pred_impl.py deleted file mode 100755 --- a/MicroBenchmarks/libs/benchmark/googletest/googletest/scripts/gen_gtest_pred_impl.py +++ /dev/null @@ -1,733 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2006, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. 
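For reference, the consumer-side build that the deleted fuse_gtest_files.py synopsis describes comes down to one header and one translation unit. A hypothetical sketch; the compile commands in the comment are an assumption based on that synopsis, not taken from the repository:

```cpp
// test_main.cc -- built against the fused output roughly as:
//   g++ -I fused_gtest -c fused_gtest/gtest/gtest-all.cc
//   g++ -I fused_gtest test_main.cc gtest-all.o -o tests -pthread
#include "gtest/gtest.h"  // The single fused header.

TEST(FusedBuildSmoke, Arithmetic) { EXPECT_EQ(4, 2 + 2); }

int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}
```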
IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""gen_gtest_pred_impl.py v0.1 - -Generates the implementation of Google Test predicate assertions and -accompanying tests. - -Usage: - - gen_gtest_pred_impl.py MAX_ARITY - -where MAX_ARITY is a positive integer. - -The command generates the implementation of up-to MAX_ARITY-ary -predicate assertions, and writes it to file gtest_pred_impl.h in the -directory where the script is. It also generates the accompanying -unit test in file gtest_pred_impl_unittest.cc. -""" - -__author__ = 'wan@google.com (Zhanyong Wan)' - -import os -import sys -import time - -# Where this script is. -SCRIPT_DIR = os.path.dirname(sys.argv[0]) - -# Where to store the generated header. -HEADER = os.path.join(SCRIPT_DIR, '../include/gtest/gtest_pred_impl.h') - -# Where to store the generated unit test. -UNIT_TEST = os.path.join(SCRIPT_DIR, '../test/gtest_pred_impl_unittest.cc') - - -def HeaderPreamble(n): - """Returns the preamble for the header file. - - Args: - n: the maximum arity of the predicate macros to be generated. - """ - - # A map that defines the values used in the preamble template. - DEFS = { - 'today' : time.strftime('%m/%d/%Y'), - 'year' : time.strftime('%Y'), - 'command' : '%s %s' % (os.path.basename(sys.argv[0]), n), - 'n' : n - } - - return ( - """// Copyright 2006, Google Inc. -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// This file is AUTOMATICALLY GENERATED on %(today)s by command -// '%(command)s'. DO NOT EDIT BY HAND! -// -// Implements a family of generic predicate assertion macros. 
-// GOOGLETEST_CM0001 DO NOT DELETE - - -#ifndef GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_ -#define GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_ - -#include "gtest/gtest.h" - -namespace testing { - -// This header implements a family of generic predicate assertion -// macros: -// -// ASSERT_PRED_FORMAT1(pred_format, v1) -// ASSERT_PRED_FORMAT2(pred_format, v1, v2) -// ... -// -// where pred_format is a function or functor that takes n (in the -// case of ASSERT_PRED_FORMATn) values and their source expression -// text, and returns a testing::AssertionResult. See the definition -// of ASSERT_EQ in gtest.h for an example. -// -// If you don't care about formatting, you can use the more -// restrictive version: -// -// ASSERT_PRED1(pred, v1) -// ASSERT_PRED2(pred, v1, v2) -// ... -// -// where pred is an n-ary function or functor that returns bool, -// and the values v1, v2, ..., must support the << operator for -// streaming to std::ostream. -// -// We also define the EXPECT_* variations. -// -// For now we only support predicates whose arity is at most %(n)s. -// Please email googletestframework@googlegroups.com if you need -// support for higher arities. - -// GTEST_ASSERT_ is the basic statement to which all of the assertions -// in this file reduce. Don't use this in your code. - -#define GTEST_ASSERT_(expression, on_failure) \\ - GTEST_AMBIGUOUS_ELSE_BLOCKER_ \\ - if (const ::testing::AssertionResult gtest_ar = (expression)) \\ - ; \\ - else \\ - on_failure(gtest_ar.failure_message()) -""" % DEFS) - - -def Arity(n): - """Returns the English name of the given arity.""" - - if n < 0: - return None - elif n <= 3: - return ['nullary', 'unary', 'binary', 'ternary'][n] - else: - return '%s-ary' % n - - -def Title(word): - """Returns the given word in title case. The difference between - this and string's title() method is that Title('4-ary') is '4-ary' - while '4-ary'.title() is '4-Ary'.""" - - return word[0].upper() + word[1:] - - -def OneTo(n): - """Returns the list [1, 2, 3, ..., n].""" - - return range(1, n + 1) - - -def Iter(n, format, sep=''): - """Given a positive integer n, a format string that contains 0 or - more '%s' format specs, and optionally a separator string, returns - the join of n strings, each formatted with the format string on an - iterator ranged from 1 to n. - - Example: - - Iter(3, 'v%s', sep=', ') returns 'v1, v2, v3'. - """ - - # How many '%s' specs are in format? - spec_count = len(format.split('%s')) - 1 - return sep.join([format % (spec_count * (i,)) for i in OneTo(n)]) - - -def ImplementationForArity(n): - """Returns the implementation of n-ary predicate assertions.""" - - # A map the defines the values used in the implementation template. - DEFS = { - 'n' : str(n), - 'vs' : Iter(n, 'v%s', sep=', '), - 'vts' : Iter(n, '#v%s', sep=', '), - 'arity' : Arity(n), - 'Arity' : Title(Arity(n)) - } - - impl = """ - -// Helper function for implementing {EXPECT|ASSERT}_PRED%(n)s. Don't use -// this in your code. 
-template -AssertionResult AssertPred%(n)sHelper(const char* pred_text""" % DEFS - - impl += Iter(n, """, - const char* e%s""") - - impl += """, - Pred pred""" - - impl += Iter(n, """, - const T%s& v%s""") - - impl += """) { - if (pred(%(vs)s)) return AssertionSuccess(); - -""" % DEFS - - impl += ' return AssertionFailure() << pred_text << "("' - - impl += Iter(n, """ - << e%s""", sep=' << ", "') - - impl += ' << ") evaluates to false, where"' - - impl += Iter( - n, """ - << "\\n" << e%s << " evaluates to " << ::testing::PrintToString(v%s)""" - ) - - impl += """; -} - -// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT%(n)s. -// Don't use this in your code. -#define GTEST_PRED_FORMAT%(n)s_(pred_format, %(vs)s, on_failure)\\ - GTEST_ASSERT_(pred_format(%(vts)s, %(vs)s), \\ - on_failure) - -// Internal macro for implementing {EXPECT|ASSERT}_PRED%(n)s. Don't use -// this in your code. -#define GTEST_PRED%(n)s_(pred, %(vs)s, on_failure)\\ - GTEST_ASSERT_(::testing::AssertPred%(n)sHelper(#pred""" % DEFS - - impl += Iter(n, """, \\ - #v%s""") - - impl += """, \\ - pred""" - - impl += Iter(n, """, \\ - v%s""") - - impl += """), on_failure) - -// %(Arity)s predicate assertion macros. -#define EXPECT_PRED_FORMAT%(n)s(pred_format, %(vs)s) \\ - GTEST_PRED_FORMAT%(n)s_(pred_format, %(vs)s, GTEST_NONFATAL_FAILURE_) -#define EXPECT_PRED%(n)s(pred, %(vs)s) \\ - GTEST_PRED%(n)s_(pred, %(vs)s, GTEST_NONFATAL_FAILURE_) -#define ASSERT_PRED_FORMAT%(n)s(pred_format, %(vs)s) \\ - GTEST_PRED_FORMAT%(n)s_(pred_format, %(vs)s, GTEST_FATAL_FAILURE_) -#define ASSERT_PRED%(n)s(pred, %(vs)s) \\ - GTEST_PRED%(n)s_(pred, %(vs)s, GTEST_FATAL_FAILURE_) - -""" % DEFS - - return impl - - -def HeaderPostamble(): - """Returns the postamble for the header file.""" - - return """ - -} // namespace testing - -#endif // GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_ -""" - - -def GenerateFile(path, content): - """Given a file path and a content string - overwrites it with the given content. - """ - print 'Updating file %s . . .' % path - f = file(path, 'w+') - print >>f, content, - f.close() - - print 'File %s has been updated.' % path - - -def GenerateHeader(n): - """Given the maximum arity n, updates the header file that implements - the predicate assertions. - """ - GenerateFile(HEADER, - HeaderPreamble(n) - + ''.join([ImplementationForArity(i) for i in OneTo(n)]) - + HeaderPostamble()) - - -def UnitTestPreamble(): - """Returns the preamble for the unit test file.""" - - # A map that defines the values used in the preamble template. - DEFS = { - 'today' : time.strftime('%m/%d/%Y'), - 'year' : time.strftime('%Y'), - 'command' : '%s %s' % (os.path.basename(sys.argv[0]), sys.argv[1]), - } - - return ( - """// Copyright 2006, Google Inc. -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. 
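The GTEST_PRED_FORMATn_ plumbing above expects a predicate-formatter: a callable that receives the argument expressions as text plus the values and returns testing::AssertionResult. A minimal hand-written one (IsEven is a hypothetical example):

```cpp
#include "gtest/gtest.h"

static testing::AssertionResult IsEven(const char* expr, int n) {
  if (n % 2 == 0) return testing::AssertionSuccess();
  return testing::AssertionFailure()
         << expr << " evaluates to " << n << ", which is not even";
}

TEST(PredFormatDemo, Unary) {
  EXPECT_PRED_FORMAT1(IsEven, 4);  // A failure would use the custom message.
}
```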
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// This file is AUTOMATICALLY GENERATED on %(today)s by command -// '%(command)s'. DO NOT EDIT BY HAND! - -// Regression test for gtest_pred_impl.h -// -// This file is generated by a script and quite long. If you intend to -// learn how Google Test works by reading its unit tests, read -// gtest_unittest.cc instead. -// -// This is intended as a regression test for the Google Test predicate -// assertions. We compile it as part of the gtest_unittest target -// only to keep the implementation tidy and compact, as it is quite -// involved to set up the stage for testing Google Test using Google -// Test itself. -// -// Currently, gtest_unittest takes ~11 seconds to run in the testing -// daemon. In the future, if it grows too large and needs much more -// time to finish, we should consider separating this file into a -// stand-alone regression test. - -#include - -#include "gtest/gtest.h" -#include "gtest/gtest-spi.h" - -// A user-defined data type. -struct Bool { - explicit Bool(int val) : value(val != 0) {} - - bool operator>(int n) const { return value > Bool(n).value; } - - Bool operator+(const Bool& rhs) const { return Bool(value + rhs.value); } - - bool operator==(const Bool& rhs) const { return value == rhs.value; } - - bool value; -}; - -// Enables Bool to be used in assertions. -std::ostream& operator<<(std::ostream& os, const Bool& x) { - return os << (x.value ? "true" : "false"); -} - -""" % DEFS) - - -def TestsForArity(n): - """Returns the tests for n-ary predicate assertions.""" - - # A map that defines the values used in the template for the tests. - DEFS = { - 'n' : n, - 'es' : Iter(n, 'e%s', sep=', '), - 'vs' : Iter(n, 'v%s', sep=', '), - 'vts' : Iter(n, '#v%s', sep=', '), - 'tvs' : Iter(n, 'T%s v%s', sep=', '), - 'int_vs' : Iter(n, 'int v%s', sep=', '), - 'Bool_vs' : Iter(n, 'Bool v%s', sep=', '), - 'types' : Iter(n, 'typename T%s', sep=', '), - 'v_sum' : Iter(n, 'v%s', sep=' + '), - 'arity' : Arity(n), - 'Arity' : Title(Arity(n)), - } - - tests = ( - """// Sample functions/functors for testing %(arity)s predicate assertions. - -// A %(arity)s predicate function. -template <%(types)s> -bool PredFunction%(n)s(%(tvs)s) { - return %(v_sum)s > 0; -} - -// The following two functions are needed because a compiler doesn't have -// a context yet to know which template function must be instantiated. -bool PredFunction%(n)sInt(%(int_vs)s) { - return %(v_sum)s > 0; -} -bool PredFunction%(n)sBool(%(Bool_vs)s) { - return %(v_sum)s > 0; -} -""" % DEFS) - - tests += """ -// A %(arity)s predicate functor. 
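As with the Bool helper in the generated preamble above, a user-defined type only needs a comparison plus operator<< to produce readable assertion failures. A hypothetical sketch:

```cpp
#include <ostream>
#include "gtest/gtest.h"

struct Celsius {
  double value;
};
inline bool operator==(const Celsius& a, const Celsius& b) { return a.value == b.value; }
// operator<< is what lets a failing EXPECT_EQ print the values.
inline std::ostream& operator<<(std::ostream& os, const Celsius& c) {
  return os << c.value << " C";
}

TEST(PrintDemo, UserDefinedType) { EXPECT_EQ(Celsius{21.0}, Celsius{21.0}); }
```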
-struct PredFunctor%(n)s { - template <%(types)s> - bool operator()(""" % DEFS - - tests += Iter(n, 'const T%s& v%s', sep=""", - """) - - tests += """) { - return %(v_sum)s > 0; - } -}; -""" % DEFS - - tests += """ -// A %(arity)s predicate-formatter function. -template <%(types)s> -testing::AssertionResult PredFormatFunction%(n)s(""" % DEFS - - tests += Iter(n, 'const char* e%s', sep=""", - """) - - tests += Iter(n, """, - const T%s& v%s""") - - tests += """) { - if (PredFunction%(n)s(%(vs)s)) - return testing::AssertionSuccess(); - - return testing::AssertionFailure() - << """ % DEFS - - tests += Iter(n, 'e%s', sep=' << " + " << ') - - tests += """ - << " is expected to be positive, but evaluates to " - << %(v_sum)s << "."; -} -""" % DEFS - - tests += """ -// A %(arity)s predicate-formatter functor. -struct PredFormatFunctor%(n)s { - template <%(types)s> - testing::AssertionResult operator()(""" % DEFS - - tests += Iter(n, 'const char* e%s', sep=""", - """) - - tests += Iter(n, """, - const T%s& v%s""") - - tests += """) const { - return PredFormatFunction%(n)s(%(es)s, %(vs)s); - } -}; -""" % DEFS - - tests += """ -// Tests for {EXPECT|ASSERT}_PRED_FORMAT%(n)s. - -class Predicate%(n)sTest : public testing::Test { - protected: - void SetUp() override { - expected_to_finish_ = true; - finished_ = false;""" % DEFS - - tests += """ - """ + Iter(n, 'n%s_ = ') + """0; - } -""" - - tests += """ - void TearDown() override { - // Verifies that each of the predicate's arguments was evaluated - // exactly once.""" - - tests += ''.join([""" - EXPECT_EQ(1, n%s_) << - "The predicate assertion didn't evaluate argument %s " - "exactly once.";""" % (i, i + 1) for i in OneTo(n)]) - - tests += """ - - // Verifies that the control flow in the test function is expected. - if (expected_to_finish_ && !finished_) { - FAIL() << "The predicate assertion unexpactedly aborted the test."; - } else if (!expected_to_finish_ && finished_) { - FAIL() << "The failed predicate assertion didn't abort the test " - "as expected."; - } - } - - // true if and only if the test function is expected to run to finish. - static bool expected_to_finish_; - - // true if and only if the test function did run to finish. - static bool finished_; -""" % DEFS - - tests += Iter(n, """ - static int n%s_;""") - - tests += """ -}; - -bool Predicate%(n)sTest::expected_to_finish_; -bool Predicate%(n)sTest::finished_; -""" % DEFS - - tests += Iter(n, """int Predicate%%(n)sTest::n%s_; -""") % DEFS - - tests += """ -typedef Predicate%(n)sTest EXPECT_PRED_FORMAT%(n)sTest; -typedef Predicate%(n)sTest ASSERT_PRED_FORMAT%(n)sTest; -typedef Predicate%(n)sTest EXPECT_PRED%(n)sTest; -typedef Predicate%(n)sTest ASSERT_PRED%(n)sTest; -""" % DEFS - - def GenTest(use_format, use_assert, expect_failure, - use_functor, use_user_type): - """Returns the test for a predicate assertion macro. - - Args: - use_format: true if and only if the assertion is a *_PRED_FORMAT*. - use_assert: true if and only if the assertion is a ASSERT_*. - expect_failure: true if and only if the assertion is expected to fail. - use_functor: true if and only if the first argument of the assertion is - a functor (as opposed to a function) - use_user_type: true if and only if the predicate functor/function takes - argument(s) of a user-defined type. 
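The fixture above enforces two contracts that a hand-written test can make concrete: each macro argument is evaluated exactly once, and an expected failure can itself be asserted on via gtest-spi, as the GenTest helper that follows does:

```cpp
#include "gtest/gtest-spi.h"
#include "gtest/gtest.h"

static bool IsPositive(int n) { return n > 0; }

TEST(PredContractDemo, ArgumentEvaluatedExactlyOnce) {
  int n = 0;
  EXPECT_PRED1(IsPositive, ++n);  // The side effect runs once, inside the helper.
  EXPECT_EQ(1, n);
}

TEST(PredContractDemo, ExpectedFailureIsCaught) {
  EXPECT_NONFATAL_FAILURE({  // NOLINT
    EXPECT_PRED1(IsPositive, -1);
  }, "evaluates to false");
}
```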
- - Example: - - GenTest(1, 0, 0, 1, 0) returns a test that tests the behavior - of a successful EXPECT_PRED_FORMATn() that takes a functor - whose arguments have built-in types.""" - - if use_assert: - assrt = 'ASSERT' # 'assert' is reserved, so we cannot use - # that identifier here. - else: - assrt = 'EXPECT' - - assertion = assrt + '_PRED' - - if use_format: - pred_format = 'PredFormat' - assertion += '_FORMAT' - else: - pred_format = 'Pred' - - assertion += '%(n)s' % DEFS - - if use_functor: - pred_format_type = 'functor' - pred_format += 'Functor%(n)s()' - else: - pred_format_type = 'function' - pred_format += 'Function%(n)s' - if not use_format: - if use_user_type: - pred_format += 'Bool' - else: - pred_format += 'Int' - - test_name = pred_format_type.title() - - if use_user_type: - arg_type = 'user-defined type (Bool)' - test_name += 'OnUserType' - if expect_failure: - arg = 'Bool(n%s_++)' - else: - arg = 'Bool(++n%s_)' - else: - arg_type = 'built-in type (int)' - test_name += 'OnBuiltInType' - if expect_failure: - arg = 'n%s_++' - else: - arg = '++n%s_' - - if expect_failure: - successful_or_failed = 'failed' - expected_or_not = 'expected.' - test_name += 'Failure' - else: - successful_or_failed = 'successful' - expected_or_not = 'UNEXPECTED!' - test_name += 'Success' - - # A map that defines the values used in the test template. - defs = DEFS.copy() - defs.update({ - 'assert' : assrt, - 'assertion' : assertion, - 'test_name' : test_name, - 'pf_type' : pred_format_type, - 'pf' : pred_format, - 'arg_type' : arg_type, - 'arg' : arg, - 'successful' : successful_or_failed, - 'expected' : expected_or_not, - }) - - test = """ -// Tests a %(successful)s %(assertion)s where the -// predicate-formatter is a %(pf_type)s on a %(arg_type)s. -TEST_F(%(assertion)sTest, %(test_name)s) {""" % defs - - indent = (len(assertion) + 3)*' ' - extra_indent = '' - - if expect_failure: - extra_indent = ' ' - if use_assert: - test += """ - expected_to_finish_ = false; - EXPECT_FATAL_FAILURE({ // NOLINT""" - else: - test += """ - EXPECT_NONFATAL_FAILURE({ // NOLINT""" - - test += '\n' + extra_indent + """ %(assertion)s(%(pf)s""" % defs - - test = test % defs - test += Iter(n, ',\n' + indent + extra_indent + '%(arg)s' % defs) - test += ');\n' + extra_indent + ' finished_ = true;\n' - - if expect_failure: - test += ' }, "");\n' - - test += '}\n' - return test - - # Generates tests for all 2**6 = 64 combinations. - tests += ''.join([GenTest(use_format, use_assert, expect_failure, - use_functor, use_user_type) - for use_format in [0, 1] - for use_assert in [0, 1] - for expect_failure in [0, 1] - for use_functor in [0, 1] - for use_user_type in [0, 1] - ]) - - return tests - - -def UnitTestPostamble(): - """Returns the postamble for the tests.""" - - return '' - - -def GenerateUnitTest(n): - """Returns the tests for up-to n-ary predicate assertions.""" - - GenerateFile(UNIT_TEST, - UnitTestPreamble() - + ''.join([TestsForArity(i) for i in OneTo(n)]) - + UnitTestPostamble()) - - -def _Main(): - """The entry point of the script. 
Generates the header file and its - unit test.""" - - if len(sys.argv) != 2: - print __doc__ - print 'Author: ' + __author__ - sys.exit(1) - - n = int(sys.argv[1]) - GenerateHeader(n) - GenerateUnitTest(n) - - -if __name__ == '__main__': - _Main() diff --git a/MicroBenchmarks/libs/benchmark/googletest/googletest/scripts/gtest-config.in b/MicroBenchmarks/libs/benchmark/googletest/googletest/scripts/gtest-config.in deleted file mode 100755 --- a/MicroBenchmarks/libs/benchmark/googletest/googletest/scripts/gtest-config.in +++ /dev/null @@ -1,274 +0,0 @@ -#!/bin/sh - -# These variables are automatically filled in by the configure script. -name="@PACKAGE_TARNAME@" -version="@PACKAGE_VERSION@" - -show_usage() -{ - echo "Usage: gtest-config [OPTIONS...]" -} - -show_help() -{ - show_usage - cat <<\EOF - -The `gtest-config' script provides access to the necessary compile and linking -flags to connect with Google C++ Testing Framework, both in a build prior to -installation, and on the system proper after installation. The installation -overrides may be issued in combination with any other queries, but will only -affect installation queries if called on a built but not installed gtest. The -installation queries may not be issued with any other types of queries, and -only one installation query may be made at a time. The version queries and -compiler flag queries may be combined as desired but not mixed. Different -version queries are always combined with logical "and" semantics, and only the -last of any particular query is used while all previous ones ignored. All -versions must be specified as a sequence of numbers separated by periods. -Compiler flag queries output the union of the sets of flags when combined. - - Examples: - gtest-config --min-version=1.0 || echo "Insufficient Google Test version." - - g++ $(gtest-config --cppflags --cxxflags) -o foo.o -c foo.cpp - g++ $(gtest-config --ldflags --libs) -o foo foo.o - - # When using a built but not installed Google Test: - g++ $(../../my_gtest_build/scripts/gtest-config ...) ... - - # When using an installed Google Test, but with installation overrides: - export GTEST_PREFIX="/opt" - g++ $(gtest-config --libdir="/opt/lib64" ...) ... - - Help: - --usage brief usage information - --help display this help message - - Installation Overrides: - --prefix= overrides the installation prefix - --exec-prefix= overrides the executable installation prefix - --libdir= overrides the library installation prefix - --includedir= overrides the header file installation prefix - - Installation Queries: - --prefix installation prefix - --exec-prefix executable installation prefix - --libdir library installation directory - --includedir header file installation directory - --version the version of the Google Test installation - - Version Queries: - --min-version=VERSION return 0 if the version is at least VERSION - --exact-version=VERSION return 0 if the version is exactly VERSION - --max-version=VERSION return 0 if the version is at most VERSION - - Compilation Flag Queries: - --cppflags compile flags specific to the C-like preprocessors - --cxxflags compile flags appropriate for C++ programs - --ldflags linker flags - --libs libraries for linking - -EOF -} - -# This function bounds our version with a min and a max. It uses some clever -# POSIX-compliant variable expansion to portably do all the work in the shell -# and avoid any dependency on a particular "sed" or "awk" implementation. -# Notable is that it will only ever compare the first 3 components of versions. 
-# Further components will be cleanly stripped off. All versions must be -# unadorned, so "v1.0" will *not* work. The minimum version must be in $1, and -# the max in $2. TODO(chandlerc@google.com): If this ever breaks, we should -# investigate expanding this via autom4te from AS_VERSION_COMPARE rather than -# continuing to maintain our own shell version. -check_versions() -{ - major_version=${version%%.*} - minor_version="0" - point_version="0" - if test "${version#*.}" != "${version}"; then - minor_version=${version#*.} - minor_version=${minor_version%%.*} - fi - if test "${version#*.*.}" != "${version}"; then - point_version=${version#*.*.} - point_version=${point_version%%.*} - fi - - min_version="$1" - min_major_version=${min_version%%.*} - min_minor_version="0" - min_point_version="0" - if test "${min_version#*.}" != "${min_version}"; then - min_minor_version=${min_version#*.} - min_minor_version=${min_minor_version%%.*} - fi - if test "${min_version#*.*.}" != "${min_version}"; then - min_point_version=${min_version#*.*.} - min_point_version=${min_point_version%%.*} - fi - - max_version="$2" - max_major_version=${max_version%%.*} - max_minor_version="0" - max_point_version="0" - if test "${max_version#*.}" != "${max_version}"; then - max_minor_version=${max_version#*.} - max_minor_version=${max_minor_version%%.*} - fi - if test "${max_version#*.*.}" != "${max_version}"; then - max_point_version=${max_version#*.*.} - max_point_version=${max_point_version%%.*} - fi - - test $(($major_version)) -lt $(($min_major_version)) && exit 1 - if test $(($major_version)) -eq $(($min_major_version)); then - test $(($minor_version)) -lt $(($min_minor_version)) && exit 1 - if test $(($minor_version)) -eq $(($min_minor_version)); then - test $(($point_version)) -lt $(($min_point_version)) && exit 1 - fi - fi - - test $(($major_version)) -gt $(($max_major_version)) && exit 1 - if test $(($major_version)) -eq $(($max_major_version)); then - test $(($minor_version)) -gt $(($max_minor_version)) && exit 1 - if test $(($minor_version)) -eq $(($max_minor_version)); then - test $(($point_version)) -gt $(($max_point_version)) && exit 1 - fi - fi - - exit 0 -} - -# Show the usage line when no arguments are specified. -if test $# -eq 0; then - show_usage - exit 1 -fi - -while test $# -gt 0; do - case $1 in - --usage) show_usage; exit 0;; - --help) show_help; exit 0;; - - # Installation overrides - --prefix=*) GTEST_PREFIX=${1#--prefix=};; - --exec-prefix=*) GTEST_EXEC_PREFIX=${1#--exec-prefix=};; - --libdir=*) GTEST_LIBDIR=${1#--libdir=};; - --includedir=*) GTEST_INCLUDEDIR=${1#--includedir=};; - - # Installation queries - --prefix|--exec-prefix|--libdir|--includedir|--version) - if test -n "${do_query}"; then - show_usage - exit 1 - fi - do_query=${1#--} - ;; - - # Version checking - --min-version=*) - do_check_versions=yes - min_version=${1#--min-version=} - ;; - --max-version=*) - do_check_versions=yes - max_version=${1#--max-version=} - ;; - --exact-version=*) - do_check_versions=yes - exact_version=${1#--exact-version=} - ;; - - # Compiler flag output - --cppflags) echo_cppflags=yes;; - --cxxflags) echo_cxxflags=yes;; - --ldflags) echo_ldflags=yes;; - --libs) echo_libs=yes;; - - # Everything else is an error - *) show_usage; exit 1;; - esac - shift -done - -# These have defaults filled in by the configure script but can also be -# overridden by environment variables or command line parameters. 
-prefix="${GTEST_PREFIX:-@prefix@}" -exec_prefix="${GTEST_EXEC_PREFIX:-@exec_prefix@}" -libdir="${GTEST_LIBDIR:-@libdir@}" -includedir="${GTEST_INCLUDEDIR:-@includedir@}" - -# We try and detect if our binary is not located at its installed location. If -# it's not, we provide variables pointing to the source and build tree rather -# than to the install tree. This allows building against a just-built gtest -# rather than an installed gtest. -bindir="@bindir@" -this_relative_bindir=`dirname $0` -this_bindir=`cd ${this_relative_bindir}; pwd -P` -if test "${this_bindir}" = "${this_bindir%${bindir}}"; then - # The path to the script doesn't end in the bindir sequence from Autoconf, - # assume that we are in a build tree. - build_dir=`dirname ${this_bindir}` - src_dir=`cd ${this_bindir}; cd @top_srcdir@; pwd -P` - - # TODO(chandlerc@google.com): This is a dangerous dependency on libtool, we - # should work to remove it, and/or remove libtool altogether, replacing it - # with direct references to the library and a link path. - gtest_libs="${build_dir}/lib/libgtest.la @PTHREAD_CFLAGS@ @PTHREAD_LIBS@" - gtest_ldflags="" - - # We provide hooks to include from either the source or build dir, where the - # build dir is always preferred. This will potentially allow us to write - # build rules for generated headers and have them automatically be preferred - # over provided versions. - gtest_cppflags="-I${build_dir}/include -I${src_dir}/include" - gtest_cxxflags="@PTHREAD_CFLAGS@" -else - # We're using an installed gtest, although it may be staged under some - # prefix. Assume (as our own libraries do) that we can resolve the prefix, - # and are present in the dynamic link paths. - gtest_ldflags="-L${libdir}" - gtest_libs="-l${name} @PTHREAD_CFLAGS@ @PTHREAD_LIBS@" - gtest_cppflags="-I${includedir}" - gtest_cxxflags="@PTHREAD_CFLAGS@" -fi - -# Do an installation query if requested. -if test -n "$do_query"; then - case $do_query in - prefix) echo $prefix; exit 0;; - exec-prefix) echo $exec_prefix; exit 0;; - libdir) echo $libdir; exit 0;; - includedir) echo $includedir; exit 0;; - version) echo $version; exit 0;; - *) show_usage; exit 1;; - esac -fi - -# Do a version check if requested. -if test "$do_check_versions" = "yes"; then - # Make sure we didn't receive a bad combination of parameters. - test "$echo_cppflags" = "yes" && show_usage && exit 1 - test "$echo_cxxflags" = "yes" && show_usage && exit 1 - test "$echo_ldflags" = "yes" && show_usage && exit 1 - test "$echo_libs" = "yes" && show_usage && exit 1 - - if test "$exact_version" != ""; then - check_versions $exact_version $exact_version - # unreachable - else - check_versions ${min_version:-0.0.0} ${max_version:-9999.9999.9999} - # unreachable - fi -fi - -# Do the output in the correct order so that these can be used in-line of -# a compiler invocation. -output="" -test "$echo_cppflags" = "yes" && output="$output $gtest_cppflags" -test "$echo_cxxflags" = "yes" && output="$output $gtest_cxxflags" -test "$echo_ldflags" = "yes" && output="$output $gtest_ldflags" -test "$echo_libs" = "yes" && output="$output $gtest_libs" -echo $output - -exit 0 diff --git a/MicroBenchmarks/libs/benchmark/googletest/googletest/scripts/release_docs.py b/MicroBenchmarks/libs/benchmark/googletest/googletest/scripts/release_docs.py deleted file mode 100755 --- a/MicroBenchmarks/libs/benchmark/googletest/googletest/scripts/release_docs.py +++ /dev/null @@ -1,158 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2013 Google Inc. All Rights Reserved. 
-# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""Script for branching Google Test/Mock wiki pages for a new version. - -SYNOPSIS - release_docs.py NEW_RELEASE_VERSION - - Google Test and Google Mock's external user documentation is in - interlinked wiki files. When we release a new version of - Google Test or Google Mock, we need to branch the wiki files - such that users of a specific version of Google Test/Mock can - look up documentation relevant for that version. This script - automates that process by: - - - branching the current wiki pages (which document the - behavior of the SVN trunk head) to pages for the specified - version (e.g. branching FAQ.wiki to V2_6_FAQ.wiki when - NEW_RELEASE_VERSION is 2.6); - - updating the links in the branched files to point to the branched - version (e.g. a link in V2_6_FAQ.wiki that pointed to - Primer.wiki#Anchor will now point to V2_6_Primer.wiki#Anchor). - - NOTE: NEW_RELEASE_VERSION must be a NEW version number for - which the wiki pages don't yet exist; otherwise you'll get SVN - errors like "svn: Path 'V1_7_PumpManual.wiki' is not a - directory" when running the script. - -EXAMPLE - $ cd PATH/TO/GTEST_SVN_WORKSPACE/trunk - $ scripts/release_docs.py 2.6 # create wiki pages for v2.6 - $ svn status # verify the file list - $ svn diff # verify the file contents - $ svn commit -m "release wiki pages for v2.6" -""" - -__author__ = 'wan@google.com (Zhanyong Wan)' - -import os -import re -import sys - -import common - - -# Wiki pages that shouldn't be branched for every gtest/gmock release. 
-GTEST_UNVERSIONED_WIKIS = ['DevGuide.wiki'] -GMOCK_UNVERSIONED_WIKIS = [ - 'DesignDoc.wiki', - 'DevGuide.wiki', - 'KnownIssues.wiki' - ] - - -def DropWikiSuffix(wiki_filename): - """Removes the .wiki suffix (if any) from the given filename.""" - - return (wiki_filename[:-len('.wiki')] if wiki_filename.endswith('.wiki') - else wiki_filename) - - -class WikiBrancher(object): - """Branches ...""" - - def __init__(self, dot_version): - self.project, svn_root_path = common.GetSvnInfo() - if self.project not in ('googletest', 'googlemock'): - sys.exit('This script must be run in a gtest or gmock SVN workspace.') - self.wiki_dir = svn_root_path + '/wiki' - # Turn '2.6' to 'V2_6_'. - self.version_prefix = 'V' + dot_version.replace('.', '_') + '_' - self.files_to_branch = self.GetFilesToBranch() - page_names = [DropWikiSuffix(f) for f in self.files_to_branch] - # A link to Foo.wiki is in one of the following forms: - # [Foo words] - # [Foo#Anchor words] - # [http://code.google.com/.../wiki/Foo words] - # [http://code.google.com/.../wiki/Foo#Anchor words] - # We want to replace 'Foo' with 'V2_6_Foo' in the above cases. - self.search_for_re = re.compile( - # This regex matches either - # [Foo - # or - # /wiki/Foo - # followed by a space or a #, where Foo is the name of an - # unversioned wiki page. - r'(\[|/wiki/)(%s)([ #])' % '|'.join(page_names)) - self.replace_with = r'\1%s\2\3' % (self.version_prefix,) - - def GetFilesToBranch(self): - """Returns a list of .wiki file names that need to be branched.""" - - unversioned_wikis = (GTEST_UNVERSIONED_WIKIS if self.project == 'googletest' - else GMOCK_UNVERSIONED_WIKIS) - return [f for f in os.listdir(self.wiki_dir) - if (f.endswith('.wiki') and - not re.match(r'^V\d', f) and # Excluded versioned .wiki files. - f not in unversioned_wikis)] - - def BranchFiles(self): - """Branches the .wiki files needed to be branched.""" - - print 'Branching %d .wiki files:' % (len(self.files_to_branch),) - os.chdir(self.wiki_dir) - for f in self.files_to_branch: - command = 'svn cp %s %s%s' % (f, self.version_prefix, f) - print command - os.system(command) - - def UpdateLinksInBranchedFiles(self): - - for f in self.files_to_branch: - source_file = os.path.join(self.wiki_dir, f) - versioned_file = os.path.join(self.wiki_dir, self.version_prefix + f) - print 'Updating links in %s.' % (versioned_file,) - text = file(source_file, 'r').read() - new_text = self.search_for_re.sub(self.replace_with, text) - file(versioned_file, 'w').write(new_text) - - -def main(): - if len(sys.argv) != 2: - sys.exit(__doc__) - - brancher = WikiBrancher(sys.argv[1]) - brancher.BranchFiles() - brancher.UpdateLinksInBranchedFiles() - - -if __name__ == '__main__': - main() diff --git a/MicroBenchmarks/libs/benchmark/googletest/googletest/scripts/run_with_path.py b/MicroBenchmarks/libs/benchmark/googletest/googletest/scripts/run_with_path.py deleted file mode 100755 --- a/MicroBenchmarks/libs/benchmark/googletest/googletest/scripts/run_with_path.py +++ /dev/null @@ -1,32 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2010 Google Inc. All Rights Reserved. - -"""Runs program specified in the command line with the substituted PATH. - - This script is needed for to support building under Pulse which is unable - to override the existing PATH variable. 
-""" - -import os -import subprocess -import sys - -SUBST_PATH_ENV_VAR_NAME = "SUBST_PATH" - -def main(): - if SUBST_PATH_ENV_VAR_NAME in os.environ: - os.environ["PATH"] = os.environ[SUBST_PATH_ENV_VAR_NAME] - - exit_code = subprocess.Popen(sys.argv[1:]).wait() - - # exit_code is negative (-signal) if the process has been terminated by - # a signal. Returning negative exit code is not portable and so we return - # 100 instead. - if exit_code < 0: - exit_code = 100 - - sys.exit(exit_code) - -if __name__ == "__main__": - main() diff --git a/MicroBenchmarks/libs/benchmark/googletest/googletest/scripts/test/Makefile b/MicroBenchmarks/libs/benchmark/googletest/googletest/scripts/test/Makefile deleted file mode 100644 --- a/MicroBenchmarks/libs/benchmark/googletest/googletest/scripts/test/Makefile +++ /dev/null @@ -1,59 +0,0 @@ -# A Makefile for fusing Google Test and building a sample test against it. -# -# SYNOPSIS: -# -# make [all] - makes everything. -# make TARGET - makes the given target. -# make check - makes everything and runs the built sample test. -# make clean - removes all files generated by make. - -# Points to the root of fused Google Test, relative to where this file is. -FUSED_GTEST_DIR = output - -# Paths to the fused gtest files. -FUSED_GTEST_H = $(FUSED_GTEST_DIR)/gtest/gtest.h -FUSED_GTEST_ALL_CC = $(FUSED_GTEST_DIR)/gtest/gtest-all.cc - -# Where to find the sample test. -SAMPLE_DIR = ../../samples - -# Where to find gtest_main.cc. -GTEST_MAIN_CC = ../../src/gtest_main.cc - -# Flags passed to the preprocessor. -# We have no idea here whether pthreads is available in the system, so -# disable its use. -CPPFLAGS += -I$(FUSED_GTEST_DIR) -DGTEST_HAS_PTHREAD=0 - -# Flags passed to the C++ compiler. -CXXFLAGS += -g - -all : sample1_unittest - -check : all - ./sample1_unittest - -clean : - rm -rf $(FUSED_GTEST_DIR) sample1_unittest *.o - -$(FUSED_GTEST_H) : - ../fuse_gtest_files.py $(FUSED_GTEST_DIR) - -$(FUSED_GTEST_ALL_CC) : - ../fuse_gtest_files.py $(FUSED_GTEST_DIR) - -gtest-all.o : $(FUSED_GTEST_H) $(FUSED_GTEST_ALL_CC) - $(CXX) $(CPPFLAGS) $(CXXFLAGS) -c $(FUSED_GTEST_DIR)/gtest/gtest-all.cc - -gtest_main.o : $(FUSED_GTEST_H) $(GTEST_MAIN_CC) - $(CXX) $(CPPFLAGS) $(CXXFLAGS) -c $(GTEST_MAIN_CC) - -sample1.o : $(SAMPLE_DIR)/sample1.cc $(SAMPLE_DIR)/sample1.h - $(CXX) $(CPPFLAGS) $(CXXFLAGS) -c $(SAMPLE_DIR)/sample1.cc - -sample1_unittest.o : $(SAMPLE_DIR)/sample1_unittest.cc \ - $(SAMPLE_DIR)/sample1.h $(FUSED_GTEST_H) - $(CXX) $(CPPFLAGS) $(CXXFLAGS) -c $(SAMPLE_DIR)/sample1_unittest.cc - -sample1_unittest : sample1.o sample1_unittest.o gtest-all.o gtest_main.o - $(CXX) $(CPPFLAGS) $(CXXFLAGS) $^ -o $@ diff --git a/MicroBenchmarks/libs/benchmark/googletest/googletest/scripts/upload.py b/MicroBenchmarks/libs/benchmark/googletest/googletest/scripts/upload.py deleted file mode 100755 --- a/MicroBenchmarks/libs/benchmark/googletest/googletest/scripts/upload.py +++ /dev/null @@ -1,1402 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2007, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. 
-# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""Tool for uploading diffs from a version control system to the codereview app. - -Usage summary: upload.py [options] [-- diff_options] - -Diff options are passed to the diff command of the underlying system. - -Supported version control systems: - Git - Mercurial - Subversion - -It is important for Git/Mercurial users to specify a tree/node/branch to diff -against by using the '--rev' option. -""" -# This code is derived from appcfg.py in the App Engine SDK (open source), -# and from ASPN recipe #146306. - -import cookielib -import getpass -import logging -import md5 -import mimetypes -import optparse -import os -import re -import socket -import subprocess -import sys -import urllib -import urllib2 -import urlparse - -try: - import readline -except ImportError: - pass - -# The logging verbosity: -# 0: Errors only. -# 1: Status messages. -# 2: Info logs. -# 3: Debug logs. -verbosity = 1 - -# Max size of patch or base file. -MAX_UPLOAD_SIZE = 900 * 1024 - - -def GetEmail(prompt): - """Prompts the user for their email address and returns it. - - The last used email address is saved to a file and offered up as a suggestion - to the user. If the user presses enter without typing in anything the last - used email address is used. If the user enters a new address, it is saved - for next time we prompt. - - """ - last_email_file_name = os.path.expanduser("~/.last_codereview_email_address") - last_email = "" - if os.path.exists(last_email_file_name): - try: - last_email_file = open(last_email_file_name, "r") - last_email = last_email_file.readline().strip("\n") - last_email_file.close() - prompt += " [%s]" % last_email - except IOError, e: - pass - email = raw_input(prompt + ": ").strip() - if email: - try: - last_email_file = open(last_email_file_name, "w") - last_email_file.write(email) - last_email_file.close() - except IOError, e: - pass - else: - email = last_email - return email - - -def StatusUpdate(msg): - """Print a status message to stdout. - - If 'verbosity' is greater than 0, print the message. - - Args: - msg: The string to print. 
- """ - if verbosity > 0: - print msg - - -def ErrorExit(msg): - """Print an error message to stderr and exit.""" - print >>sys.stderr, msg - sys.exit(1) - - -class ClientLoginError(urllib2.HTTPError): - """Raised to indicate there was an error authenticating with ClientLogin.""" - - def __init__(self, url, code, msg, headers, args): - urllib2.HTTPError.__init__(self, url, code, msg, headers, None) - self.args = args - self.reason = args["Error"] - - -class AbstractRpcServer(object): - """Provides a common interface for a simple RPC server.""" - - def __init__(self, host, auth_function, host_override=None, extra_headers={}, - save_cookies=False): - """Creates a new HttpRpcServer. - - Args: - host: The host to send requests to. - auth_function: A function that takes no arguments and returns an - (email, password) tuple when called. Will be called if authentication - is required. - host_override: The host header to send to the server (defaults to host). - extra_headers: A dict of extra headers to append to every request. - save_cookies: If True, save the authentication cookies to local disk. - If False, use an in-memory cookiejar instead. Subclasses must - implement this functionality. Defaults to False. - """ - self.host = host - self.host_override = host_override - self.auth_function = auth_function - self.authenticated = False - self.extra_headers = extra_headers - self.save_cookies = save_cookies - self.opener = self._GetOpener() - if self.host_override: - logging.info("Server: %s; Host: %s", self.host, self.host_override) - else: - logging.info("Server: %s", self.host) - - def _GetOpener(self): - """Returns an OpenerDirector for making HTTP requests. - - Returns: - A urllib2.OpenerDirector object. - """ - raise NotImplementedError() - - def _CreateRequest(self, url, data=None): - """Creates a new urllib request.""" - logging.debug("Creating request for: '%s' with payload:\n%s", url, data) - req = urllib2.Request(url, data=data) - if self.host_override: - req.add_header("Host", self.host_override) - for key, value in self.extra_headers.iteritems(): - req.add_header(key, value) - return req - - def _GetAuthToken(self, email, password): - """Uses ClientLogin to authenticate the user, returning an auth token. - - Args: - email: The user's email address - password: The user's password - - Raises: - ClientLoginError: If there was an error authenticating with ClientLogin. - HTTPError: If there was some other form of HTTP error. - - Returns: - The authentication token returned by ClientLogin. - """ - account_type = "GOOGLE" - if self.host.endswith(".google.com"): - # Needed for use inside Google. - account_type = "HOSTED" - req = self._CreateRequest( - url="https://www.google.com/accounts/ClientLogin", - data=urllib.urlencode({ - "Email": email, - "Passwd": password, - "service": "ah", - "source": "rietveld-codereview-upload", - "accountType": account_type, - }), - ) - try: - response = self.opener.open(req) - response_body = response.read() - response_dict = dict(x.split("=") - for x in response_body.split("\n") if x) - return response_dict["Auth"] - except urllib2.HTTPError, e: - if e.code == 403: - body = e.read() - response_dict = dict(x.split("=", 1) for x in body.split("\n") if x) - raise ClientLoginError(req.get_full_url(), e.code, e.msg, - e.headers, response_dict) - else: - raise - - def _GetAuthCookie(self, auth_token): - """Fetches authentication cookies for an authentication token. - - Args: - auth_token: The authentication token returned by ClientLogin. 
- - Raises: - HTTPError: If there was an error fetching the authentication cookies. - """ - # This is a dummy value to allow us to identify when we're successful. - continue_location = "http://localhost/" - args = {"continue": continue_location, "auth": auth_token} - req = self._CreateRequest("http://%s/_ah/login?%s" % - (self.host, urllib.urlencode(args))) - try: - response = self.opener.open(req) - except urllib2.HTTPError, e: - response = e - if (response.code != 302 or - response.info()["location"] != continue_location): - raise urllib2.HTTPError(req.get_full_url(), response.code, response.msg, - response.headers, response.fp) - self.authenticated = True - - def _Authenticate(self): - """Authenticates the user. - - The authentication process works as follows: - 1) We get a username and password from the user - 2) We use ClientLogin to obtain an AUTH token for the user - (see https://developers.google.com/identity/protocols/AuthForInstalledApps). - 3) We pass the auth token to /_ah/login on the server to obtain an - authentication cookie. If login was successful, it tries to redirect - us to the URL we provided. - - If we attempt to access the upload API without first obtaining an - authentication cookie, it returns a 401 response and directs us to - authenticate ourselves with ClientLogin. - """ - for i in range(3): - credentials = self.auth_function() - try: - auth_token = self._GetAuthToken(credentials[0], credentials[1]) - except ClientLoginError, e: - if e.reason == "BadAuthentication": - print >>sys.stderr, "Invalid username or password." - continue - if e.reason == "CaptchaRequired": - print >>sys.stderr, ( - "Please go to\n" - "https://www.google.com/accounts/DisplayUnlockCaptcha\n" - "and verify you are a human. Then try again.") - break - if e.reason == "NotVerified": - print >>sys.stderr, "Account not verified." - break - if e.reason == "TermsNotAgreed": - print >>sys.stderr, "User has not agreed to TOS." - break - if e.reason == "AccountDeleted": - print >>sys.stderr, "The user account has been deleted." - break - if e.reason == "AccountDisabled": - print >>sys.stderr, "The user account has been disabled." - break - if e.reason == "ServiceDisabled": - print >>sys.stderr, ("The user's access to the service has been " - "disabled.") - break - if e.reason == "ServiceUnavailable": - print >>sys.stderr, "The service is not available; try again later." - break - raise - self._GetAuthCookie(auth_token) - return - - def Send(self, request_path, payload=None, - content_type="application/octet-stream", - timeout=None, - **kwargs): - """Sends an RPC and returns the response. - - Args: - request_path: The path to send the request to, eg /api/appversion/create. - payload: The body of the request, or None to send an empty request. - content_type: The Content-Type header to use. - timeout: timeout in seconds; default None i.e. no timeout. - (Note: for large requests on OS X, the timeout doesn't work right.) - kwargs: Any keyword arguments are converted into query string parameters. - - Returns: - The response body, as a string. - """ - # TODO: Don't require authentication. Let the server say - # whether it is necessary. - if not self.authenticated: - self._Authenticate() - - old_timeout = socket.getdefaulttimeout() - socket.setdefaulttimeout(timeout) - try: - tries = 0 - while True: - tries += 1 - args = dict(kwargs) - url = "http://%s%s" % (self.host, request_path) - if args: - url += "?" 
+ urllib.urlencode(args) - req = self._CreateRequest(url=url, data=payload) - req.add_header("Content-Type", content_type) - try: - f = self.opener.open(req) - response = f.read() - f.close() - return response - except urllib2.HTTPError, e: - if tries > 3: - raise - elif e.code == 401: - self._Authenticate() -## elif e.code >= 500 and e.code < 600: -## # Server Error - try again. -## continue - else: - raise - finally: - socket.setdefaulttimeout(old_timeout) - - -class HttpRpcServer(AbstractRpcServer): - """Provides a simplified RPC-style interface for HTTP requests.""" - - def _Authenticate(self): - """Save the cookie jar after authentication.""" - super(HttpRpcServer, self)._Authenticate() - if self.save_cookies: - StatusUpdate("Saving authentication cookies to %s" % self.cookie_file) - self.cookie_jar.save() - - def _GetOpener(self): - """Returns an OpenerDirector that supports cookies and ignores redirects. - - Returns: - A urllib2.OpenerDirector object. - """ - opener = urllib2.OpenerDirector() - opener.add_handler(urllib2.ProxyHandler()) - opener.add_handler(urllib2.UnknownHandler()) - opener.add_handler(urllib2.HTTPHandler()) - opener.add_handler(urllib2.HTTPDefaultErrorHandler()) - opener.add_handler(urllib2.HTTPSHandler()) - opener.add_handler(urllib2.HTTPErrorProcessor()) - if self.save_cookies: - self.cookie_file = os.path.expanduser("~/.codereview_upload_cookies") - self.cookie_jar = cookielib.MozillaCookieJar(self.cookie_file) - if os.path.exists(self.cookie_file): - try: - self.cookie_jar.load() - self.authenticated = True - StatusUpdate("Loaded authentication cookies from %s" % - self.cookie_file) - except (cookielib.LoadError, IOError): - # Failed to load cookies - just ignore them. - pass - else: - # Create an empty cookie file with mode 600 - fd = os.open(self.cookie_file, os.O_CREAT, 0600) - os.close(fd) - # Always chmod the cookie file - os.chmod(self.cookie_file, 0600) - else: - # Don't save cookies across runs of update.py. - self.cookie_jar = cookielib.CookieJar() - opener.add_handler(urllib2.HTTPCookieProcessor(self.cookie_jar)) - return opener - - -parser = optparse.OptionParser(usage="%prog [options] [-- diff_options]") -parser.add_option("-y", "--assume_yes", action="store_true", - dest="assume_yes", default=False, - help="Assume that the answer to yes/no questions is 'yes'.") -# Logging -group = parser.add_option_group("Logging options") -group.add_option("-q", "--quiet", action="store_const", const=0, - dest="verbose", help="Print errors only.") -group.add_option("-v", "--verbose", action="store_const", const=2, - dest="verbose", default=1, - help="Print info level logs (default).") -group.add_option("--noisy", action="store_const", const=3, - dest="verbose", help="Print all logs.") -# Review server -group = parser.add_option_group("Review server options") -group.add_option("-s", "--server", action="store", dest="server", - default="codereview.appspot.com", - metavar="SERVER", - help=("The server to upload to. The format is host[:port]. " - "Defaults to 'codereview.appspot.com'.")) -group.add_option("-e", "--email", action="store", dest="email", - metavar="EMAIL", default=None, - help="The username to use. 
Will prompt if omitted.") -group.add_option("-H", "--host", action="store", dest="host", - metavar="HOST", default=None, - help="Overrides the Host header sent with all RPCs.") -group.add_option("--no_cookies", action="store_false", - dest="save_cookies", default=True, - help="Do not save authentication cookies to local disk.") -# Issue -group = parser.add_option_group("Issue options") -group.add_option("-d", "--description", action="store", dest="description", - metavar="DESCRIPTION", default=None, - help="Optional description when creating an issue.") -group.add_option("-f", "--description_file", action="store", - dest="description_file", metavar="DESCRIPTION_FILE", - default=None, - help="Optional path of a file that contains " - "the description when creating an issue.") -group.add_option("-r", "--reviewers", action="store", dest="reviewers", - metavar="REVIEWERS", default=None, - help="Add reviewers (comma separated email addresses).") -group.add_option("--cc", action="store", dest="cc", - metavar="CC", default=None, - help="Add CC (comma separated email addresses).") -# Upload options -group = parser.add_option_group("Patch options") -group.add_option("-m", "--message", action="store", dest="message", - metavar="MESSAGE", default=None, - help="A message to identify the patch. " - "Will prompt if omitted.") -group.add_option("-i", "--issue", type="int", action="store", - metavar="ISSUE", default=None, - help="Issue number to which to add. Defaults to new issue.") -group.add_option("--download_base", action="store_true", - dest="download_base", default=False, - help="Base files will be downloaded by the server " - "(side-by-side diffs may not work on files with CRs).") -group.add_option("--rev", action="store", dest="revision", - metavar="REV", default=None, - help="Branch/tree/revision to diff against (used by DVCS).") -group.add_option("--send_mail", action="store_true", - dest="send_mail", default=False, - help="Send notification email to reviewers.") - - -def GetRpcServer(options): - """Returns an instance of an AbstractRpcServer. - - Returns: - A new AbstractRpcServer, on which RPC calls can be made. - """ - - rpc_server_class = HttpRpcServer - - def GetUserCredentials(): - """Prompts the user for a username and password.""" - email = options.email - if email is None: - email = GetEmail("Email (login for uploading to %s)" % options.server) - password = getpass.getpass("Password for %s: " % email) - return (email, password) - - # If this is the dev_appserver, use fake authentication. - host = (options.host or options.server).lower() - if host == "localhost" or host.startswith("localhost:"): - email = options.email - if email is None: - email = "test@example.com" - logging.info("Using debug user %s. Override with --email" % email) - server = rpc_server_class( - options.server, - lambda: (email, "password"), - host_override=options.host, - extra_headers={"Cookie": - 'dev_appserver_login="%s:False"' % email}, - save_cookies=options.save_cookies) - # Don't try to talk to ClientLogin. - server.authenticated = True - return server - - return rpc_server_class(options.server, GetUserCredentials, - host_override=options.host, - save_cookies=options.save_cookies) - - -def EncodeMultipartFormData(fields, files): - """Encode form fields for multipart/form-data. - - Args: - fields: A sequence of (name, value) elements for regular form fields. - files: A sequence of (name, filename, value) elements for data to be - uploaded as files. 
- Returns: - (content_type, body) ready for httplib.HTTP instance. - - Source: - https://web.archive.org/web/20160116052001/code.activestate.com/recipes/146306 - """ - BOUNDARY = '-M-A-G-I-C---B-O-U-N-D-A-R-Y-' - CRLF = '\r\n' - lines = [] - for (key, value) in fields: - lines.append('--' + BOUNDARY) - lines.append('Content-Disposition: form-data; name="%s"' % key) - lines.append('') - lines.append(value) - for (key, filename, value) in files: - lines.append('--' + BOUNDARY) - lines.append('Content-Disposition: form-data; name="%s"; filename="%s"' % - (key, filename)) - lines.append('Content-Type: %s' % GetContentType(filename)) - lines.append('') - lines.append(value) - lines.append('--' + BOUNDARY + '--') - lines.append('') - body = CRLF.join(lines) - content_type = 'multipart/form-data; boundary=%s' % BOUNDARY - return content_type, body - - -def GetContentType(filename): - """Helper to guess the content-type from the filename.""" - return mimetypes.guess_type(filename)[0] or 'application/octet-stream' - - -# Use a shell for subcommands on Windows to get a PATH search. -use_shell = sys.platform.startswith("win") - -def RunShellWithReturnCode(command, print_output=False, - universal_newlines=True): - """Executes a command and returns the output from stdout and the return code. - - Args: - command: Command to execute. - print_output: If True, the output is printed to stdout. - If False, both stdout and stderr are ignored. - universal_newlines: Use universal_newlines flag (default: True). - - Returns: - Tuple (output, return code) - """ - logging.info("Running %s", command) - p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, - shell=use_shell, universal_newlines=universal_newlines) - if print_output: - output_array = [] - while True: - line = p.stdout.readline() - if not line: - break - print line.strip("\n") - output_array.append(line) - output = "".join(output_array) - else: - output = p.stdout.read() - p.wait() - errout = p.stderr.read() - if print_output and errout: - print >>sys.stderr, errout - p.stdout.close() - p.stderr.close() - return output, p.returncode - - -def RunShell(command, silent_ok=False, universal_newlines=True, - print_output=False): - data, retcode = RunShellWithReturnCode(command, print_output, - universal_newlines) - if retcode: - ErrorExit("Got error status from %s:\n%s" % (command, data)) - if not silent_ok and not data: - ErrorExit("No output from %s" % command) - return data - - -class VersionControlSystem(object): - """Abstract base class providing an interface to the VCS.""" - - def __init__(self, options): - """Constructor. - - Args: - options: Command line options. - """ - self.options = options - - def GenerateDiff(self, args): - """Return the current diff as a string. - - Args: - args: Extra arguments to pass to the diff command. - """ - raise NotImplementedError( - "abstract method -- subclass %s must override" % self.__class__) - - def GetUnknownFiles(self): - """Return a list of files unknown to the VCS.""" - raise NotImplementedError( - "abstract method -- subclass %s must override" % self.__class__) - - def CheckForUnknownFiles(self): - """Show an "are you sure?" 
prompt if there are unknown files.""" - unknown_files = self.GetUnknownFiles() - if unknown_files: - print "The following files are not added to version control:" - for line in unknown_files: - print line - prompt = "Are you sure to continue?(y/N) " - answer = raw_input(prompt).strip() - if answer != "y": - ErrorExit("User aborted") - - def GetBaseFile(self, filename): - """Get the content of the upstream version of a file. - - Returns: - A tuple (base_content, new_content, is_binary, status) - base_content: The contents of the base file. - new_content: For text files, this is empty. For binary files, this is - the contents of the new file, since the diff output won't contain - information to reconstruct the current file. - is_binary: True iff the file is binary. - status: The status of the file. - """ - - raise NotImplementedError( - "abstract method -- subclass %s must override" % self.__class__) - - - def GetBaseFiles(self, diff): - """Helper that calls GetBase file for each file in the patch. - - Returns: - A dictionary that maps from filename to GetBaseFile's tuple. Filenames - are retrieved based on lines that start with "Index:" or - "Property changes on:". - """ - files = {} - for line in diff.splitlines(True): - if line.startswith('Index:') or line.startswith('Property changes on:'): - unused, filename = line.split(':', 1) - # On Windows if a file has property changes its filename uses '\' - # instead of '/'. - filename = filename.strip().replace('\\', '/') - files[filename] = self.GetBaseFile(filename) - return files - - - def UploadBaseFiles(self, issue, rpc_server, patch_list, patchset, options, - files): - """Uploads the base files (and if necessary, the current ones as well).""" - - def UploadFile(filename, file_id, content, is_binary, status, is_base): - """Uploads a file to the server.""" - file_too_large = False - if is_base: - type = "base" - else: - type = "current" - if len(content) > MAX_UPLOAD_SIZE: - print ("Not uploading the %s file for %s because it's too large." 
% - (type, filename)) - file_too_large = True - content = "" - checksum = md5.new(content).hexdigest() - if options.verbose > 0 and not file_too_large: - print "Uploading %s file for %s" % (type, filename) - url = "/%d/upload_content/%d/%d" % (int(issue), int(patchset), file_id) - form_fields = [("filename", filename), - ("status", status), - ("checksum", checksum), - ("is_binary", str(is_binary)), - ("is_current", str(not is_base)), - ] - if file_too_large: - form_fields.append(("file_too_large", "1")) - if options.email: - form_fields.append(("user", options.email)) - ctype, body = EncodeMultipartFormData(form_fields, - [("data", filename, content)]) - response_body = rpc_server.Send(url, body, - content_type=ctype) - if not response_body.startswith("OK"): - StatusUpdate(" --> %s" % response_body) - sys.exit(1) - - patches = dict() - [patches.setdefault(v, k) for k, v in patch_list] - for filename in patches.keys(): - base_content, new_content, is_binary, status = files[filename] - file_id_str = patches.get(filename) - if file_id_str.find("nobase") != -1: - base_content = None - file_id_str = file_id_str[file_id_str.rfind("_") + 1:] - file_id = int(file_id_str) - if base_content != None: - UploadFile(filename, file_id, base_content, is_binary, status, True) - if new_content != None: - UploadFile(filename, file_id, new_content, is_binary, status, False) - - def IsImage(self, filename): - """Returns true if the filename has an image extension.""" - mimetype = mimetypes.guess_type(filename)[0] - if not mimetype: - return False - return mimetype.startswith("image/") - - -class SubversionVCS(VersionControlSystem): - """Implementation of the VersionControlSystem interface for Subversion.""" - - def __init__(self, options): - super(SubversionVCS, self).__init__(options) - if self.options.revision: - match = re.match(r"(\d+)(:(\d+))?", self.options.revision) - if not match: - ErrorExit("Invalid Subversion revision %s." % self.options.revision) - self.rev_start = match.group(1) - self.rev_end = match.group(3) - else: - self.rev_start = self.rev_end = None - # Cache output from "svn list -r REVNO dirname". - # Keys: dirname, Values: 2-tuple (output for start rev and end rev). - self.svnls_cache = {} - # SVN base URL is required to fetch files deleted in an older revision. - # Result is cached to not guess it over and over again in GetBaseFile(). - required = self.options.download_base or self.options.revision is not None - self.svn_base = self._GuessBase(required) - - def GuessBase(self, required): - """Wrapper for _GuessBase.""" - return self.svn_base - - def _GuessBase(self, required): - """Returns the SVN base URL. - - Args: - required: If true, exits if the url can't be guessed, otherwise None is - returned. 
- """ - info = RunShell(["svn", "info"]) - for line in info.splitlines(): - words = line.split() - if len(words) == 2 and words[0] == "URL:": - url = words[1] - scheme, netloc, path, params, query, fragment = urlparse.urlparse(url) - username, netloc = urllib.splituser(netloc) - if username: - logging.info("Removed username from base URL") - if netloc.endswith("svn.python.org"): - if netloc == "svn.python.org": - if path.startswith("/projects/"): - path = path[9:] - elif netloc != "pythondev@svn.python.org": - ErrorExit("Unrecognized Python URL: %s" % url) - base = "http://svn.python.org/view/*checkout*%s/" % path - logging.info("Guessed Python base = %s", base) - elif netloc.endswith("svn.collab.net"): - if path.startswith("/repos/"): - path = path[6:] - base = "http://svn.collab.net/viewvc/*checkout*%s/" % path - logging.info("Guessed CollabNet base = %s", base) - elif netloc.endswith(".googlecode.com"): - path = path + "/" - base = urlparse.urlunparse(("http", netloc, path, params, - query, fragment)) - logging.info("Guessed Google Code base = %s", base) - else: - path = path + "/" - base = urlparse.urlunparse((scheme, netloc, path, params, - query, fragment)) - logging.info("Guessed base = %s", base) - return base - if required: - ErrorExit("Can't find URL in output from svn info") - return None - - def GenerateDiff(self, args): - cmd = ["svn", "diff"] - if self.options.revision: - cmd += ["-r", self.options.revision] - cmd.extend(args) - data = RunShell(cmd) - count = 0 - for line in data.splitlines(): - if line.startswith("Index:") or line.startswith("Property changes on:"): - count += 1 - logging.info(line) - if not count: - ErrorExit("No valid patches found in output from svn diff") - return data - - def _CollapseKeywords(self, content, keyword_str): - """Collapses SVN keywords.""" - # svn cat translates keywords but svn diff doesn't. As a result of this - # behavior patching.PatchChunks() fails with a chunk mismatch error. - # This part was originally written by the Review Board development team - # who had the same problem (https://reviews.reviewboard.org/r/276/). 
- # Mapping of keywords to known aliases - svn_keywords = { - # Standard keywords - 'Date': ['Date', 'LastChangedDate'], - 'Revision': ['Revision', 'LastChangedRevision', 'Rev'], - 'Author': ['Author', 'LastChangedBy'], - 'HeadURL': ['HeadURL', 'URL'], - 'Id': ['Id'], - - # Aliases - 'LastChangedDate': ['LastChangedDate', 'Date'], - 'LastChangedRevision': ['LastChangedRevision', 'Rev', 'Revision'], - 'LastChangedBy': ['LastChangedBy', 'Author'], - 'URL': ['URL', 'HeadURL'], - } - - def repl(m): - if m.group(2): - return "$%s::%s$" % (m.group(1), " " * len(m.group(3))) - return "$%s$" % m.group(1) - keywords = [keyword - for name in keyword_str.split(" ") - for keyword in svn_keywords.get(name, [])] - return re.sub(r"\$(%s):(:?)([^\$]+)\$" % '|'.join(keywords), repl, content) - - def GetUnknownFiles(self): - status = RunShell(["svn", "status", "--ignore-externals"], silent_ok=True) - unknown_files = [] - for line in status.split("\n"): - if line and line[0] == "?": - unknown_files.append(line) - return unknown_files - - def ReadFile(self, filename): - """Returns the contents of a file.""" - file = open(filename, 'rb') - result = "" - try: - result = file.read() - finally: - file.close() - return result - - def GetStatus(self, filename): - """Returns the status of a file.""" - if not self.options.revision: - status = RunShell(["svn", "status", "--ignore-externals", filename]) - if not status: - ErrorExit("svn status returned no output for %s" % filename) - status_lines = status.splitlines() - # If file is in a cl, the output will begin with - # "\n--- Changelist 'cl_name':\n". See - # https://web.archive.org/web/20090918234815/svn.collab.net/repos/svn/trunk/notes/changelist-design.txt - if (len(status_lines) == 3 and - not status_lines[0] and - status_lines[1].startswith("--- Changelist")): - status = status_lines[2] - else: - status = status_lines[0] - # If we have a revision to diff against we need to run "svn list" - # for the old and the new revision and compare the results to get - # the correct status for a file. - else: - dirname, relfilename = os.path.split(filename) - if dirname not in self.svnls_cache: - cmd = ["svn", "list", "-r", self.rev_start, dirname or "."] - out, returncode = RunShellWithReturnCode(cmd) - if returncode: - ErrorExit("Failed to get status for %s." % filename) - old_files = out.splitlines() - args = ["svn", "list"] - if self.rev_end: - args += ["-r", self.rev_end] - cmd = args + [dirname or "."] - out, returncode = RunShellWithReturnCode(cmd) - if returncode: - ErrorExit("Failed to run command %s" % cmd) - self.svnls_cache[dirname] = (old_files, out.splitlines()) - old_files, new_files = self.svnls_cache[dirname] - if relfilename in old_files and relfilename not in new_files: - status = "D " - elif relfilename in old_files and relfilename in new_files: - status = "M " - else: - status = "A " - return status - - def GetBaseFile(self, filename): - status = self.GetStatus(filename) - base_content = None - new_content = None - - # If a file is copied its status will be "A +", which signifies - # "addition-with-history". See "svn st" for more information. We need to - # upload the original file or else diff parsing will fail if the file was - # edited. - if status[0] == "A" and status[3] != "+": - # We'll need to upload the new content if we're adding a binary file - # since diff's output won't contain it. 
- mimetype = RunShell(["svn", "propget", "svn:mime-type", filename], - silent_ok=True) - base_content = "" - is_binary = mimetype and not mimetype.startswith("text/") - if is_binary and self.IsImage(filename): - new_content = self.ReadFile(filename) - elif (status[0] in ("M", "D", "R") or - (status[0] == "A" and status[3] == "+") or # Copied file. - (status[0] == " " and status[1] == "M")): # Property change. - args = [] - if self.options.revision: - url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start) - else: - # Don't change filename, it's needed later. - url = filename - args += ["-r", "BASE"] - cmd = ["svn"] + args + ["propget", "svn:mime-type", url] - mimetype, returncode = RunShellWithReturnCode(cmd) - if returncode: - # File does not exist in the requested revision. - # Reset mimetype, it contains an error message. - mimetype = "" - get_base = False - is_binary = mimetype and not mimetype.startswith("text/") - if status[0] == " ": - # Empty base content just to force an upload. - base_content = "" - elif is_binary: - if self.IsImage(filename): - get_base = True - if status[0] == "M": - if not self.rev_end: - new_content = self.ReadFile(filename) - else: - url = "%s/%s@%s" % (self.svn_base, filename, self.rev_end) - new_content = RunShell(["svn", "cat", url], - universal_newlines=True, silent_ok=True) - else: - base_content = "" - else: - get_base = True - - if get_base: - if is_binary: - universal_newlines = False - else: - universal_newlines = True - if self.rev_start: - # "svn cat -r REV delete_file.txt" doesn't work. cat requires - # the full URL with "@REV" appended instead of using "-r" option. - url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start) - base_content = RunShell(["svn", "cat", url], - universal_newlines=universal_newlines, - silent_ok=True) - else: - base_content = RunShell(["svn", "cat", filename], - universal_newlines=universal_newlines, - silent_ok=True) - if not is_binary: - args = [] - if self.rev_start: - url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start) - else: - url = filename - args += ["-r", "BASE"] - cmd = ["svn"] + args + ["propget", "svn:keywords", url] - keywords, returncode = RunShellWithReturnCode(cmd) - if keywords and not returncode: - base_content = self._CollapseKeywords(base_content, keywords) - else: - StatusUpdate("svn status returned unexpected output: %s" % status) - sys.exit(1) - return base_content, new_content, is_binary, status[0:5] - - -class GitVCS(VersionControlSystem): - """Implementation of the VersionControlSystem interface for Git.""" - - def __init__(self, options): - super(GitVCS, self).__init__(options) - # Map of filename -> hash of base file. - self.base_hashes = {} - - def GenerateDiff(self, extra_args): - # This is more complicated than svn's GenerateDiff because we must convert - # the diff output to include an svn-style "Index:" line as well as record - # the hashes of the base files, so we can upload them along with our diff. - if self.options.revision: - extra_args = [self.options.revision] + extra_args - gitdiff = RunShell(["git", "diff", "--full-index"] + extra_args) - svndiff = [] - filecount = 0 - filename = None - for line in gitdiff.splitlines(): - match = re.match(r"diff --git a/(.*) b/.*$", line) - if match: - filecount += 1 - filename = match.group(1) - svndiff.append("Index: %s\n" % filename) - else: - # The "index" line in a git diff looks like this (long hashes elided): - # index 82c0d44..b2cee3f 100755 - # We want to save the left hash, as that identifies the base file. 
- match = re.match(r"index (\w+)\.\.", line) - if match: - self.base_hashes[filename] = match.group(1) - svndiff.append(line + "\n") - if not filecount: - ErrorExit("No valid patches found in output from git diff") - return "".join(svndiff) - - def GetUnknownFiles(self): - status = RunShell(["git", "ls-files", "--exclude-standard", "--others"], - silent_ok=True) - return status.splitlines() - - def GetBaseFile(self, filename): - hash = self.base_hashes[filename] - base_content = None - new_content = None - is_binary = False - if hash == "0" * 40: # All-zero hash indicates no base file. - status = "A" - base_content = "" - else: - status = "M" - base_content, returncode = RunShellWithReturnCode(["git", "show", hash]) - if returncode: - ErrorExit("Got error status from 'git show %s'" % hash) - return (base_content, new_content, is_binary, status) - - -class MercurialVCS(VersionControlSystem): - """Implementation of the VersionControlSystem interface for Mercurial.""" - - def __init__(self, options, repo_dir): - super(MercurialVCS, self).__init__(options) - # Absolute path to repository (we can be in a subdir) - self.repo_dir = os.path.normpath(repo_dir) - # Compute the subdir - cwd = os.path.normpath(os.getcwd()) - assert cwd.startswith(self.repo_dir) - self.subdir = cwd[len(self.repo_dir):].lstrip(r"\/") - if self.options.revision: - self.base_rev = self.options.revision - else: - self.base_rev = RunShell(["hg", "parent", "-q"]).split(':')[1].strip() - - def _GetRelPath(self, filename): - """Get relative path of a file according to the current directory, - given its logical path in the repo.""" - assert filename.startswith(self.subdir), filename - return filename[len(self.subdir):].lstrip(r"\/") - - def GenerateDiff(self, extra_args): - # If no file specified, restrict to the current subdir - extra_args = extra_args or ["."] - cmd = ["hg", "diff", "--git", "-r", self.base_rev] + extra_args - data = RunShell(cmd, silent_ok=True) - svndiff = [] - filecount = 0 - for line in data.splitlines(): - m = re.match("diff --git a/(\S+) b/(\S+)", line) - if m: - # Modify line to make it look like as it comes from svn diff. - # With this modification no changes on the server side are required - # to make upload.py work with Mercurial repos. - # NOTE: for proper handling of moved/copied files, we have to use - # the second filename. - filename = m.group(2) - svndiff.append("Index: %s" % filename) - svndiff.append("=" * 67) - filecount += 1 - logging.info(line) - else: - svndiff.append(line) - if not filecount: - ErrorExit("No valid patches found in output from hg diff") - return "\n".join(svndiff) + "\n" - - def GetUnknownFiles(self): - """Return a list of files unknown to the VCS.""" - args = [] - status = RunShell(["hg", "status", "--rev", self.base_rev, "-u", "."], - silent_ok=True) - unknown_files = [] - for line in status.splitlines(): - st, fn = line.split(" ", 1) - if st == "?": - unknown_files.append(fn) - return unknown_files - - def GetBaseFile(self, filename): - # "hg status" and "hg cat" both take a path relative to the current subdir - # rather than to the repo root, but "hg diff" has given us the full path - # to the repo root. 
- base_content = "" - new_content = None - is_binary = False - oldrelpath = relpath = self._GetRelPath(filename) - # "hg status -C" returns two lines for moved/copied files, one otherwise - out = RunShell(["hg", "status", "-C", "--rev", self.base_rev, relpath]) - out = out.splitlines() - # HACK: strip error message about missing file/directory if it isn't in - # the working copy - if out[0].startswith('%s: ' % relpath): - out = out[1:] - if len(out) > 1: - # Moved/copied => considered as modified, use old filename to - # retrieve base contents - oldrelpath = out[1].strip() - status = "M" - else: - status, _ = out[0].split(' ', 1) - if status != "A": - base_content = RunShell(["hg", "cat", "-r", self.base_rev, oldrelpath], - silent_ok=True) - is_binary = "\0" in base_content # Mercurial's heuristic - if status != "R": - new_content = open(relpath, "rb").read() - is_binary = is_binary or "\0" in new_content - if is_binary and base_content: - # Fetch again without converting newlines - base_content = RunShell(["hg", "cat", "-r", self.base_rev, oldrelpath], - silent_ok=True, universal_newlines=False) - if not is_binary or not self.IsImage(relpath): - new_content = None - return base_content, new_content, is_binary, status - - -# NOTE: The SplitPatch function is duplicated in engine.py, keep them in sync. -def SplitPatch(data): - """Splits a patch into separate pieces for each file. - - Args: - data: A string containing the output of svn diff. - - Returns: - A list of 2-tuple (filename, text) where text is the svn diff output - pertaining to filename. - """ - patches = [] - filename = None - diff = [] - for line in data.splitlines(True): - new_filename = None - if line.startswith('Index:'): - unused, new_filename = line.split(':', 1) - new_filename = new_filename.strip() - elif line.startswith('Property changes on:'): - unused, temp_filename = line.split(':', 1) - # When a file is modified, paths use '/' between directories, however - # when a property is modified '\' is used on Windows. Make them the same - # otherwise the file shows up twice. - temp_filename = temp_filename.strip().replace('\\', '/') - if temp_filename != filename: - # File has property changes but no modifications, create a new diff. - new_filename = temp_filename - if new_filename: - if filename and diff: - patches.append((filename, ''.join(diff))) - filename = new_filename - diff = [line] - continue - if diff is not None: - diff.append(line) - if filename and diff: - patches.append((filename, ''.join(diff))) - return patches - - -def UploadSeparatePatches(issue, rpc_server, patchset, data, options): - """Uploads a separate patch for each file in the diff output. - - Returns a list of [patch_key, filename] for each file. 
- """ - patches = SplitPatch(data) - rv = [] - for patch in patches: - if len(patch[1]) > MAX_UPLOAD_SIZE: - print ("Not uploading the patch for " + patch[0] + - " because the file is too large.") - continue - form_fields = [("filename", patch[0])] - if not options.download_base: - form_fields.append(("content_upload", "1")) - files = [("data", "data.diff", patch[1])] - ctype, body = EncodeMultipartFormData(form_fields, files) - url = "/%d/upload_patch/%d" % (int(issue), int(patchset)) - print "Uploading patch for " + patch[0] - response_body = rpc_server.Send(url, body, content_type=ctype) - lines = response_body.splitlines() - if not lines or lines[0] != "OK": - StatusUpdate(" --> %s" % response_body) - sys.exit(1) - rv.append([lines[1], patch[0]]) - return rv - - -def GuessVCS(options): - """Helper to guess the version control system. - - This examines the current directory, guesses which VersionControlSystem - we're using, and returns an instance of the appropriate class. Exit with an - error if we can't figure it out. - - Returns: - A VersionControlSystem instance. Exits if the VCS can't be guessed. - """ - # Mercurial has a command to get the base directory of a repository - # Try running it, but don't die if we don't have hg installed. - # NOTE: we try Mercurial first as it can sit on top of an SVN working copy. - try: - out, returncode = RunShellWithReturnCode(["hg", "root"]) - if returncode == 0: - return MercurialVCS(options, out.strip()) - except OSError, (errno, message): - if errno != 2: # ENOENT -- they don't have hg installed. - raise - - # Subversion has a .svn in all working directories. - if os.path.isdir('.svn'): - logging.info("Guessed VCS = Subversion") - return SubversionVCS(options) - - # Git has a command to test if you're in a git tree. - # Try running it, but don't die if we don't have git installed. - try: - out, returncode = RunShellWithReturnCode(["git", "rev-parse", - "--is-inside-work-tree"]) - if returncode == 0: - return GitVCS(options) - except OSError, (errno, message): - if errno != 2: # ENOENT -- they don't have git installed. - raise - - ErrorExit(("Could not guess version control system. " - "Are you in a working copy directory?")) - - -def RealMain(argv, data=None): - """The real main function. - - Args: - argv: Command line arguments. - data: Diff contents. If None (default) the diff is generated by - the VersionControlSystem implementation returned by GuessVCS(). - - Returns: - A 2-tuple (issue id, patchset id). - The patchset id is None if the base files are not uploaded by this - script (applies only to SVN checkouts). - """ - logging.basicConfig(format=("%(asctime).19s %(levelname)s %(filename)s:" - "%(lineno)s %(message)s ")) - os.environ['LC_ALL'] = 'C' - options, args = parser.parse_args(argv[1:]) - global verbosity - verbosity = options.verbose - if verbosity >= 3: - logging.getLogger().setLevel(logging.DEBUG) - elif verbosity >= 2: - logging.getLogger().setLevel(logging.INFO) - vcs = GuessVCS(options) - if isinstance(vcs, SubversionVCS): - # base field is only allowed for Subversion. - # Note: Fetching base files may become deprecated in future releases. 
- base = vcs.GuessBase(options.download_base) - else: - base = None - if not base and options.download_base: - options.download_base = True - logging.info("Enabled upload of base file") - if not options.assume_yes: - vcs.CheckForUnknownFiles() - if data is None: - data = vcs.GenerateDiff(args) - files = vcs.GetBaseFiles(data) - if verbosity >= 1: - print "Upload server:", options.server, "(change with -s/--server)" - if options.issue: - prompt = "Message describing this patch set: " - else: - prompt = "New issue subject: " - message = options.message or raw_input(prompt).strip() - if not message: - ErrorExit("A non-empty message is required") - rpc_server = GetRpcServer(options) - form_fields = [("subject", message)] - if base: - form_fields.append(("base", base)) - if options.issue: - form_fields.append(("issue", str(options.issue))) - if options.email: - form_fields.append(("user", options.email)) - if options.reviewers: - for reviewer in options.reviewers.split(','): - if "@" in reviewer and not reviewer.split("@")[1].count(".") == 1: - ErrorExit("Invalid email address: %s" % reviewer) - form_fields.append(("reviewers", options.reviewers)) - if options.cc: - for cc in options.cc.split(','): - if "@" in cc and not cc.split("@")[1].count(".") == 1: - ErrorExit("Invalid email address: %s" % cc) - form_fields.append(("cc", options.cc)) - description = options.description - if options.description_file: - if options.description: - ErrorExit("Can't specify description and description_file") - file = open(options.description_file, 'r') - description = file.read() - file.close() - if description: - form_fields.append(("description", description)) - # Send a hash of all the base file so the server can determine if a copy - # already exists in an earlier patchset. - base_hashes = "" - for file, info in files.iteritems(): - if not info[0] is None: - checksum = md5.new(info[0]).hexdigest() - if base_hashes: - base_hashes += "|" - base_hashes += checksum + ":" + file - form_fields.append(("base_hashes", base_hashes)) - # If we're uploading base files, don't send the email before the uploads, so - # that it contains the file status. - if options.send_mail and options.download_base: - form_fields.append(("send_mail", "1")) - if not options.download_base: - form_fields.append(("content_upload", "1")) - if len(data) > MAX_UPLOAD_SIZE: - print "Patch is large, so uploading file patches separately." 
- uploaded_diff_file = [] - form_fields.append(("separate_patches", "1")) - else: - uploaded_diff_file = [("data", "data.diff", data)] - ctype, body = EncodeMultipartFormData(form_fields, uploaded_diff_file) - response_body = rpc_server.Send("/upload", body, content_type=ctype) - patchset = None - if not options.download_base or not uploaded_diff_file: - lines = response_body.splitlines() - if len(lines) >= 2: - msg = lines[0] - patchset = lines[1].strip() - patches = [x.split(" ", 1) for x in lines[2:]] - else: - msg = response_body - else: - msg = response_body - StatusUpdate(msg) - if not response_body.startswith("Issue created.") and \ - not response_body.startswith("Issue updated."): - sys.exit(0) - issue = msg[msg.rfind("/")+1:] - - if not uploaded_diff_file: - result = UploadSeparatePatches(issue, rpc_server, patchset, data, options) - if not options.download_base: - patches = result - - if not options.download_base: - vcs.UploadBaseFiles(issue, rpc_server, patches, patchset, options, files) - if options.send_mail: - rpc_server.Send("/" + issue + "/mail", payload="") - return issue, patchset - - -def main(): - try: - RealMain(sys.argv) - except KeyboardInterrupt: - print - StatusUpdate("Interrupted.") - sys.exit(1) - - -if __name__ == "__main__": - main() diff --git a/MicroBenchmarks/libs/benchmark/googletest/googletest/scripts/upload_gtest.py b/MicroBenchmarks/libs/benchmark/googletest/googletest/scripts/upload_gtest.py deleted file mode 100755 --- a/MicroBenchmarks/libs/benchmark/googletest/googletest/scripts/upload_gtest.py +++ /dev/null @@ -1,78 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2009, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""upload_gtest.py v0.1.0 -- uploads a Google Test patch for review. - -This simple wrapper passes all command line flags and ---cc=googletestframework@googlegroups.com to upload.py. 
- -USAGE: upload_gtest.py [options for upload.py] -""" - -__author__ = 'wan@google.com (Zhanyong Wan)' - -import os -import sys - -CC_FLAG = '--cc=' -GTEST_GROUP = 'googletestframework@googlegroups.com' - - -def main(): - # Finds the path to upload.py, assuming it is in the same directory - # as this file. - my_dir = os.path.dirname(os.path.abspath(__file__)) - upload_py_path = os.path.join(my_dir, 'upload.py') - - # Adds Google Test discussion group to the cc line if it's not there - # already. - upload_py_argv = [upload_py_path] - found_cc_flag = False - for arg in sys.argv[1:]: - if arg.startswith(CC_FLAG): - found_cc_flag = True - cc_line = arg[len(CC_FLAG):] - cc_list = [addr for addr in cc_line.split(',') if addr] - if GTEST_GROUP not in cc_list: - cc_list.append(GTEST_GROUP) - upload_py_argv.append(CC_FLAG + ','.join(cc_list)) - else: - upload_py_argv.append(arg) - - if not found_cc_flag: - upload_py_argv.append(CC_FLAG + GTEST_GROUP) - - # Invokes upload.py with the modified command line flags. - os.execv(upload_py_path, upload_py_argv) - - -if __name__ == '__main__': - main() diff --git a/MicroBenchmarks/libs/benchmark/googletest/googletest/src/gtest-all.cc b/MicroBenchmarks/libs/benchmark/googletest/googletest/src/gtest-all.cc --- a/MicroBenchmarks/libs/benchmark/googletest/googletest/src/gtest-all.cc +++ b/MicroBenchmarks/libs/benchmark/googletest/googletest/src/gtest-all.cc @@ -39,6 +39,7 @@ // The following lines pull in the real gtest *.cc files. #include "src/gtest.cc" +#include "src/gtest-assertion-result.cc" #include "src/gtest-death-test.cc" #include "src/gtest-filepath.cc" #include "src/gtest-matchers.cc" diff --git a/MicroBenchmarks/libs/benchmark/googletest/googletest/include/gtest/gtest_prod.h b/MicroBenchmarks/libs/benchmark/googletest/googletest/src/gtest-assertion-result.cc copy from MicroBenchmarks/libs/benchmark/googletest/googletest/include/gtest/gtest_prod.h copy to MicroBenchmarks/libs/benchmark/googletest/googletest/src/gtest-assertion-result.cc --- a/MicroBenchmarks/libs/benchmark/googletest/googletest/include/gtest/gtest_prod.h +++ b/MicroBenchmarks/libs/benchmark/googletest/googletest/src/gtest-assertion-result.cc @@ -1,4 +1,4 @@ -// Copyright 2006, Google Inc. +// Copyright 2005, Google Inc. // All rights reserved. // // Redistribution and use in source and binary forms, with or without @@ -27,35 +27,55 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// The Google C++ Testing and Mocking Framework (Google Test) // -// Google C++ Testing and Mocking Framework definitions useful in production code. -// GOOGLETEST_CM0003 DO NOT DELETE +// This file defines the AssertionResult type. -#ifndef GOOGLETEST_INCLUDE_GTEST_GTEST_PROD_H_ -#define GOOGLETEST_INCLUDE_GTEST_GTEST_PROD_H_ +#include "gtest/gtest-assertion-result.h" -// When you need to test the private or protected members of a class, -// use the FRIEND_TEST macro to declare your tests as friends of the -// class. For example: -// -// class MyClass { -// private: -// void PrivateMethod(); -// FRIEND_TEST(MyClassTest, PrivateMethodWorks); -// }; -// -// class MyClassTest : public testing::Test { -// // ... -// }; -// -// TEST_F(MyClassTest, PrivateMethodWorks) { -// // Can call MyClass::PrivateMethod() here. -// } -// -// Note: The test class must be in the same namespace as the class being tested. -// For example, putting MyClassTest in an anonymous namespace will not work. 
+#include <string>
+#include <utility>
+
+#include "gtest/gtest-message.h"
+
+namespace testing {
+
+// AssertionResult constructors.
+// Used in EXPECT_TRUE/FALSE(assertion_result).
+AssertionResult::AssertionResult(const AssertionResult& other)
+    : success_(other.success_),
+      message_(other.message_.get() != nullptr
+                   ? new ::std::string(*other.message_)
+                   : static_cast< ::std::string*>(nullptr)) {}
+
+// Swaps two AssertionResults.
+void AssertionResult::swap(AssertionResult& other) {
+  using std::swap;
+  swap(success_, other.success_);
+  swap(message_, other.message_);
+}
+
+// Returns the assertion's negation. Used with EXPECT/ASSERT_FALSE.
+AssertionResult AssertionResult::operator!() const {
+  AssertionResult negation(!success_);
+  if (message_.get() != nullptr) negation << *message_;
+  return negation;
+}
+
+// Makes a successful assertion result.
+AssertionResult AssertionSuccess() {
+  return AssertionResult(true);
+}
+
+// Makes a failed assertion result.
+AssertionResult AssertionFailure() {
+  return AssertionResult(false);
+}
 
-#define FRIEND_TEST(test_case_name, test_name)\
-friend class test_case_name##_##test_name##_Test
+// Makes a failed assertion result with the given failure message.
+// Deprecated; use AssertionFailure() << message.
+AssertionResult AssertionFailure(const Message& message) {
+  return AssertionFailure() << message;
+}
 
-#endif  // GOOGLETEST_INCLUDE_GTEST_GTEST_PROD_H_
+}  // namespace testing
diff --git a/MicroBenchmarks/libs/benchmark/googletest/googletest/src/gtest-death-test.cc b/MicroBenchmarks/libs/benchmark/googletest/googletest/src/gtest-death-test.cc
--- a/MicroBenchmarks/libs/benchmark/googletest/googletest/src/gtest-death-test.cc
+++ b/MicroBenchmarks/libs/benchmark/googletest/googletest/src/gtest-death-test.cc
@@ -799,8 +799,8 @@
   GTEST_DEATH_TEST_CHECK_(
       ::CreateProcessA(
           executable_path, const_cast<char*>(command_line.c_str()),
-          nullptr,  // Retuned process handle is not inheritable.
-          nullptr,  // Retuned thread handle is not inheritable.
+          nullptr,  // Returned process handle is not inheritable.
+          nullptr,  // Returned thread handle is not inheritable.
           TRUE,  // Child inherits all inheritable handles (for write_handle_).
           0x0,   // Default creation flags.
           nullptr,  // Inherit the parent's environment.
diff --git a/MicroBenchmarks/libs/benchmark/googletest/googletest/src/gtest-internal-inl.h b/MicroBenchmarks/libs/benchmark/googletest/googletest/src/gtest-internal-inl.h
--- a/MicroBenchmarks/libs/benchmark/googletest/googletest/src/gtest-internal-inl.h
+++ b/MicroBenchmarks/libs/benchmark/googletest/googletest/src/gtest-internal-inl.h
@@ -273,7 +273,7 @@
   // Implemented as an explicit loop since std::count_if() in libCstd on
   // Solaris has a non-standard signature.
   int count = 0;
-  for (typename Container::const_iterator it = c.begin(); it != c.end(); ++it) {
+  for (auto it = c.begin(); it != c.end(); ++it) {
     if (predicate(*it))
       ++count;
   }
@@ -623,7 +623,8 @@
   // For example, if Foo() calls Bar(), which in turn calls
   // CurrentOsStackTraceExceptTop(1), Foo() will be included in the
   // trace but Bar() and CurrentOsStackTraceExceptTop() won't.
-  std::string CurrentOsStackTraceExceptTop(int skip_count) GTEST_NO_INLINE_;
+  std::string CurrentOsStackTraceExceptTop(int skip_count)
+      GTEST_NO_INLINE_ GTEST_NO_TAIL_CALL_;
 
   // Finds and returns a TestSuite with the given name.  If one doesn't
   // exist, creates one and returns it.
@@ -1148,15 +1149,15 @@
 
   // Note that "event=TestCaseStart" is a wire format and has to remain
   // "case" for compatibility
-  void OnTestCaseStart(const TestCase& test_case) override {
-    SendLn(std::string("event=TestCaseStart&name=") + test_case.name());
+  void OnTestSuiteStart(const TestSuite& test_suite) override {
+    SendLn(std::string("event=TestCaseStart&name=") + test_suite.name());
   }
 
   // Note that "event=TestCaseEnd" is a wire format and has to remain
   // "case" for compatibility
-  void OnTestCaseEnd(const TestCase& test_case) override {
-    SendLn("event=TestCaseEnd&passed=" + FormatBool(test_case.Passed()) +
-           "&elapsed_time=" + StreamableToString(test_case.elapsed_time()) +
+  void OnTestSuiteEnd(const TestSuite& test_suite) override {
+    SendLn("event=TestCaseEnd&passed=" + FormatBool(test_suite.Passed()) +
+           "&elapsed_time=" + StreamableToString(test_suite.elapsed_time()) +
            "ms");
   }
 
diff --git a/MicroBenchmarks/libs/benchmark/googletest/googletest/src/gtest-port.cc b/MicroBenchmarks/libs/benchmark/googletest/googletest/src/gtest-port.cc
--- a/MicroBenchmarks/libs/benchmark/googletest/googletest/src/gtest-port.cc
+++ b/MicroBenchmarks/libs/benchmark/googletest/googletest/src/gtest-port.cc
@@ -280,10 +280,6 @@
 
 #if GTEST_IS_THREADSAFE && GTEST_OS_WINDOWS
 
-void SleepMilliseconds(int n) {
-  ::Sleep(static_cast<DWORD>(n));
-}
-
 AutoHandle::AutoHandle()
     : handle_(INVALID_HANDLE_VALUE) {}
 
@@ -322,23 +318,6 @@
   return handle_ != nullptr && handle_ != INVALID_HANDLE_VALUE;
 }
 
-Notification::Notification()
-    : event_(::CreateEvent(nullptr,   // Default security attributes.
-                           TRUE,      // Do not reset automatically.
-                           FALSE,     // Initially unset.
-                           nullptr)) {  // Anonymous event.
-  GTEST_CHECK_(event_.Get() != nullptr);
-}
-
-void Notification::Notify() {
-  GTEST_CHECK_(::SetEvent(event_.Get()) != FALSE);
-}
-
-void Notification::WaitForNotification() {
-  GTEST_CHECK_(
-      ::WaitForSingleObject(event_.Get(), INFINITE) == WAIT_OBJECT_0);
-}
-
 Mutex::Mutex()
     : owner_thread_id_(0),
       type_(kDynamic),
@@ -398,12 +377,12 @@
     old_crtdbg_flag_ = _CrtSetDbgFlag(_CRTDBG_REPORT_FLAG);
     // Set heap allocation block type to _IGNORE_BLOCK so that MS debug CRT
     // doesn't report mem leak if there's no matching deallocation.
-    _CrtSetDbgFlag(old_crtdbg_flag_ & ~_CRTDBG_ALLOC_MEM_DF);
+    (void)_CrtSetDbgFlag(old_crtdbg_flag_ & ~_CRTDBG_ALLOC_MEM_DF);
   }
 
   ~MemoryIsNotDeallocated() {
     // Restore the original _CRTDBG_ALLOC_MEM_DF flag
-    _CrtSetDbgFlag(old_crtdbg_flag_);
+    (void)_CrtSetDbgFlag(old_crtdbg_flag_);
   }
 
  private:
@@ -650,7 +629,8 @@
       &ThreadLocalRegistryImpl::WatcherThreadFunc,
       reinterpret_cast<LPVOID>(new ThreadIdAndHandle(thread_id, thread)),
       CREATE_SUSPENDED, &watcher_thread_id);
-  GTEST_CHECK_(watcher_thread != nullptr);
+  GTEST_CHECK_(watcher_thread != nullptr)
+      << "CreateThread failed with error " << ::GetLastError() << ".";
   // Give the watcher thread the same priority as ours to avoid being
   // blocked by it.
    ::SetThreadPriority(watcher_thread,
diff --git a/MicroBenchmarks/libs/benchmark/googletest/googletest/src/gtest-printers.cc b/MicroBenchmarks/libs/benchmark/googletest/googletest/src/gtest-printers.cc
--- a/MicroBenchmarks/libs/benchmark/googletest/googletest/src/gtest-printers.cc
+++ b/MicroBenchmarks/libs/benchmark/googletest/googletest/src/gtest-printers.cc
@@ -304,6 +304,51 @@
       << static_cast<uint32_t>(c);
 }

+// gcc/clang __{u,}int128_t
+#if defined(__SIZEOF_INT128__)
+void PrintTo(__uint128_t v, ::std::ostream* os) {
+  if (v == 0) {
+    *os << "0";
+    return;
+  }
+
+  // Buffer large enough for ceil(log10(2^128))==39 and the null terminator
+  char buf[40];
+  char* p = buf + sizeof(buf);
+
+  // Some configurations have a __uint128_t, but no support for built in
+  // division. Do manual long division instead.
+
+  uint64_t high = static_cast<uint64_t>(v >> 64);
+  uint64_t low = static_cast<uint64_t>(v);
+
+  *--p = 0;
+  while (high != 0 || low != 0) {
+    uint64_t high_mod = high % 10;
+    high = high / 10;
+    // This is the long division algorithm specialized for a divisor of 10 and
+    // only two elements.
+    // Notable values:
+    //   2^64 / 10 == 1844674407370955161
+    //   2^64 % 10 == 6
+    const uint64_t carry = 6 * high_mod + low % 10;
+    low = low / 10 + high_mod * 1844674407370955161 + carry / 10;
+
+    char digit = static_cast<char>(carry % 10);
+    *--p = '0' + digit;
+  }
+  *os << p;
+}
+void PrintTo(__int128_t v, ::std::ostream* os) {
+  __uint128_t uv = static_cast<__uint128_t>(v);
+  if (v < 0) {
+    *os << "-";
+    uv = -uv;
+  }
+  PrintTo(uv, os);
+}
+#endif  // __SIZEOF_INT128__
+
 // Prints the given array of characters to the ostream. CharType must be either
 // char, char8_t, char16_t, char32_t, or wchar_t.
 // The array starts at begin, the length is len, it may include '\0' characters
diff --git a/MicroBenchmarks/libs/benchmark/googletest/googletest/src/gtest.cc b/MicroBenchmarks/libs/benchmark/googletest/googletest/src/gtest.cc
--- a/MicroBenchmarks/libs/benchmark/googletest/googletest/src/gtest.cc
+++ b/MicroBenchmarks/libs/benchmark/googletest/googletest/src/gtest.cc
@@ -31,8 +31,6 @@
 // The Google C++ Testing and Mocking Framework (Google Test)

 #include "gtest/gtest.h"
-#include "gtest/internal/custom/gtest.h"
-#include "gtest/gtest-spi.h"

 #include
 #include
@@ -47,13 +45,19 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
 #include  // NOLINT
 #include
+#include
 #include

+#include "gtest/gtest-assertion-result.h"
+#include "gtest/gtest-spi.h"
+#include "gtest/internal/custom/gtest.h"
+
 #if GTEST_OS_LINUX
 # include  // NOLINT
@@ -177,7 +181,7 @@
 // is specified on the command line.
 bool g_help_flag = false;

-// Utilty function to Open File for Writing
+// Utility function to Open File for Writing
 static FILE* OpenFileForWriting(const std::string& output_file) {
   FILE* fileout = nullptr;
   FilePath output_file_path(output_file);
@@ -315,7 +319,7 @@
 GTEST_DEFINE_bool_(
     recreate_environments_when_repeating,
     testing::internal::BoolFromGTestEnv("recreate_environments_when_repeating",
-                                        true),
+                                        false),
    "Controls whether global test environments are recreated for each repeat "
    "of the tests. If set to false the global test environments are only set "
    "up once, for the first iteration, and only torn down once, for the last. "
@@ -443,7 +447,7 @@
 namespace {

 // When TEST_P is found without a matching INSTANTIATE_TEST_SUITE_P
-// to creates test cases for it, a syntetic test case is
+// to creates test cases for it, a synthetic test case is
 // inserted to report ether an error or a log message.
//
// This configuration bit will likely be removed at some point.
@@ -723,60 +727,119 @@
   return true;
 }

-bool UnitTestOptions::MatchesFilter(const std::string& name_str,
-                                    const char* filter) {
-  // The filter is a list of patterns separated by colons (:).
-  const char* pattern = filter;
-  while (true) {
-    // Find the bounds of this pattern.
-    const char* const next_sep = strchr(pattern, ':');
-    const char* const pattern_end =
-        next_sep != nullptr ? next_sep : pattern + strlen(pattern);
-
-    // Check if this pattern matches name_str.
-    if (PatternMatchesString(name_str, pattern, pattern_end)) {
-      return true;
-    }
+namespace {

+bool IsGlobPattern(const std::string& pattern) {
+  return std::any_of(pattern.begin(), pattern.end(),
+                     [](const char c) { return c == '?' || c == '*'; });
+}
-    // Give up on this pattern. However, if we found a pattern separator (:),
-    // advance to the next pattern (skipping over the separator) and restart.
-    if (next_sep == nullptr) {
-      return false;
+class UnitTestFilter {
+ public:
+  UnitTestFilter() = default;
+
+  // Constructs a filter from a string of patterns separated by `:`.
+  explicit UnitTestFilter(const std::string& filter) {
+    // By design "" filter matches "" string.
+    std::vector<std::string> all_patterns;
+    SplitString(filter, ':', &all_patterns);
+    const auto exact_match_patterns_begin = std::partition(
+        all_patterns.begin(), all_patterns.end(), &IsGlobPattern);
+
+    glob_patterns_.reserve(static_cast<size_t>(
+        std::distance(all_patterns.begin(), exact_match_patterns_begin)));
+    std::move(all_patterns.begin(), exact_match_patterns_begin,
+              std::inserter(glob_patterns_, glob_patterns_.begin()));
+    std::move(
+        exact_match_patterns_begin, all_patterns.end(),
+        std::inserter(exact_match_patterns_, exact_match_patterns_.begin()));
+  }
+
+  // Returns true if and only if name matches at least one of the patterns in
+  // the filter.
+  bool MatchesName(const std::string& name) const {
+    return exact_match_patterns_.count(name) > 0 ||
+           std::any_of(glob_patterns_.begin(), glob_patterns_.end(),
+                       [&name](const std::string& pattern) {
+                         return PatternMatchesString(
+                             name, pattern.c_str(),
+                             pattern.c_str() + pattern.size());
+                       });
+  }
+
+ private:
+  std::vector<std::string> glob_patterns_;
+  std::unordered_set<std::string> exact_match_patterns_;
+};
+
+class PositiveAndNegativeUnitTestFilter {
+ public:
+  // Constructs a positive and a negative filter from a string. The string
+  // contains a positive filter optionally followed by a '-' character and a
+  // negative filter. In case only a negative filter is provided the positive
+  // filter will be assumed "*".
+  // A filter is a list of patterns separated by ':'.
+  explicit PositiveAndNegativeUnitTestFilter(const std::string& filter) {
+    std::vector<std::string> positive_and_negative_filters;
+
+    // NOTE: `SplitString` always returns a non-empty container.
+    SplitString(filter, '-', &positive_and_negative_filters);
+    const auto& positive_filter = positive_and_negative_filters.front();
+
+    if (positive_and_negative_filters.size() > 1) {
+      positive_filter_ = UnitTestFilter(
+          positive_filter.empty() ? kUniversalFilter : positive_filter);
+
+      // TODO(b/214626361): Fail on multiple '-' characters
+      // For the moment to preserve old behavior we concatenate the rest of the
+      // string parts with `-` as separator to generate the negative filter.
+      auto negative_filter_string = positive_and_negative_filters[1];
+      for (std::size_t i = 2; i < positive_and_negative_filters.size(); i++)
+        negative_filter_string =
+            negative_filter_string + '-' + positive_and_negative_filters[i];
+      negative_filter_ = UnitTestFilter(negative_filter_string);
+    } else {
+      // In case we don't have a negative filter and positive filter is ""
+      // we do not use kUniversalFilter by design as opposed to when we have a
+      // negative filter.
+      positive_filter_ = UnitTestFilter(positive_filter);
     }
-    pattern = next_sep + 1;
   }
-  return true;
+
+  // Returns true if and only if test name (this is generated by appending test
+  // suite name and test name via a '.' character) matches the positive filter
+  // and does not match the negative filter.
+  bool MatchesTest(const std::string& test_suite_name,
+                   const std::string& test_name) const {
+    return MatchesName(test_suite_name + "." + test_name);
+  }
+
+  // Returns true if and only if name matches the positive filter and does not
+  // match the negative filter.
+  bool MatchesName(const std::string& name) const {
+    return positive_filter_.MatchesName(name) &&
+           !negative_filter_.MatchesName(name);
+  }
+
+ private:
+  UnitTestFilter positive_filter_;
+  UnitTestFilter negative_filter_;
+};
+}  // namespace
+
+bool UnitTestOptions::MatchesFilter(const std::string& name_str,
+                                    const char* filter) {
+  return UnitTestFilter(filter).MatchesName(name_str);
 }

 // Returns true if and only if the user-specified filter matches the test
 // suite name and the test name.
 bool UnitTestOptions::FilterMatchesTest(const std::string& test_suite_name,
                                         const std::string& test_name) {
-  const std::string& full_name = test_suite_name + "." + test_name.c_str();
-
   // Split --gtest_filter at '-', if there is one, to separate into
   // positive filter and negative filter portions
-  std::string str = GTEST_FLAG_GET(filter);
-  const char* const p = str.c_str();
-  const char* const dash = strchr(p, '-');
-  std::string positive;
-  std::string negative;
-  if (dash == nullptr) {
-    positive = str.c_str();  // Whole string is a positive filter
-    negative = "";
-  } else {
-    positive = std::string(p, dash);   // Everything up to the dash
-    negative = std::string(dash + 1);  // Everything after the dash
-    if (positive.empty()) {
-      // Treat '-test1' as the same as '*-test1'
-      positive = kUniversalFilter;
-    }
-  }
-
-  // A filter is a colon-separated list of patterns.  It matches a
-  // test if any pattern in it matches the test.
-  return (MatchesFilter(full_name, positive.c_str()) &&
-          !MatchesFilter(full_name, negative.c_str()));
+  return PositiveAndNegativeUnitTestFilter(GTEST_FLAG_GET(filter))
+      .MatchesTest(test_suite_name, test_name);
 }

 #if GTEST_HAS_SEH
@@ -1207,44 +1270,6 @@
   return internal::StringStreamToString(ss_.get());
 }

-// AssertionResult constructors.
-// Used in EXPECT_TRUE/FALSE(assertion_result).
-AssertionResult::AssertionResult(const AssertionResult& other)
-    : success_(other.success_),
-      message_(other.message_.get() != nullptr
-                   ? new ::std::string(*other.message_)
-                   : static_cast< ::std::string*>(nullptr)) {}
-
-// Swaps two AssertionResults.
-void AssertionResult::swap(AssertionResult& other) {
-  using std::swap;
-  swap(success_, other.success_);
-  swap(message_, other.message_);
-}
-
-// Returns the assertion's negation. Used with EXPECT/ASSERT_FALSE.
-AssertionResult AssertionResult::operator!() const { - AssertionResult negation(!success_); - if (message_.get() != nullptr) negation << *message_; - return negation; -} - -// Makes a successful assertion result. -AssertionResult AssertionSuccess() { - return AssertionResult(true); -} - -// Makes a failed assertion result. -AssertionResult AssertionFailure() { - return AssertionResult(false); -} - -// Makes a failed assertion result with the given failure message. -// Deprecated; use AssertionFailure() << message. -AssertionResult AssertionFailure(const Message& message) { - return AssertionFailure() << message; -} - namespace internal { namespace edit_distance { @@ -2855,20 +2880,20 @@ // Creates the test object, runs it, records its result, and then // deletes it. void TestInfo::Run() { - if (!should_run_) return; + TestEventListener* repeater = UnitTest::GetInstance()->listeners().repeater(); + if (!should_run_) { + if (is_disabled_) repeater->OnTestDisabled(*this); + return; + } // Tells UnitTest where to store test result. internal::UnitTestImpl* const impl = internal::GetUnitTestImpl(); impl->set_current_test_info(this); - TestEventListener* repeater = UnitTest::GetInstance()->listeners().repeater(); - // Notifies the unit test event listeners that a test is about to start. repeater->OnTestStart(*this); - result_.set_start_timestamp(internal::GetTimeInMillis()); internal::Timer timer; - impl->os_stack_trace_getter()->UponLeavingGTest(); // Creates the test object. @@ -3033,10 +3058,16 @@ internal::HandleExceptionsInMethodIfSupported( this, &TestSuite::RunSetUpTestSuite, "SetUpTestSuite()"); + const bool skip_all = ad_hoc_test_result().Failed(); + start_timestamp_ = internal::GetTimeInMillis(); internal::Timer timer; for (int i = 0; i < total_test_count(); i++) { - GetMutableTestInfo(i)->Run(); + if (skip_all) { + GetMutableTestInfo(i)->Skip(); + } else { + GetMutableTestInfo(i)->Run(); + } if (GTEST_FLAG_GET(fail_fast) && GetMutableTestInfo(i)->result()->Failed()) { for (int j = i + 1; j < total_test_count(); j++) { @@ -3390,6 +3421,7 @@ #endif // OnTestCaseStart void OnTestStart(const TestInfo& test_info) override; + void OnTestDisabled(const TestInfo& test_info) override; void OnTestPartResult(const TestPartResult& result) override; void OnTestEnd(const TestInfo& test_info) override; @@ -3489,6 +3521,13 @@ fflush(stdout); } +void PrettyUnitTestResultPrinter::OnTestDisabled(const TestInfo& test_info) { + ColoredPrintf(GTestColor::kYellow, "[ DISABLED ] "); + PrintTestName(test_info.test_suite_name(), test_info.name()); + printf("\n"); + fflush(stdout); +} + // Called after an assertion failure. 
 void PrettyUnitTestResultPrinter::OnTestPartResult(
     const TestPartResult& result) {
@@ -3691,6 +3730,7 @@
 #endif  // OnTestCaseStart

   void OnTestStart(const TestInfo& /*test_info*/) override {}
+  void OnTestDisabled(const TestInfo& /*test_info*/) override {}

   void OnTestPartResult(const TestPartResult& result) override;
   void OnTestEnd(const TestInfo& test_info) override;
@@ -3797,6 +3837,7 @@
 #endif  // GTEST_REMOVE_LEGACY_TEST_CASEAPI_
   void OnTestSuiteStart(const TestSuite& parameter) override;
   void OnTestStart(const TestInfo& test_info) override;
+  void OnTestDisabled(const TestInfo& test_info) override;
   void OnTestPartResult(const TestPartResult& result) override;
   void OnTestEnd(const TestInfo& test_info) override;
   // Legacy API is deprecated but still available
@@ -3867,6 +3908,7 @@
 #endif  // GTEST_REMOVE_LEGACY_TEST_CASEAPI_
 GTEST_REPEATER_METHOD_(OnTestSuiteStart, TestSuite)
 GTEST_REPEATER_METHOD_(OnTestStart, TestInfo)
+GTEST_REPEATER_METHOD_(OnTestDisabled, TestInfo)
 GTEST_REPEATER_METHOD_(OnTestPartResult, TestPartResult)
 GTEST_REPEATER_METHOD_(OnEnvironmentsTearDownStart, UnitTest)
 GTEST_REVERSE_REPEATER_METHOD_(OnEnvironmentsSetUpEnd, UnitTest)
@@ -3917,12 +3959,13 @@
  private:
   // Is c a whitespace character that is normalized to a space character
   // when it appears in an XML attribute value?
-  static bool IsNormalizableWhitespace(char c) {
-    return c == 0x9 || c == 0xA || c == 0xD;
+  static bool IsNormalizableWhitespace(unsigned char c) {
+    return c == '\t' || c == '\n' || c == '\r';
   }

   // May c appear in a well-formed XML document?
-  static bool IsValidXmlCharacter(char c) {
+  // https://www.w3.org/TR/REC-xml/#charsets
+  static bool IsValidXmlCharacter(unsigned char c) {
     return IsNormalizableWhitespace(c) || c >= 0x20;
   }

@@ -4061,8 +4104,9 @@
         m << '"';
         break;
       default:
-        if (IsValidXmlCharacter(ch)) {
-          if (is_attribute && IsNormalizableWhitespace(ch))
+        if (IsValidXmlCharacter(static_cast<unsigned char>(ch))) {
+          if (is_attribute &&
+              IsNormalizableWhitespace(static_cast<unsigned char>(ch)))
             m << "&#x" << String::FormatByte(static_cast<unsigned char>(ch))
               << ";";
           else
@@ -4083,7 +4127,7 @@
   std::string output;
   output.reserve(str.size());
   for (std::string::const_iterator it = str.begin(); it != str.end(); ++it)
-    if (IsValidXmlCharacter(*it))
+    if (IsValidXmlCharacter(static_cast<unsigned char>(*it)))
       output.push_back(*it);

   return output;
@@ -4091,7 +4135,6 @@

 // The following routines generate an XML representation of a UnitTest
 // object.
-// GOOGLETEST_CM0009 DO NOT DELETE
 //
 // This is how Google Test concepts map to the DTD:
 //
@@ -4437,15 +4480,15 @@
     return;
   }

-  *stream << "<" << kProperties << ">\n";
+  *stream << " <" << kProperties << ">\n";
   for (int i = 0; i < result.test_property_count(); ++i) {
     const TestProperty& property = result.GetTestProperty(i);
-    *stream << "<" << kProperty;
+    *stream << " <" << kProperty;
     *stream << " name=\"" << EscapeXmlAttribute(property.key()) << "\"";
     *stream << " value=\"" << EscapeXmlAttribute(property.value()) << "\"";
     *stream << "/>\n";
   }
-  *stream << "</" << kProperties << ">\n";
+  *stream << " </" << kProperties << ">\n";
 }

 // End XmlUnitTestResultPrinter
@@ -5039,7 +5082,7 @@
   // create the file with a single "0" character in it.  I/O
   // errors are ignored as there's nothing better we can do and we
   // don't want to fail the test because of this.
- FILE* pfile = posix::FOpen(premature_exit_filepath, "w"); + FILE* pfile = posix::FOpen(premature_exit_filepath_.c_str(), "w"); fwrite("0", 1, 1, pfile); fclose(pfile); } @@ -5737,9 +5780,9 @@ auto* const new_test_suite = new TestSuite(test_suite_name, type_param, set_up_tc, tear_down_tc); + const UnitTestFilter death_test_suite_filter(kDeathTestSuiteFilter); // Is this a death test suite? - if (internal::UnitTestOptions::MatchesFilter(test_suite_name, - kDeathTestSuiteFilter)) { + if (death_test_suite_filter.MatchesName(test_suite_name)) { // Yes. Inserts the test suite after the last death test suite // defined so far. This only works when the test suites haven't // been shuffled. Otherwise we may end up running a death test @@ -5818,9 +5861,7 @@ return true; } - random_seed_ = GTEST_FLAG_GET(shuffle) - ? GetRandomSeedFromFlag(GTEST_FLAG_GET(random_seed)) - : 0; + random_seed_ = GetRandomSeedFromFlag(GTEST_FLAG_GET(random_seed)); // True if and only if at least one test has failed. bool failed = false; @@ -6074,6 +6115,9 @@ const int32_t shard_index = shard_tests == HONOR_SHARDING_PROTOCOL ? Int32FromEnvOrDie(kTestShardIndex, -1) : -1; + const PositiveAndNegativeUnitTestFilter gtest_flag_filter( + GTEST_FLAG_GET(filter)); + const UnitTestFilter disable_test_filter(kDisableTestFilter); // num_runnable_tests are the number of tests that will // run across all shards (i.e., match filter and are not disabled). // num_selected_tests are the number of tests to be run on @@ -6089,14 +6133,13 @@ const std::string test_name(test_info->name()); // A test is disabled if test suite name or test name matches // kDisableTestFilter. - const bool is_disabled = internal::UnitTestOptions::MatchesFilter( - test_suite_name, kDisableTestFilter) || - internal::UnitTestOptions::MatchesFilter( - test_name, kDisableTestFilter); + const bool is_disabled = + disable_test_filter.MatchesName(test_suite_name) || + disable_test_filter.MatchesName(test_name); test_info->is_disabled_ = is_disabled; - const bool matches_filter = internal::UnitTestOptions::FilterMatchesTest( - test_suite_name, test_name); + const bool matches_filter = + gtest_flag_filter.MatchesTest(test_suite_name, test_name); test_info->matches_filter_ = matches_filter; const bool is_runnable = @@ -6269,8 +6312,8 @@ // For example, if Foo() calls Bar(), which in turn calls // GetCurrentOsStackTraceExceptTop(..., 1), Foo() will be included in // the trace but Bar() and GetCurrentOsStackTraceExceptTop() won't. -std::string GetCurrentOsStackTraceExceptTop(UnitTest* /*unit_test*/, - int skip_count) { +GTEST_NO_INLINE_ GTEST_NO_TAIL_CALL_ std::string +GetCurrentOsStackTraceExceptTop(UnitTest* /*unit_test*/, int skip_count) { // We pass skip_count + 1 to skip this wrapper function in addition // to what the user really wants to skip. 
return GetUnitTestImpl()->CurrentOsStackTraceExceptTop(skip_count + 1); diff --git a/MicroBenchmarks/libs/benchmark/googletest/googletest/test/BUILD.bazel b/MicroBenchmarks/libs/benchmark/googletest/googletest/test/BUILD.bazel --- a/MicroBenchmarks/libs/benchmark/googletest/googletest/test/BUILD.bazel +++ b/MicroBenchmarks/libs/benchmark/googletest/googletest/test/BUILD.bazel @@ -30,7 +30,6 @@ # # Bazel BUILD for The Google C++ Testing Framework (Google Test) -load("@rules_cc//cc:defs.bzl", "cc_binary", "cc_test") load("@rules_python//python:defs.bzl", "py_library", "py_test") licenses(["notice"]) diff --git a/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-break-on-failure-unittest.py b/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-break-on-failure-unittest.py --- a/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-break-on-failure-unittest.py +++ b/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-break-on-failure-unittest.py @@ -39,7 +39,7 @@ """ import os -import gtest_test_utils +from googletest.test import gtest_test_utils # Constants. diff --git a/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-catch-exceptions-test.py b/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-catch-exceptions-test.py --- a/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-catch-exceptions-test.py +++ b/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-catch-exceptions-test.py @@ -35,7 +35,7 @@ Google Test) and verifies their output. """ -import gtest_test_utils +from googletest.test import gtest_test_utils # Constants. FLAG_PREFIX = '--gtest_' @@ -147,19 +147,19 @@ self.assertTrue( 'CxxExceptionInConstructorTest::TearDownTestSuite() ' 'called as expected.' in EX_BINARY_OUTPUT, EX_BINARY_OUTPUT) - self.assertTrue( + self.assertFalse( 'CxxExceptionInSetUpTestSuiteTest constructor ' 'called as expected.' in EX_BINARY_OUTPUT, EX_BINARY_OUTPUT) - self.assertTrue( + self.assertFalse( 'CxxExceptionInSetUpTestSuiteTest destructor ' 'called as expected.' in EX_BINARY_OUTPUT, EX_BINARY_OUTPUT) - self.assertTrue( + self.assertFalse( 'CxxExceptionInSetUpTestSuiteTest::SetUp() ' 'called as expected.' in EX_BINARY_OUTPUT, EX_BINARY_OUTPUT) - self.assertTrue( + self.assertFalse( 'CxxExceptionInSetUpTestSuiteTest::TearDown() ' 'called as expected.' in EX_BINARY_OUTPUT, EX_BINARY_OUTPUT) - self.assertTrue( + self.assertFalse( 'CxxExceptionInSetUpTestSuiteTest test body ' 'called as expected.' 
in EX_BINARY_OUTPUT, EX_BINARY_OUTPUT) diff --git a/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-color-test.py b/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-color-test.py --- a/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-color-test.py +++ b/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-color-test.py @@ -32,7 +32,7 @@ """Verifies that Google Test correctly determines whether to use colors.""" import os -import gtest_test_utils +from googletest.test import gtest_test_utils IS_WINDOWS = os.name == 'nt' diff --git a/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-death-test-test.cc b/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-death-test-test.cc --- a/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-death-test-test.cc +++ b/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-death-test-test.cc @@ -668,35 +668,19 @@ # if GTEST_OS_WINDOWS -// Tests that EXPECT_DEBUG_DEATH works as expected when in debug mode -// the Windows CRT crashes the process with an assertion failure. +// https://docs.microsoft.com/en-us/cpp/c-runtime-library/reference/crtsetreportmode +// In debug mode, the calls to _CrtSetReportMode and _CrtSetReportFile enable +// the dumping of assertions to stderr. Tests that EXPECT_DEATH works as +// expected when in CRT debug mode (compiled with /MTd or /MDd, which defines +// _DEBUG) the Windows CRT crashes the process with an assertion failure. // 1. Asserts on death. // 2. Has no side effect (doesn't pop up a window or wait for user input). -// -// And in opt mode, it: -// 1. Has side effects but does not assert. +#ifdef _DEBUG TEST_F(TestForDeathTest, CRTDebugDeath) { - int sideeffect = 0; - - // Put the regex in a local variable to make sure we don't get an "unused" - // warning in opt mode. - const char* regex = "dup.* : Assertion failed"; - - EXPECT_DEBUG_DEATH(DieInCRTDebugElse12(&sideeffect), regex) + EXPECT_DEATH(DieInCRTDebugElse12(nullptr), "dup.* : Assertion failed") << "Must accept a streamed message"; - -# ifdef NDEBUG - - // Checks that the assignment occurs in opt mode (sideeffect). - EXPECT_EQ(12, sideeffect); - -# else - - // Checks that the assignment does not occur in dbg mode (no sideeffect). - EXPECT_EQ(0, sideeffect); - -# endif } +#endif // _DEBUG # endif // GTEST_OS_WINDOWS diff --git a/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-env-var-test.py b/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-env-var-test.py --- a/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-env-var-test.py +++ b/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-env-var-test.py @@ -32,7 +32,7 @@ """Verifies that Google Test correctly parses environment variables.""" import os -import gtest_test_utils +from googletest.test import gtest_test_utils IS_WINDOWS = os.name == 'nt' diff --git a/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-failfast-unittest.py b/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-failfast-unittest.py --- a/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-failfast-unittest.py +++ b/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-failfast-unittest.py @@ -41,7 +41,7 @@ """ import os -import gtest_test_utils +from googletest.test import gtest_test_utils # Constants. 
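The CRTDebugDeath rewrite above assumes the harness routes debug-CRT assertion reports to stderr, per the Microsoft documentation linked in its comment. A minimal sketch of that configuration, assuming a Windows build with /MTd or /MDd (RouteCrtAssertsToStderr is an illustrative helper, not gtest API):

    #ifdef _DEBUG
    #include <crtdbg.h>

    // Send CRT assertion reports to stderr instead of popping up a dialog, so
    // that a death test can match the "Assertion failed" text in the child's
    // captured output.
    static void RouteCrtAssertsToStderr() {
      _CrtSetReportMode(_CRT_ASSERT, _CRTDBG_MODE_FILE);
      _CrtSetReportFile(_CRT_ASSERT, _CRTDBG_FILE_STDERR);
    }
    #endif  // _DEBUG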
diff --git a/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-filter-unittest.py b/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-filter-unittest.py --- a/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-filter-unittest.py +++ b/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-filter-unittest.py @@ -47,7 +47,7 @@ except ImportError: pass import sys -import gtest_test_utils +from googletest.test import gtest_test_utils # Constants. diff --git a/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-global-environment-unittest.py b/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-global-environment-unittest.py --- a/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-global-environment-unittest.py +++ b/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-global-environment-unittest.py @@ -36,7 +36,7 @@ """ import re -import gtest_test_utils +from googletest.test import gtest_test_utils def RunAndReturnOutput(args=None): @@ -71,10 +71,13 @@ def testEnvironmentSetUpAndTornDownForEachRepeat(self): """Tests the behavior of test environments and gtest_repeat.""" - txt = RunAndReturnOutput(['--gtest_repeat=2']) + # When --gtest_recreate_environments_when_repeating is true, the global test + # environment should be set up and torn down for each iteration. + txt = RunAndReturnOutput([ + '--gtest_repeat=2', + '--gtest_recreate_environments_when_repeating=true', + ]) - # By default, with gtest_repeat=2, the global test environment should be set - # up and torn down for each iteration. expected_pattern = ('(.|\n)*' r'Repeating all tests \(iteration 1\)' '(.|\n)*' @@ -97,13 +100,12 @@ def testEnvironmentSetUpAndTornDownOnce(self): """Tests environment and --gtest_recreate_environments_when_repeating.""" + # By default the environment should only be set up and torn down once, at + # the start and end of the test respectively. txt = RunAndReturnOutput([ - '--gtest_repeat=2', '--gtest_recreate_environments_when_repeating=false' + '--gtest_repeat=2', ]) - # When --gtest_recreate_environments_when_repeating is false, the test - # environment should only be set up and torn down once, at the start and - # end of the test respectively. 
expected_pattern = ('(.|\n)*' r'Repeating all tests \(iteration 1\)' '(.|\n)*' diff --git a/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-json-outfiles-test.py b/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-json-outfiles-test.py --- a/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-json-outfiles-test.py +++ b/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-json-outfiles-test.py @@ -32,8 +32,8 @@ import json import os -import gtest_json_test_utils -import gtest_test_utils +from googletest.test import gtest_json_test_utils +from googletest.test import gtest_test_utils GTEST_OUTPUT_SUBDIR = 'json_outfiles' GTEST_OUTPUT_1_TEST = 'gtest_xml_outfile1_test_' diff --git a/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-json-output-unittest.py b/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-json-output-unittest.py --- a/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-json-output-unittest.py +++ b/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-json-output-unittest.py @@ -37,8 +37,8 @@ import re import sys -import gtest_json_test_utils -import gtest_test_utils +from googletest.test import gtest_json_test_utils +from googletest.test import gtest_test_utils GTEST_FILTER_FLAG = '--gtest_filter' GTEST_LIST_TESTS_FLAG = '--gtest_list_tests' diff --git a/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-list-tests-unittest.py b/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-list-tests-unittest.py --- a/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-list-tests-unittest.py +++ b/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-list-tests-unittest.py @@ -38,7 +38,7 @@ """ import re -import gtest_test_utils +from googletest.test import gtest_test_utils # Constants. diff --git a/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-listener-test.cc b/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-listener-test.cc --- a/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-listener-test.cc +++ b/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-listener-test.cc @@ -285,6 +285,7 @@ << "AddGlobalTestEnvironment should not generate any events itself."; GTEST_FLAG_SET(repeat, 2); + GTEST_FLAG_SET(recreate_environments_when_repeating, true); int ret_val = RUN_ALL_TESTS(); #ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_ diff --git a/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-output-test-golden-lin.txt b/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-output-test-golden-lin.txt --- a/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-output-test-golden-lin.txt +++ b/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-output-test-golden-lin.txt @@ -12,7 +12,7 @@ 3 Stack trace: (omitted) -[==========] Running 88 tests from 41 test suites. +[==========] Running 89 tests from 42 test suites. [----------] Global test environment set-up. FooEnvironment::SetUp() called. BarEnvironment::SetUp() called. 
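To make the flag change exercised above concrete: under the new default a repeated run reuses one global environment, and recreating it per iteration is opt-in. A minimal sketch, where MyEnv is a hypothetical environment:

    #include <cstdio>
    #include "gtest/gtest.h"

    class MyEnv : public testing::Environment {
     public:
      void SetUp() override { std::printf("global SetUp\n"); }
      void TearDown() override { std::printf("global TearDown\n"); }
    };

    int main(int argc, char** argv) {
      testing::InitGoogleTest(&argc, argv);
      // Google Test takes ownership of the pointer.
      testing::AddGlobalTestEnvironment(new MyEnv);
      // --gtest_repeat=2
      //     -> one SetUp/TearDown pair (new default).
      // --gtest_repeat=2 --gtest_recreate_environments_when_repeating=true
      //     -> one pair per iteration (the previous behavior).
      return RUN_ALL_TESTS();
    }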
@@ -956,6 +956,17 @@ ~DynamicFixture() [ FAILED ] BadDynamicFixture2.Derived DynamicFixture::TearDownTestSuite +[----------] 1 test from TestSuiteThatFailsToSetUp +googletest-output-test_.cc:#: Failure +Value of: false + Actual: false +Expected: true +Stack trace: (omitted) + +[ RUN ] TestSuiteThatFailsToSetUp.ShouldNotRun +googletest-output-test_.cc:#: Skipped + +[ SKIPPED ] TestSuiteThatFailsToSetUp.ShouldNotRun [----------] 1 test from PrintingFailingParams/FailingParamTest [ RUN ] PrintingFailingParams/FailingParamTest.Fails/0 googletest-output-test_.cc:#: Failure @@ -1032,8 +1043,10 @@ Expected fatal failure. Stack trace: (omitted) -[==========] 88 tests from 41 test suites ran. +[==========] 89 tests from 42 test suites ran. [ PASSED ] 31 tests. +[ SKIPPED ] 1 test, listed below: +[ SKIPPED ] TestSuiteThatFailsToSetUp.ShouldNotRun [ FAILED ] 57 tests, listed below: [ FAILED ] NonfatalFailureTest.EscapesStringOperands [ FAILED ] NonfatalFailureTest.DiffForLongStrings @@ -1094,6 +1107,9 @@ [ FAILED ] GoogleTestVerification.UninstantiatedTypeParameterizedTestSuite 57 FAILED TESTS +[ FAILED ] TestSuiteThatFailsToSetUp: SetUpTestSuite or TearDownTestSuite + + 1 FAILED TEST SUITE  YOU HAVE 1 DISABLED TEST Note: Google Test filter = FatalFailureTest.*:LoggingTest.* diff --git a/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-output-test.py b/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-output-test.py --- a/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-output-test.py +++ b/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-output-test.py @@ -42,7 +42,7 @@ import os import re import sys -import gtest_test_utils +from googletest.test import gtest_test_utils # The flag for generating the golden file diff --git a/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-output-test_.cc b/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-output-test_.cc --- a/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-output-test_.cc +++ b/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-output-test_.cc @@ -1060,6 +1060,14 @@ } }; +class TestSuiteThatFailsToSetUp : public testing::Test { + public: + static void SetUpTestSuite() { EXPECT_TRUE(false); } +}; +TEST_F(TestSuiteThatFailsToSetUp, ShouldNotRun) { + std::abort(); +} + // The main function. 
//
// The idea is to use Google Test to run all the tests we have defined (some
diff --git a/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-param-test-invalid-name1-test.py b/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-param-test-invalid-name1-test.py
--- a/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-param-test-invalid-name1-test.py
+++ b/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-param-test-invalid-name1-test.py
@@ -30,7 +30,7 @@

 """Verifies that Google Test warns the user when not initialized properly."""

-import gtest_test_utils
+from googletest.test import gtest_test_utils

 binary_name = 'googletest-param-test-invalid-name1-test_'
 COMMAND = gtest_test_utils.GetTestExecutablePath(binary_name)
diff --git a/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-param-test-invalid-name2-test.py b/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-param-test-invalid-name2-test.py
--- a/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-param-test-invalid-name2-test.py
+++ b/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-param-test-invalid-name2-test.py
@@ -30,7 +30,7 @@

 """Verifies that Google Test warns the user when not initialized properly."""

-import gtest_test_utils
+from googletest.test import gtest_test_utils

 binary_name = 'googletest-param-test-invalid-name2-test_'
 COMMAND = gtest_test_utils.GetTestExecutablePath(binary_name)
diff --git a/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-port-test.cc b/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-port-test.cc
--- a/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-port-test.cc
+++ b/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-port-test.cc
@@ -36,8 +36,10 @@
 # include <time.h>
 #endif  // GTEST_OS_MAC

+#include <chrono>  // NOLINT
 #include <list>
 #include <memory>
+#include <thread>  // NOLINT
 #include <utility>  // For std::pair and std::make_pair.
 #include <vector>

@@ -333,7 +335,7 @@
       break;
     }

-    SleepMilliseconds(100);
+    std::this_thread::sleep_for(std::chrono::milliseconds(100));
   }

   // Retry if an arbitrary other thread was created or destroyed.
@@ -1050,7 +1052,7 @@
     int temp = value_;
     {
       // We need to put up a memory barrier to prevent reads and writes to
-      // value_ rearranged with the call to SleepMilliseconds when observed
+      // value_ rearranged with the call to sleep_for when observed
      // from other threads.
 #if GTEST_HAS_PTHREAD
      // On POSIX, locking a mutex puts up a memory barrier.  We cannot use
@@ -1061,7 +1063,8 @@
           pthread_mutex_init(&memory_barrier_mutex, nullptr));
       GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_lock(&memory_barrier_mutex));

-      SleepMilliseconds(static_cast<int>(random_.Generate(30)));
+      std::this_thread::sleep_for(
+          std::chrono::milliseconds(random_.Generate(30)));

       GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_unlock(&memory_barrier_mutex));
       GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_destroy(&memory_barrier_mutex));
@@ -1069,7 +1072,8 @@
      // On Windows, performing an interlocked access puts up a memory barrier.
       volatile LONG dummy = 0;
       ::InterlockedIncrement(&dummy);
-      SleepMilliseconds(static_cast<int>(random_.Generate(30)));
+      std::this_thread::sleep_for(
+          std::chrono::milliseconds(random_.Generate(30)));
       ::InterlockedIncrement(&dummy);
 #else
 # error "Memory barrier not implemented on this platform."
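The googletest-port-test.cc hunks above replace the removed SleepMilliseconds() shim with the standard library directly; the replacement idiom as a minimal sketch:

    #include <chrono>
    #include <thread>

    // Portable equivalent of the removed Windows-only shim: C++11 provides
    // this wherever threads are supported, so no platform #ifdef is needed.
    static void SleepMs(int n) {
      std::this_thread::sleep_for(std::chrono::milliseconds(n));
    }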
diff --git a/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-printers-test.cc b/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-printers-test.cc
--- a/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-printers-test.cc
+++ b/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-printers-test.cc
@@ -449,12 +449,40 @@
 #endif  // !GTEST_OS_WINDOWS
 }

+// gcc/clang __{u,}int128_t values.
+#if defined(__SIZEOF_INT128__)
+TEST(PrintBuiltInTypeTest, Int128) {
+  // Small ones
+  EXPECT_EQ("0", Print(__int128_t{0}));
+  EXPECT_EQ("0", Print(__uint128_t{0}));
+  EXPECT_EQ("12345", Print(__int128_t{12345}));
+  EXPECT_EQ("12345", Print(__uint128_t{12345}));
+  EXPECT_EQ("-12345", Print(__int128_t{-12345}));
+
+  // Large ones
+  EXPECT_EQ("340282366920938463463374607431768211455", Print(~__uint128_t{}));
+  __int128_t max_128 = static_cast<__int128_t>(~__uint128_t{} / 2);
+  EXPECT_EQ("-170141183460469231731687303715884105728", Print(~max_128));
+  EXPECT_EQ("170141183460469231731687303715884105727", Print(max_128));
+}
+#endif  // __SIZEOF_INT128__
+
 // Floating-points.
 TEST(PrintBuiltInTypeTest, FloatingPoints) {
   EXPECT_EQ("1.5", Print(1.5f));   // float
   EXPECT_EQ("-2.5", Print(-2.5));  // double
 }

+#if GTEST_HAS_RTTI
+TEST(PrintBuiltInTypeTest, TypeInfo) {
+  struct MyStruct {};
+  auto res = Print(typeid(MyStruct{}));
+  // We can't guarantee that we can demangle the name, but either name should
+  // contain the substring "MyStruct".
+  EXPECT_NE(res.find("MyStruct"), res.npos) << res;
+}
+#endif  // GTEST_HAS_RTTI
+
 // Since ::std::stringstream::operator<<(const void *) formats the pointer
 // output differently with different compilers, we have to create the expected
 // output first and use it as our expectation.
@@ -1873,6 +1901,7 @@

 #if GTEST_INTERNAL_HAS_OPTIONAL
 TEST(PrintOptionalTest, Basic) {
+  EXPECT_EQ("(nullopt)", PrintToString(internal::Nullopt()));
   internal::Optional<int> value;
   EXPECT_EQ("(nullopt)", PrintToString(value));
   value = {7};
diff --git a/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-setuptestsuite-test.py b/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-setuptestsuite-test.py
--- a/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-setuptestsuite-test.py
+++ b/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-setuptestsuite-test.py
@@ -31,7 +31,7 @@

 """Verifies that SetUpTestSuite and TearDownTestSuite errors are noticed."""

-import gtest_test_utils
+from googletest.test import gtest_test_utils

 COMMAND = gtest_test_utils.GetTestExecutablePath(
     'googletest-setuptestsuite-test_')
diff --git a/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-shuffle-test.py b/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-shuffle-test.py
--- a/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-shuffle-test.py
+++ b/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-shuffle-test.py
@@ -31,7 +31,7 @@

 """Verifies that test shuffling works."""

 import os
-import gtest_test_utils
+from googletest.test import gtest_test_utils

 # Command to run the googletest-shuffle-test_ program.
COMMAND = gtest_test_utils.GetTestExecutablePath('googletest-shuffle-test_') diff --git a/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-throw-on-failure-test.py b/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-throw-on-failure-test.py --- a/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-throw-on-failure-test.py +++ b/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-throw-on-failure-test.py @@ -36,7 +36,7 @@ """ import os -import gtest_test_utils +from googletest.test import gtest_test_utils # Constants. diff --git a/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-uninitialized-test.py b/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-uninitialized-test.py --- a/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-uninitialized-test.py +++ b/MicroBenchmarks/libs/benchmark/googletest/googletest/test/googletest-uninitialized-test.py @@ -31,7 +31,7 @@ """Verifies that Google Test warns the user when not initialized properly.""" -import gtest_test_utils +from googletest.test import gtest_test_utils COMMAND = gtest_test_utils.GetTestExecutablePath('googletest-uninitialized-test_') diff --git a/MicroBenchmarks/libs/benchmark/googletest/googletest/test/gtest_help_test.py b/MicroBenchmarks/libs/benchmark/googletest/googletest/test/gtest_help_test.py --- a/MicroBenchmarks/libs/benchmark/googletest/googletest/test/gtest_help_test.py +++ b/MicroBenchmarks/libs/benchmark/googletest/googletest/test/gtest_help_test.py @@ -39,12 +39,13 @@ import os import re -import gtest_test_utils +from googletest.test import gtest_test_utils IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux' IS_GNUHURD = os.name == 'posix' and os.uname()[0] == 'GNU' IS_GNUKFREEBSD = os.name == 'posix' and os.uname()[0] == 'GNU/kFreeBSD' +IS_OPENBSD = os.name == 'posix' and os.uname()[0] == 'OpenBSD' IS_WINDOWS = os.name == 'nt' PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath('gtest_help_test_') @@ -113,7 +114,7 @@ self.assertEquals(0, exit_code) self.assert_(HELP_REGEX.search(output), output) - if IS_LINUX or IS_GNUHURD or IS_GNUKFREEBSD: + if IS_LINUX or IS_GNUHURD or IS_GNUKFREEBSD or IS_OPENBSD: self.assert_(STREAM_RESULT_TO_FLAG in output, output) else: self.assert_(STREAM_RESULT_TO_FLAG not in output, output) diff --git a/MicroBenchmarks/libs/benchmark/googletest/googletest/test/gtest_list_output_unittest.py b/MicroBenchmarks/libs/benchmark/googletest/googletest/test/gtest_list_output_unittest.py --- a/MicroBenchmarks/libs/benchmark/googletest/googletest/test/gtest_list_output_unittest.py +++ b/MicroBenchmarks/libs/benchmark/googletest/googletest/test/gtest_list_output_unittest.py @@ -40,7 +40,7 @@ import os import re -import gtest_test_utils +from googletest.test import gtest_test_utils GTEST_LIST_TESTS_FLAG = '--gtest_list_tests' GTEST_OUTPUT_FLAG = '--gtest_output' diff --git a/MicroBenchmarks/libs/benchmark/googletest/googletest/test/gtest_pred_impl_unittest.cc b/MicroBenchmarks/libs/benchmark/googletest/googletest/test/gtest_pred_impl_unittest.cc --- a/MicroBenchmarks/libs/benchmark/googletest/googletest/test/gtest_pred_impl_unittest.cc +++ b/MicroBenchmarks/libs/benchmark/googletest/googletest/test/gtest_pred_impl_unittest.cc @@ -27,7 +27,7 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-// This file is AUTOMATICALLY GENERATED on 11/05/2019 by command +// This file is AUTOMATICALLY GENERATED on 07/21/2021 by command // 'gen_gtest_pred_impl.py 5'. DO NOT EDIT BY HAND! // Regression test for gtest_pred_impl.h @@ -136,7 +136,7 @@ // Verifies that the control flow in the test function is expected. if (expected_to_finish_ && !finished_) { - FAIL() << "The predicate assertion unexpactedly aborted the test."; + FAIL() << "The predicate assertion unexpectedly aborted the test."; } else if (!expected_to_finish_ && finished_) { FAIL() << "The failed predicate assertion didn't abort the test " "as expected."; @@ -530,7 +530,7 @@ // Verifies that the control flow in the test function is expected. if (expected_to_finish_ && !finished_) { - FAIL() << "The predicate assertion unexpactedly aborted the test."; + FAIL() << "The predicate assertion unexpectedly aborted the test."; } else if (!expected_to_finish_ && finished_) { FAIL() << "The failed predicate assertion didn't abort the test " "as expected."; @@ -966,7 +966,7 @@ // Verifies that the control flow in the test function is expected. if (expected_to_finish_ && !finished_) { - FAIL() << "The predicate assertion unexpactedly aborted the test."; + FAIL() << "The predicate assertion unexpectedly aborted the test."; } else if (!expected_to_finish_ && finished_) { FAIL() << "The failed predicate assertion didn't abort the test " "as expected."; @@ -1444,7 +1444,7 @@ // Verifies that the control flow in the test function is expected. if (expected_to_finish_ && !finished_) { - FAIL() << "The predicate assertion unexpactedly aborted the test."; + FAIL() << "The predicate assertion unexpectedly aborted the test."; } else if (!expected_to_finish_ && finished_) { FAIL() << "The failed predicate assertion didn't abort the test " "as expected."; @@ -1964,7 +1964,7 @@ // Verifies that the control flow in the test function is expected. if (expected_to_finish_ && !finished_) { - FAIL() << "The predicate assertion unexpactedly aborted the test."; + FAIL() << "The predicate assertion unexpectedly aborted the test."; } else if (!expected_to_finish_ && finished_) { FAIL() << "The failed predicate assertion didn't abort the test " "as expected."; diff --git a/MicroBenchmarks/libs/benchmark/googletest/googletest/test/gtest_repeat_test.cc b/MicroBenchmarks/libs/benchmark/googletest/googletest/test/gtest_repeat_test.cc --- a/MicroBenchmarks/libs/benchmark/googletest/googletest/test/gtest_repeat_test.cc +++ b/MicroBenchmarks/libs/benchmark/googletest/googletest/test/gtest_repeat_test.cc @@ -142,6 +142,7 @@ // Tests the behavior of Google Test when --gtest_repeat has the given value. void TestRepeat(int repeat) { GTEST_FLAG_SET(repeat, repeat); + GTEST_FLAG_SET(recreate_environments_when_repeating, true); ResetCounts(); GTEST_CHECK_INT_EQ_(repeat > 0 ? 1 : 0, RUN_ALL_TESTS()); @@ -152,6 +153,7 @@ // set of tests. void TestRepeatWithEmptyFilter(int repeat) { GTEST_FLAG_SET(repeat, repeat); + GTEST_FLAG_SET(recreate_environments_when_repeating, true); GTEST_FLAG_SET(filter, "None"); ResetCounts(); @@ -163,6 +165,7 @@ // successful tests. void TestRepeatWithFilterForSuccessfulTests(int repeat) { GTEST_FLAG_SET(repeat, repeat); + GTEST_FLAG_SET(recreate_environments_when_repeating, true); GTEST_FLAG_SET(filter, "*-*ShouldFail"); ResetCounts(); @@ -179,6 +182,7 @@ // failed tests. 
void TestRepeatWithFilterForFailedTests(int repeat) { GTEST_FLAG_SET(repeat, repeat); + GTEST_FLAG_SET(recreate_environments_when_repeating, true); GTEST_FLAG_SET(filter, "*ShouldFail"); ResetCounts(); diff --git a/MicroBenchmarks/libs/benchmark/googletest/googletest/test/gtest_skip_check_output_test.py b/MicroBenchmarks/libs/benchmark/googletest/googletest/test/gtest_skip_check_output_test.py --- a/MicroBenchmarks/libs/benchmark/googletest/googletest/test/gtest_skip_check_output_test.py +++ b/MicroBenchmarks/libs/benchmark/googletest/googletest/test/gtest_skip_check_output_test.py @@ -35,7 +35,7 @@ import re -import gtest_test_utils +from googletest.test import gtest_test_utils # Path to the gtest_skip_in_environment_setup_test binary EXE_PATH = gtest_test_utils.GetTestExecutablePath('gtest_skip_test') diff --git a/MicroBenchmarks/libs/benchmark/googletest/googletest/test/gtest_skip_environment_check_output_test.py b/MicroBenchmarks/libs/benchmark/googletest/googletest/test/gtest_skip_environment_check_output_test.py --- a/MicroBenchmarks/libs/benchmark/googletest/googletest/test/gtest_skip_environment_check_output_test.py +++ b/MicroBenchmarks/libs/benchmark/googletest/googletest/test/gtest_skip_environment_check_output_test.py @@ -33,7 +33,7 @@ output. """ -import gtest_test_utils +from googletest.test import gtest_test_utils # Path to the gtest_skip_in_environment_setup_test binary EXE_PATH = gtest_test_utils.GetTestExecutablePath( diff --git a/MicroBenchmarks/libs/benchmark/googletest/googletest/test/gtest_test_utils.py b/MicroBenchmarks/libs/benchmark/googletest/googletest/test/gtest_test_utils.py --- a/MicroBenchmarks/libs/benchmark/googletest/googletest/test/gtest_test_utils.py +++ b/MicroBenchmarks/libs/benchmark/googletest/googletest/test/gtest_test_utils.py @@ -32,6 +32,7 @@ # pylint: disable-msg=C6204 import os +import subprocess import sys IS_WINDOWS = os.name == 'nt' @@ -42,13 +43,6 @@ import shutil import tempfile import unittest as _test_module - -try: - import subprocess - _SUBPROCESS_MODULE_AVAILABLE = True -except: - import popen2 - _SUBPROCESS_MODULE_AVAILABLE = False # pylint: enable-msg=C6204 GTEST_OUTPUT_VAR_NAME = 'GTEST_OUTPUT' @@ -173,7 +167,7 @@ 'Unable to find the test binary "%s". Please make sure to provide\n' 'a path to the binary via the --build_dir flag or the BUILD_DIR\n' 'environment variable.' % path) - print >> sys.stderr, message + print(message, file=sys.stderr) sys.exit(1) return path @@ -224,69 +218,18 @@ combined in a string. """ - # The subprocess module is the preferrable way of running programs - # since it is available and behaves consistently on all platforms, - # including Windows. But it is only available starting in python 2.4. - # In earlier python versions, we revert to the popen2 module, which is - # available in python 2.0 and later but doesn't provide required - # functionality (Popen4) under Windows. This allows us to support Mac - # OS X 10.4 Tiger, which has python 2.3 installed. - if _SUBPROCESS_MODULE_AVAILABLE: - if capture_stderr: - stderr = subprocess.STDOUT - else: - stderr = subprocess.PIPE - - p = subprocess.Popen(command, - stdout=subprocess.PIPE, stderr=stderr, - cwd=working_dir, universal_newlines=True, env=env) - # communicate returns a tuple with the file object for the child's - # output. 
- self.output = p.communicate()[0] - self._return_code = p.returncode + if capture_stderr: + stderr = subprocess.STDOUT else: - old_dir = os.getcwd() - - def _ReplaceEnvDict(dest, src): - # Changes made by os.environ.clear are not inheritable by child - # processes until Python 2.6. To produce inheritable changes we have - # to delete environment items with the del statement. - for key in dest.keys(): - del dest[key] - dest.update(src) - - # When 'env' is not None, backup the environment variables and replace - # them with the passed 'env'. When 'env' is None, we simply use the - # current 'os.environ' for compatibility with the subprocess.Popen - # semantics used above. - if env is not None: - old_environ = os.environ.copy() - _ReplaceEnvDict(os.environ, env) - - try: - if working_dir is not None: - os.chdir(working_dir) - if capture_stderr: - p = popen2.Popen4(command) - else: - p = popen2.Popen3(command) - p.tochild.close() - self.output = p.fromchild.read() - ret_code = p.wait() - finally: - os.chdir(old_dir) - - # Restore the old environment variables - # if they were replaced. - if env is not None: - _ReplaceEnvDict(os.environ, old_environ) - - # Converts ret_code to match the semantics of - # subprocess.Popen.returncode. - if os.WIFSIGNALED(ret_code): - self._return_code = -os.WTERMSIG(ret_code) - else: # os.WIFEXITED(ret_code) should return True here. - self._return_code = os.WEXITSTATUS(ret_code) + stderr = subprocess.PIPE + + p = subprocess.Popen(command, + stdout=subprocess.PIPE, stderr=stderr, + cwd=working_dir, universal_newlines=True, env=env) + # communicate returns a tuple with the file object for the child's + # output. + self.output = p.communicate()[0] + self._return_code = p.returncode if bool(self._return_code & 0x80000000): self.terminated_by_signal = True diff --git a/MicroBenchmarks/libs/benchmark/googletest/googletest/test/gtest_testbridge_test.py b/MicroBenchmarks/libs/benchmark/googletest/googletest/test/gtest_testbridge_test.py --- a/MicroBenchmarks/libs/benchmark/googletest/googletest/test/gtest_testbridge_test.py +++ b/MicroBenchmarks/libs/benchmark/googletest/googletest/test/gtest_testbridge_test.py @@ -31,7 +31,7 @@ import os -import gtest_test_utils +from googletest.test import gtest_test_utils binary_name = 'gtest_testbridge_test_' COMMAND = gtest_test_utils.GetTestExecutablePath(binary_name) diff --git a/MicroBenchmarks/libs/benchmark/googletest/googletest/test/gtest_unittest.cc b/MicroBenchmarks/libs/benchmark/googletest/googletest/test/gtest_unittest.cc --- a/MicroBenchmarks/libs/benchmark/googletest/googletest/test/gtest_unittest.cc +++ b/MicroBenchmarks/libs/benchmark/googletest/googletest/test/gtest_unittest.cc @@ -111,15 +111,15 @@ EXPECT_EQ("event=TestIterationEnd&passed=1&elapsed_time=0ms\n", *output()); } -TEST_F(StreamingListenerTest, OnTestCaseStart) { +TEST_F(StreamingListenerTest, OnTestSuiteStart) { *output() = ""; - streamer_.OnTestCaseStart(TestCase("FooTest", "Bar", nullptr, nullptr)); + streamer_.OnTestSuiteStart(TestSuite("FooTest", "Bar", nullptr, nullptr)); EXPECT_EQ("event=TestCaseStart&name=FooTest\n", *output()); } -TEST_F(StreamingListenerTest, OnTestCaseEnd) { +TEST_F(StreamingListenerTest, OnTestSuiteEnd) { *output() = ""; - streamer_.OnTestCaseEnd(TestCase("FooTest", "Bar", nullptr, nullptr)); + streamer_.OnTestSuiteEnd(TestSuite("FooTest", "Bar", nullptr, nullptr)); EXPECT_EQ("event=TestCaseEnd&passed=1&elapsed_time=0ms\n", *output()); } @@ -404,7 +404,7 @@ // Tests FormatEpochTimeInMillisAsIso8601(). 
The correctness of conversion // for particular dates below was verified in Python using -// datetime.datetime.fromutctimestamp(/1000). +// datetime.datetime.fromutctimestamp(/1000). // FormatEpochTimeInMillisAsIso8601 depends on the current timezone, so we // have to set up a particular timezone to obtain predictable results. @@ -450,6 +450,12 @@ tzset(); GTEST_DISABLE_MSC_WARNINGS_POP_() #else +#if GTEST_OS_LINUX_ANDROID && __ANDROID_API__ < 21 + // Work around KitKat bug in tzset by setting "UTC" before setting "UTC+00". + // See https://github.com/android/ndk/issues/1604. + setenv("TZ", "UTC", 1); + tzset(); +#endif if (time_zone) { setenv(("TZ"), time_zone, 1); } else { @@ -2017,7 +2023,7 @@ } // Tests that property recording functions in UnitTest outside of tests -// functions correcly. Creating a separate instance of UnitTest ensures it +// functions correctly. Creating a separate instance of UnitTest ensures it // is in a state similar to the UnitTest's singleton's between tests. class UnitTestRecordPropertyTest : public testing::internal::UnitTestRecordPropertyTestHelper { @@ -7793,3 +7799,35 @@ FAIL() << "Didn't find the test!"; } + +// Test that the pattern globbing algorithm is linear. If not, this test should +// time out. +TEST(PatternGlobbingTest, MatchesFilterLinearRuntime) { + std::string name(100, 'a'); // Construct the string (a^100)b + name.push_back('b'); + + std::string pattern; // Construct the string ((a*)^100)b + for (int i = 0; i < 100; ++i) { + pattern.append("a*"); + } + pattern.push_back('b'); + + EXPECT_TRUE( + testing::internal::UnitTestOptions::MatchesFilter(name, pattern.c_str())); +} + +TEST(PatternGlobbingTest, MatchesFilterWithMultiplePatterns) { + const std::string name = "aaaa"; + EXPECT_TRUE(testing::internal::UnitTestOptions::MatchesFilter(name, "a*")); + EXPECT_TRUE(testing::internal::UnitTestOptions::MatchesFilter(name, "a*:")); + EXPECT_FALSE(testing::internal::UnitTestOptions::MatchesFilter(name, "ab")); + EXPECT_FALSE(testing::internal::UnitTestOptions::MatchesFilter(name, "ab:")); + EXPECT_TRUE(testing::internal::UnitTestOptions::MatchesFilter(name, "ab:a*")); +} + +TEST(PatternGlobbingTest, MatchesFilterEdgeCases) { + EXPECT_FALSE(testing::internal::UnitTestOptions::MatchesFilter("", "*a")); + EXPECT_TRUE(testing::internal::UnitTestOptions::MatchesFilter("", "*")); + EXPECT_FALSE(testing::internal::UnitTestOptions::MatchesFilter("a", "")); + EXPECT_TRUE(testing::internal::UnitTestOptions::MatchesFilter("", "")); +} diff --git a/MicroBenchmarks/libs/benchmark/googletest/googletest/test/gtest_xml_outfiles_test.py b/MicroBenchmarks/libs/benchmark/googletest/googletest/test/gtest_xml_outfiles_test.py --- a/MicroBenchmarks/libs/benchmark/googletest/googletest/test/gtest_xml_outfiles_test.py +++ b/MicroBenchmarks/libs/benchmark/googletest/googletest/test/gtest_xml_outfiles_test.py @@ -33,8 +33,8 @@ import os from xml.dom import minidom, Node -import gtest_test_utils -import gtest_xml_test_utils +from googletest.test import gtest_test_utils +from googletest.test import gtest_xml_test_utils GTEST_OUTPUT_SUBDIR = "xml_outfiles" GTEST_OUTPUT_1_TEST = "gtest_xml_outfile1_test_" diff --git a/MicroBenchmarks/libs/benchmark/googletest/googletest/test/gtest_xml_output_unittest.py b/MicroBenchmarks/libs/benchmark/googletest/googletest/test/gtest_xml_output_unittest.py --- a/MicroBenchmarks/libs/benchmark/googletest/googletest/test/gtest_xml_output_unittest.py +++ b/MicroBenchmarks/libs/benchmark/googletest/googletest/test/gtest_xml_output_unittest.py @@ -38,8 
+38,8 @@ import sys from xml.dom import minidom, Node -import gtest_test_utils -import gtest_xml_test_utils +from googletest.test import gtest_test_utils +from googletest.test import gtest_xml_test_utils GTEST_FILTER_FLAG = '--gtest_filter' GTEST_LIST_TESTS_FLAG = '--gtest_list_tests' diff --git a/MicroBenchmarks/libs/benchmark/googletest/googletest/test/gtest_xml_test_utils.py b/MicroBenchmarks/libs/benchmark/googletest/googletest/test/gtest_xml_test_utils.py --- a/MicroBenchmarks/libs/benchmark/googletest/googletest/test/gtest_xml_test_utils.py +++ b/MicroBenchmarks/libs/benchmark/googletest/googletest/test/gtest_xml_test_utils.py @@ -31,7 +31,7 @@ import re from xml.dom import minidom, Node -import gtest_test_utils +from googletest.test import gtest_test_utils GTEST_DEFAULT_OUTPUT_FILE = 'test_detail.xml' diff --git a/MicroBenchmarks/libs/benchmark/include/benchmark/benchmark.h b/MicroBenchmarks/libs/benchmark/include/benchmark/benchmark.h --- a/MicroBenchmarks/libs/benchmark/include/benchmark/benchmark.h +++ b/MicroBenchmarks/libs/benchmark/include/benchmark/benchmark.h @@ -34,7 +34,7 @@ BENCHMARK(BM_StringCopy); // Augment the main() program to invoke benchmarks if specified -// via the --benchmarks command line flag. E.g., +// via the --benchmark_filter command line flag. E.g., // my_unittest --benchmark_filter=all // my_unittest --benchmark_filter=BM_StringCreation // my_unittest --benchmark_filter=String @@ -140,13 +140,13 @@ do can be wrapped in a check against the thread index: static void BM_MultiThreaded(benchmark::State& state) { - if (state.thread_index == 0) { + if (state.thread_index() == 0) { // Setup code here. } for (auto _ : state) { // Run the test as normal. } - if (state.thread_index == 0) { + if (state.thread_index() == 0) { // Teardown code here. } } @@ -180,12 +180,15 @@ #include #include #include +#include #include #include #include #include #include +#include "benchmark/export.h" + #if defined(BENCHMARK_HAS_CXX11) #include #include @@ -238,16 +241,24 @@ #define BENCHMARK_INTERNAL_TOSTRING2(x) #x #define BENCHMARK_INTERNAL_TOSTRING(x) BENCHMARK_INTERNAL_TOSTRING2(x) +// clang-format off #if defined(__GNUC__) || defined(__clang__) #define BENCHMARK_BUILTIN_EXPECT(x, y) __builtin_expect(x, y) #define BENCHMARK_DEPRECATED_MSG(msg) __attribute__((deprecated(msg))) +#define BENCHMARK_DISABLE_DEPRECATED_WARNING \ + _Pragma("GCC diagnostic push") \ + _Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"") +#define BENCHMARK_RESTORE_DEPRECATED_WARNING _Pragma("GCC diagnostic pop") #else #define BENCHMARK_BUILTIN_EXPECT(x, y) x #define BENCHMARK_DEPRECATED_MSG(msg) #define BENCHMARK_WARNING_MSG(msg) \ __pragma(message(__FILE__ "(" BENCHMARK_INTERNAL_TOSTRING( \ __LINE__) ") : warning note: " msg)) +#define BENCHMARK_DISABLE_DEPRECATED_WARNING +#define BENCHMARK_RESTORE_DEPRECATED_WARNING #endif +// clang-format on #if defined(__GNUC__) && !defined(__clang__) #define BENCHMARK_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__) @@ -271,22 +282,46 @@ #define BENCHMARK_OVERRIDE #endif +#if defined(_MSC_VER) +#pragma warning(push) +// C4251: needs to have dll-interface to be used by clients of class +#pragma warning(disable : 4251) +#endif + namespace benchmark { class BenchmarkReporter; -class MemoryManager; -void Initialize(int* argc, char** argv); -void Shutdown(); +BENCHMARK_EXPORT void Initialize(int* argc, char** argv, + void (*HelperPrinterf)() = NULL); +BENCHMARK_EXPORT void Shutdown(); // Report to stdout all arguments in 'argv' as unrecognized except the first. 
// Returns true if there is at least one unrecognized argument (i.e. 'argc' > 1). -bool ReportUnrecognizedArguments(int argc, char** argv); +BENCHMARK_EXPORT bool ReportUnrecognizedArguments(int argc, char** argv); + +// Returns the current value of --benchmark_filter. +BENCHMARK_EXPORT std::string GetBenchmarkFilter(); + +// Sets a new value for --benchmark_filter. (This will override the flag's +// current value.) +// Should be called after `benchmark::Initialize()`, as +// `benchmark::Initialize()` will override the flag's value. +BENCHMARK_EXPORT void SetBenchmarkFilter(std::string value); + +// Creates a default display reporter. Used by the library when no display +// reporter is provided, but also made available for external use in case a +// custom reporter should respect the `--benchmark_format` flag as a fallback. +BENCHMARK_EXPORT BenchmarkReporter* CreateDefaultDisplayReporter(); // Generate a list of benchmarks matching the specified --benchmark_filter flag // and if --benchmark_list_tests is specified return after printing the name // of each matching benchmark. Otherwise run each matching benchmark and // report the results. // +// spec : Specifies the benchmarks to run. If users do not specify this arg, +// then the value of FLAGS_benchmark_filter +// will be used. +// // The second and third overloads use the specified 'display_reporter' and // 'file_reporter' respectively. 'file_reporter' will write to the file // specified @@ -294,16 +329,82 @@ // 'file_reporter' is ignored. // // RETURNS: The number of matching benchmarks. -size_t RunSpecifiedBenchmarks(); -size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter); -size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter, - BenchmarkReporter* file_reporter); +BENCHMARK_EXPORT size_t RunSpecifiedBenchmarks(); +BENCHMARK_EXPORT size_t RunSpecifiedBenchmarks(std::string spec); + +BENCHMARK_EXPORT size_t +RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter); +BENCHMARK_EXPORT size_t +RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter, std::string spec); + +BENCHMARK_EXPORT size_t RunSpecifiedBenchmarks( + BenchmarkReporter* display_reporter, BenchmarkReporter* file_reporter); +BENCHMARK_EXPORT size_t +RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter, + BenchmarkReporter* file_reporter, std::string spec); + +// TimeUnit is passed to a benchmark in order to specify the order of magnitude +// for the measured time. +enum TimeUnit { kNanosecond, kMicrosecond, kMillisecond, kSecond }; + +BENCHMARK_EXPORT TimeUnit GetDefaultTimeUnit(); + +// Sets the default time unit the benchmarks use. +// Has to be called before the benchmark loop to take effect. +BENCHMARK_EXPORT void SetDefaultTimeUnit(TimeUnit unit); + +// If a MemoryManager is registered (via RegisterMemoryManager()), +// it can be used to collect and report allocation metrics for a run of the +// benchmark. +class MemoryManager { + public: + static const int64_t TombstoneValue; + + struct Result { + Result() + : num_allocs(0), + max_bytes_used(0), + total_allocated_bytes(TombstoneValue), + net_heap_growth(TombstoneValue) {} + + // The number of allocations made in total between Start and Stop. + int64_t num_allocs; + + // The peak memory use between Start and Stop. + int64_t max_bytes_used; + + // The total memory allocated, in bytes, between Start and Stop. + // Init'ed to TombstoneValue if metric not available. + int64_t total_allocated_bytes; + + // The net changes in memory, in bytes, between Start and Stop.
+ // i.e., total_allocated_bytes - total_deallocated_bytes. + // Init'ed to TombstoneValue if metric not available. + int64_t net_heap_growth; + }; + + virtual ~MemoryManager() {} + + // Implement this to start recording allocation information. + virtual void Start() = 0; + + // Implement this to stop recording and fill out the given Result structure. + BENCHMARK_DEPRECATED_MSG("Use Stop(Result&) instead") + virtual void Stop(Result* result) = 0; + + // FIXME(vyng): Make this pure virtual once we've migrated current users. + BENCHMARK_DISABLE_DEPRECATED_WARNING + virtual void Stop(Result& result) { Stop(&result); } + BENCHMARK_RESTORE_DEPRECATED_WARNING +}; // Register a MemoryManager instance that will be used to collect and report // allocation measurements for benchmark runs. +BENCHMARK_EXPORT void RegisterMemoryManager(MemoryManager* memory_manager); // Add a key-value pair to output as part of the context stanza in the report. +BENCHMARK_EXPORT void AddCustomContext(const std::string& key, const std::string& value); namespace internal { @@ -311,14 +412,15 @@ class BenchmarkImp; class BenchmarkFamilies; +BENCHMARK_EXPORT void UseCharPointer(char const volatile*); // Take ownership of the pointer and register the benchmark. Return the // registered benchmark. -Benchmark* RegisterBenchmarkInternal(Benchmark*); +BENCHMARK_EXPORT Benchmark* RegisterBenchmarkInternal(Benchmark*); // Ensure that the standard streams are properly initialized in every TU. -int InitializeStreams(); +BENCHMARK_EXPORT int InitializeStreams(); BENCHMARK_UNUSED static int stream_init_anchor = InitializeStreams(); } // namespace internal @@ -385,27 +487,27 @@ kDefaults = 0, // Mark the counter as a rate. It will be presented divided // by the duration of the benchmark. - kIsRate = 1U << 0U, + kIsRate = 1 << 0, // Mark the counter as a thread-average quantity. It will be // presented divided by the number of threads. - kAvgThreads = 1U << 1U, + kAvgThreads = 1 << 1, // Mark the counter as a thread-average rate. See above. kAvgThreadsRate = kIsRate | kAvgThreads, // Mark the counter as a constant value, valid/same for *every* iteration. // When reporting, it will be *multiplied* by the iteration count. - kIsIterationInvariant = 1U << 2U, + kIsIterationInvariant = 1 << 2, // Mark the counter as a constant rate. // When reporting, it will be *multiplied* by the iteration count // and then divided by the duration of the benchmark. kIsIterationInvariantRate = kIsRate | kIsIterationInvariant, // Mark the counter as an iteration-average quantity. // It will be presented divided by the number of iterations. - kAvgIterations = 1U << 3U, + kAvgIterations = 1 << 3, // Mark the counter as an iteration-average rate. See above. kAvgIterationsRate = kIsRate | kAvgIterations, // In the end, invert the result. This is always done last! - kInvert = 1U << 31U + kInvert = 1 << 31 }; enum OneK { @@ -423,7 +525,7 @@ Counter(double v = 0., Flags f = kDefaults, OneK k = kIs1000) : value(v), flags(f), oneK(k) {} - BENCHMARK_ALWAYS_INLINE operator double const&() const { return value; } + BENCHMARK_ALWAYS_INLINE operator double const &() const { return value; } BENCHMARK_ALWAYS_INLINE operator double&() { return value; } }; @@ -438,10 +540,6 @@ // This is the container for the user-defined counters. typedef std::map<std::string, Counter> UserCounters; -// TimeUnit is passed to a benchmark in order to specify the order of magnitude // for the measured time.
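As a usage sketch of the MemoryManager interface above (hedged: MyMemoryManager and its zeroed metrics are illustrative placeholders, not library code; only Start(), Stop(), TombstoneValue, and RegisterMemoryManager() come from the declarations just shown):

#include "benchmark/benchmark.h"

class MyMemoryManager : public benchmark::MemoryManager {
 public:
  void Start() BENCHMARK_OVERRIDE {
    // Reset whatever allocation bookkeeping the embedder provides
    // (hypothetical; the library only defines the hook points).
  }
  void Stop(Result* result) BENCHMARK_OVERRIDE {
    // Fill in only what this manager can measure; the newer fields keep
    // their TombstoneValue defaults and the reporters omit them.
    result->num_allocs = 0;      // e.g. from a malloc hook
    result->max_bytes_used = 0;  // e.g. peak heap delta
  }
};

int main(int argc, char** argv) {
  static MyMemoryManager mm;
  benchmark::RegisterMemoryManager(&mm);
  benchmark::Initialize(&argc, argv);
  benchmark::RunSpecifiedBenchmarks();
  benchmark::Shutdown();
  return 0;
}

Overriding the deprecated Stop(Result*) is still required in this sketch because it remains the pure virtual until the migration noted in the FIXME completes.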
-enum TimeUnit { kNanosecond, kMicrosecond, kMillisecond, kSecond }; - // BigO is passed to a benchmark in order to specify the asymptotic // computational // complexity for the benchmark. In case oAuto is selected, complexity will be @@ -450,6 +548,8 @@ typedef uint64_t IterationCount; +enum StatisticUnit { kTime, kPercentage }; + // BigOFunc is passed to a benchmark in order to specify the asymptotic // computational complexity for the benchmark. typedef double(BigOFunc)(IterationCount); @@ -462,9 +562,11 @@ struct Statistics { std::string name_; StatisticsFunc* compute_; + StatisticUnit unit_; - Statistics(const std::string& name, StatisticsFunc* compute) - : name_(name), compute_(compute) {} + Statistics(const std::string& name, StatisticsFunc* compute, + StatisticUnit unit = kTime) + : name_(name), compute_(compute), unit_(unit) {} }; class BenchmarkInstance; @@ -496,7 +598,7 @@ // State is passed to a running Benchmark and contains state for the // benchmark to use. -class State { +class BENCHMARK_EXPORT State { public: struct StateIterator; friend struct StateIterator; @@ -667,6 +769,14 @@ BENCHMARK_DEPRECATED_MSG("use 'range(1)' instead") int64_t range_y() const { return range(1); } + // Number of threads concurrently executing the benchmark. + BENCHMARK_ALWAYS_INLINE + int threads() const { return threads_; } + + // Index of the executing thread. Values from [0, threads). + BENCHMARK_ALWAYS_INLINE + int thread_index() const { return thread_index_; } + BENCHMARK_ALWAYS_INLINE IterationCount iterations() const { if (BENCHMARK_BUILTIN_EXPECT(!started_, false)) { @@ -675,8 +785,8 @@ return max_iterations - total_iterations_ + batch_leftover_; } - private - : // items we expect on the first cache line (ie 64 bytes of the struct) + private: + // items we expect on the first cache line (ie 64 bytes of the struct) // When total_iterations_ is 0, KeepRunning() and friends will return false. // May be larger than max_iterations. IterationCount total_iterations_; @@ -694,7 +804,7 @@ bool finished_; bool error_occurred_; - private: // items we don't need on the first cache line std::vector<int64_t> range_; int64_t complexity_n_; @@ -702,10 +812,6 @@ public: // Container for user-defined counters. UserCounters counters; - // Index of the executing thread. Values from [0, threads). - const int thread_index; - // Number of threads concurrently executing the benchmark. - const int threads; private: State(IterationCount max_iters, const std::vector<int64_t>& ranges, @@ -718,6 +824,10 @@ // is_batch must be true unless n is 1. bool KeepRunningInternal(IterationCount n, bool is_batch); void FinishKeepRunning(); + + const int thread_index_; + const int threads_; + internal::ThreadTimer* const timer_; internal::ThreadManager* const manager_; internal::PerfCountersMeasurement* const perf_counters_measurement_; @@ -819,7 +929,7 @@ // be called on this object to change the properties of the benchmark. // Each method returns "this" so that multiple method calls can be // chained into one expression. -class Benchmark { +class BENCHMARK_EXPORT Benchmark { public: virtual ~Benchmark(); @@ -889,6 +999,23 @@ return Ranges(ranges); } + // Have "setup" and/or "teardown" invoked once for every benchmark run. + // If the benchmark is multi-threaded (will run in k threads concurrently), + // the setup callback will be invoked exactly once (not k times) before + // each run with k threads. Time allowing (e.g.
for a short benchmark), there + may be multiple such runs per benchmark, each run with its own + "setup"/"teardown". + // + // If the benchmark uses different size groups of threads (e.g. via + ThreadRange), the above will be true for each size group. + // + // The callback will be passed a State object, which includes the number + of threads, thread-index, benchmark arguments, etc. + // + // The callback must not be NULL or self-deleting. + Benchmark* Setup(void (*setup)(const benchmark::State&)); + Benchmark* Teardown(void (*teardown)(const benchmark::State&)); + // Pass this benchmark object to *func, which can customize // the benchmark by calling various methods like Arg, Args, // Threads, etc. @@ -957,7 +1084,9 @@ Benchmark* Complexity(BigOFunc* complexity); // Add this statistic to be computed over all the values of the benchmark run - Benchmark* ComputeStatistics(std::string name, StatisticsFunc* statistics); + Benchmark* ComputeStatistics(const std::string& name, + StatisticsFunc* statistics, + StatisticUnit unit = kTime); // Support for running multiple copies of the same benchmark concurrently // in multiple threads. This may be useful when measuring the scaling @@ -991,9 +1120,10 @@ virtual void Run(State& state) = 0; + TimeUnit GetTimeUnit() const; + protected: explicit Benchmark(const char* name); - Benchmark(Benchmark const&); void SetName(const char* name); int ArgsCnt() const; @@ -1006,7 +1136,10 @@ AggregationReportMode aggregation_report_mode_; std::vector<std::string> arg_names_; // Args for all benchmark runs std::vector<std::vector<int64_t> > args_; // Args for all benchmark runs + TimeUnit time_unit_; + bool use_default_time_unit_; + int range_multiplier_; double min_time_; IterationCount iterations_; @@ -1019,7 +1152,21 @@ std::vector<Statistics> statistics_; std::vector<int> thread_counts_; - Benchmark& operator=(Benchmark const&); + typedef void (*callback_function)(const benchmark::State&); + callback_function setup_; + callback_function teardown_; + + Benchmark(Benchmark const&) +#if defined(BENCHMARK_HAS_CXX11) + = delete +#endif + ; + + Benchmark& operator=(Benchmark const&) +#if defined(BENCHMARK_HAS_CXX11) + = delete +#endif + ; }; } // namespace internal @@ -1038,12 +1185,12 @@ // Remove all registered benchmarks. All pointers to previously registered // benchmarks are invalidated. -void ClearRegisteredBenchmarks(); +BENCHMARK_EXPORT void ClearRegisteredBenchmarks(); namespace internal { // The class used to hold all Benchmarks created from static functions // (i.e. those created using the BENCHMARK(...) macros). -class FunctionBenchmark : public Benchmark { +class BENCHMARK_EXPORT FunctionBenchmark : public Benchmark { public: FunctionBenchmark(const char* name, Function* func) : Benchmark(name), func_(func) {} @@ -1067,14 +1214,12 @@ LambdaBenchmark(LambdaBenchmark const&) = delete; - private: - template <class Lam> + template <class Lam> // NOLINTNEXTLINE(readability-redundant-declaration) friend Benchmark* ::benchmark::RegisterBenchmark(const char*, Lam&&); Lambda lambda_; }; #endif - } // namespace internal inline internal::Benchmark* RegisterBenchmark(const char* name, @@ -1126,7 +1271,6 @@ protected: virtual void BenchmarkCase(State&) = 0; }; - } // namespace benchmark // ------------------------------------------------------ @@ -1142,22 +1286,37 @@ #endif // Helpers for generating unique variable names +#ifdef BENCHMARK_HAS_CXX11 +#define BENCHMARK_PRIVATE_NAME(...)
\ + BENCHMARK_PRIVATE_CONCAT(benchmark_uniq_, BENCHMARK_PRIVATE_UNIQUE_ID, \ + __VA_ARGS__) +#else #define BENCHMARK_PRIVATE_NAME(n) \ BENCHMARK_PRIVATE_CONCAT(benchmark_uniq_, BENCHMARK_PRIVATE_UNIQUE_ID, n) +#endif // BENCHMARK_HAS_CXX11 + #define BENCHMARK_PRIVATE_CONCAT(a, b, c) BENCHMARK_PRIVATE_CONCAT2(a, b, c) #define BENCHMARK_PRIVATE_CONCAT2(a, b, c) a##b##c // Helper for concatenation with macro name expansion #define BENCHMARK_PRIVATE_CONCAT_NAME(BaseClass, Method) \ - BaseClass##_##Method##_Benchmark + BaseClass##_##Method##_Benchmark #define BENCHMARK_PRIVATE_DECLARE(n) \ static ::benchmark::internal::Benchmark* BENCHMARK_PRIVATE_NAME(n) \ BENCHMARK_UNUSED +#ifdef BENCHMARK_HAS_CXX11 +#define BENCHMARK(...) \ + BENCHMARK_PRIVATE_DECLARE(_benchmark_) = \ + (::benchmark::internal::RegisterBenchmarkInternal( \ + new ::benchmark::internal::FunctionBenchmark(#__VA_ARGS__, \ + &__VA_ARGS__))) +#else #define BENCHMARK(n) \ BENCHMARK_PRIVATE_DECLARE(n) = \ (::benchmark::internal::RegisterBenchmarkInternal( \ new ::benchmark::internal::FunctionBenchmark(#n, n))) +#endif // BENCHMARK_HAS_CXX11 // Old-style macros #define BENCHMARK_WITH_ARG(n, a) BENCHMARK(n)->Arg((a)) @@ -1221,7 +1380,7 @@ #define BENCHMARK_PRIVATE_DECLARE_F(BaseClass, Method) \ class BaseClass##_##Method##_Benchmark : public BaseClass { \ public: \ - BaseClass##_##Method##_Benchmark() : BaseClass() { \ + BaseClass##_##Method##_Benchmark() { \ this->SetName(#BaseClass "/" #Method); \ } \ \ @@ -1232,7 +1391,7 @@ #define BENCHMARK_TEMPLATE1_PRIVATE_DECLARE_F(BaseClass, Method, a) \ class BaseClass##_##Method##_Benchmark : public BaseClass { \ public: \ - BaseClass##_##Method##_Benchmark() : BaseClass() { \ + BaseClass##_##Method##_Benchmark() { \ this->SetName(#BaseClass "<" #a ">/" #Method); \ } \ \ @@ -1243,7 +1402,7 @@ #define BENCHMARK_TEMPLATE2_PRIVATE_DECLARE_F(BaseClass, Method, a, b) \ class BaseClass##_##Method##_Benchmark : public BaseClass { \ public: \ - BaseClass##_##Method##_Benchmark() : BaseClass() { \ + BaseClass##_##Method##_Benchmark() { \ this->SetName(#BaseClass "<" #a "," #b ">/" #Method); \ } \ \ @@ -1255,7 +1414,7 @@ #define BENCHMARK_TEMPLATE_PRIVATE_DECLARE_F(BaseClass, Method, ...) \ class BaseClass##_##Method##_Benchmark : public BaseClass<__VA_ARGS__> { \ public: \ - BaseClass##_##Method##_Benchmark() : BaseClass<__VA_ARGS__>() { \ + BaseClass##_##Method##_Benchmark() { \ this->SetName(#BaseClass "<" #__VA_ARGS__ ">/" #Method); \ } \ \ @@ -1337,7 +1496,7 @@ namespace benchmark { -struct CPUInfo { +struct BENCHMARK_EXPORT CPUInfo { struct CacheInfo { std::string type; int level; @@ -1345,11 +1504,7 @@ int num_sharing; }; - enum Scaling { - UNKNOWN, - ENABLED, - DISABLED - }; + enum Scaling { UNKNOWN, ENABLED, DISABLED }; int num_cpus; Scaling scaling; @@ -1365,7 +1520,7 @@ }; // Adding Struct for System Information -struct SystemInfo { +struct BENCHMARK_EXPORT SystemInfo { std::string name; static const SystemInfo& Get(); @@ -1377,7 +1532,7 @@ // BenchmarkName contains the components of the Benchmark's name // which allows individual fields to be modified or cleared before // building the final name using 'str()'. -struct BenchmarkName { +struct BENCHMARK_EXPORT BenchmarkName { std::string function_name; std::string args; std::string min_time; @@ -1396,7 +1551,7 @@ // can control the destination of the reports by calling // RunSpecifiedBenchmarks and passing it a custom reporter object. // The reporter object must implement the following interface. 
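Before the reporter interface below, a hedged sketch of what the C++11 variadic BENCHMARK above enables (BM_Copy and the "max" statistic are illustrative names, not part of the library): template instantiations containing commas can now be passed directly, and ComputeStatistics attaches an extra aggregate across repetitions.

#include <algorithm>
#include <vector>
#include "benchmark/benchmark.h"

template <class T, int kSize>
static void BM_Copy(benchmark::State& state) {
  std::vector<T> src(kSize), dst(kSize);
  for (auto _ : state) {
    std::copy(src.begin(), src.end(), dst.begin());
    benchmark::DoNotOptimize(dst.data());
  }
}

// The comma in the template argument list used to require
// BENCHMARK_TEMPLATE2; the __VA_ARGS__ form (C++11 only) accepts it directly.
BENCHMARK(BM_Copy<int, 512>)
    ->Repetitions(10)
    ->ComputeStatistics("max", [](const std::vector<double>& v) -> double {
      return *std::max_element(v.begin(), v.end());
    });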
-class BenchmarkReporter { +class BENCHMARK_EXPORT BenchmarkReporter { public: struct Context { CPUInfo const& cpu_info; @@ -1407,16 +1562,17 @@ Context(); }; - struct Run { + struct BENCHMARK_EXPORT Run { static const int64_t no_repetition_index = -1; enum RunType { RT_Iteration, RT_Aggregate }; Run() : run_type(RT_Iteration), + aggregate_unit(kTime), error_occurred(false), iterations(1), threads(1), - time_unit(kNanosecond), + time_unit(GetDefaultTimeUnit()), real_accumulated_time(0), cpu_accumulated_time(0), max_heapbytes_used(0), @@ -1425,10 +1581,8 @@ complexity_n(0), report_big_o(false), report_rms(false), - counters(), - has_memory_result(false), - allocs_per_iter(0.0), - max_bytes_used(0) {} + memory_result(NULL), + allocs_per_iter(0.0) {} std::string benchmark_name() const; BenchmarkName run_name; @@ -1436,6 +1590,7 @@ int64_t per_family_instance_index; RunType run_type; std::string aggregate_name; + StatisticUnit aggregate_unit; std::string report_label; // Empty if not set by benchmark. bool error_occurred; std::string error_message; @@ -1478,9 +1633,8 @@ UserCounters counters; // Memory metrics. - bool has_memory_result; + const MemoryManager::Result* memory_result; double allocs_per_iter; - int64_t max_bytes_used; }; struct PerFamilyRunReports { @@ -1553,7 +1707,7 @@ // Simple reporter that outputs benchmark data to the console. This is the // default reporter used by RunSpecifiedBenchmarks(). -class ConsoleReporter : public BenchmarkReporter { +class BENCHMARK_EXPORT ConsoleReporter : public BenchmarkReporter { public: enum OutputOptions { OO_None = 0, @@ -1563,10 +1717,7 @@ OO_Defaults = OO_ColorTabular }; explicit ConsoleReporter(OutputOptions opts_ = OO_Defaults) - : output_options_(opts_), - name_field_width_(0), - prev_counters_(), - printed_header_(false) {} + : output_options_(opts_), name_field_width_(0), printed_header_(false) {} virtual bool ReportContext(const Context& context) BENCHMARK_OVERRIDE; virtual void ReportRuns(const std::vector& reports) BENCHMARK_OVERRIDE; @@ -1581,7 +1732,7 @@ bool printed_header_; }; -class JSONReporter : public BenchmarkReporter { +class BENCHMARK_EXPORT JSONReporter : public BenchmarkReporter { public: JSONReporter() : first_report_(true) {} virtual bool ReportContext(const Context& context) BENCHMARK_OVERRIDE; @@ -1594,7 +1745,7 @@ bool first_report_; }; -class BENCHMARK_DEPRECATED_MSG( +class BENCHMARK_EXPORT BENCHMARK_DEPRECATED_MSG( "The CSV Reporter will be removed in a future release") CSVReporter : public BenchmarkReporter { public: @@ -1609,29 +1760,6 @@ std::set user_counter_names_; }; -// If a MemoryManager is registered, it can be used to collect and report -// allocation metrics for a run of the benchmark. -class MemoryManager { - public: - struct Result { - Result() : num_allocs(0), max_bytes_used(0) {} - - // The number of allocations made in total between Start and Stop. - int64_t num_allocs; - - // The peak memory use between Start and Stop. - int64_t max_bytes_used; - }; - - virtual ~MemoryManager() {} - - // Implement this to start recording allocation information. - virtual void Start() = 0; - - // Implement this to stop recording and fill out the given Result structure. 
- virtual void Stop(Result* result) = 0; -}; - inline const char* GetTimeUnitString(TimeUnit unit) { switch (unit) { case kSecond: @@ -1669,12 +1797,17 @@ // CreateRange(0, 100, /*multi=*/4), // CreateDenseRange(0, 4, /*step=*/1), // }); +BENCHMARK_EXPORT std::vector CreateRange(int64_t lo, int64_t hi, int multi); // Creates a list of integer values for the given range and step. -std::vector CreateDenseRange(int64_t start, int64_t limit, - int step); +BENCHMARK_EXPORT +std::vector CreateDenseRange(int64_t start, int64_t limit, int step); } // namespace benchmark +#if defined(_MSC_VER) +#pragma warning(pop) +#endif + #endif // BENCHMARK_BENCHMARK_H_ diff --git a/MicroBenchmarks/libs/benchmark/requirements.txt b/MicroBenchmarks/libs/benchmark/requirements.txt --- a/MicroBenchmarks/libs/benchmark/requirements.txt +++ b/MicroBenchmarks/libs/benchmark/requirements.txt @@ -1,2 +1,3 @@ -numpy == 1.19.4 +numpy == 1.21 scipy == 1.5.4 +pandas == 1.1.5 diff --git a/MicroBenchmarks/libs/benchmark/setup.py b/MicroBenchmarks/libs/benchmark/setup.py --- a/MicroBenchmarks/libs/benchmark/setup.py +++ b/MicroBenchmarks/libs/benchmark/setup.py @@ -1,5 +1,6 @@ import os import posixpath +import platform import re import shutil import sys @@ -15,6 +16,10 @@ IS_WINDOWS = sys.platform.startswith("win") +with open("README.md", "r", encoding="utf-8") as fp: + long_description = fp.read() + + def _get_version(): """Parse the version string from __init__.py.""" with open( @@ -89,6 +94,14 @@ # Link with python*.lib. for library_dir in self.library_dirs: bazel_argv.append("--linkopt=/LIBPATH:" + library_dir) + elif sys.platform == "darwin" and platform.machine() == "x86_64": + bazel_argv.append("--macos_minimum_os=10.9") + + # ARCHFLAGS is always set by cibuildwheel before macOS wheel builds. + archflags = os.getenv("ARCHFLAGS", "") + if "arm64" in archflags: + bazel_argv.append("--cpu=darwin_arm64") + bazel_argv.append("--macos_cpus=arm64") self.spawn(bazel_argv) @@ -109,6 +122,8 @@ version=_get_version(), url="https://github.com/google/benchmark", description="A library to benchmark code snippets.", + long_description=long_description, + long_description_content_type="text/markdown", author="Google", author_email="benchmark-py@google.com", # Contained modules and scripts. diff --git a/MicroBenchmarks/libs/benchmark/src/CMakeLists.txt b/MicroBenchmarks/libs/benchmark/src/CMakeLists.txt --- a/MicroBenchmarks/libs/benchmark/src/CMakeLists.txt +++ b/MicroBenchmarks/libs/benchmark/src/CMakeLists.txt @@ -22,41 +22,40 @@ set_target_properties(benchmark PROPERTIES OUTPUT_NAME "benchmark" VERSION ${GENERIC_LIB_VERSION} - SOVERSION ${GENERIC_LIB_SOVERSION} + SOVERSION 2 ) target_include_directories(benchmark PUBLIC - $ - ) + $ + $ +) + +generate_export_header(benchmark + EXPORT_FILE_NAME ${PROJECT_BINARY_DIR}/include/benchmark/export.h) # libpfm, if available if (HAVE_LIBPFM) - target_link_libraries(benchmark libpfm.a) + target_link_libraries(benchmark PRIVATE pfm) add_definitions(-DHAVE_LIBPFM) endif() # Link threads. 
-target_link_libraries(benchmark ${BENCHMARK_CXX_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT}) -find_library(LIBRT rt) -if(LIBRT) - target_link_libraries(benchmark ${LIBRT}) -endif() +target_link_libraries(benchmark PRIVATE Threads::Threads) + +target_link_libraries(benchmark PRIVATE ${BENCHMARK_CXX_LIBRARIES}) + +if(HAVE_LIB_RT) + target_link_libraries(benchmark PRIVATE rt) +endif(HAVE_LIB_RT) -if(CMAKE_BUILD_TYPE) - string(TOUPPER ${CMAKE_BUILD_TYPE} CMAKE_BUILD_TYPE_UPPER) -endif() -if(NOT CMAKE_THREAD_LIBS_INIT AND "${CMAKE_CXX_FLAGS} ${CMAKE_CXX_FLAGS_${CMAKE_BUILD_TYPE_UPPER}}" MATCHES ".*-fsanitize=[^ ]*address.*") - message(WARNING "CMake's FindThreads.cmake did not fail, but CMAKE_THREAD_LIBS_INIT ended up being empty. This was fixed in https://github.com/Kitware/CMake/commit/d53317130e84898c5328c237186dbd995aaf1c12 Let's guess that -pthread is sufficient.") - target_link_libraries(benchmark -pthread) -endif() # We need extra libraries on Windows if(${CMAKE_SYSTEM_NAME} MATCHES "Windows") - target_link_libraries(benchmark shlwapi) + target_link_libraries(benchmark PRIVATE shlwapi) endif() # We need extra libraries on Solaris if(${CMAKE_SYSTEM_NAME} MATCHES "SunOS") - target_link_libraries(benchmark kstat) + target_link_libraries(benchmark PRIVATE kstat) endif() # Benchmark main library @@ -65,35 +64,47 @@ set_target_properties(benchmark_main PROPERTIES OUTPUT_NAME "benchmark_main" VERSION ${GENERIC_LIB_VERSION} - SOVERSION ${GENERIC_LIB_SOVERSION} + SOVERSION 2 + DEFINE_SYMBOL benchmark_EXPORTS ) -target_include_directories(benchmark PUBLIC - $ - ) -target_link_libraries(benchmark_main benchmark::benchmark) +target_link_libraries(benchmark_main PUBLIC benchmark::benchmark) -set(generated_dir "${CMAKE_CURRENT_BINARY_DIR}/generated") +set(generated_dir "${PROJECT_BINARY_DIR}") set(version_config "${generated_dir}/${PROJECT_NAME}ConfigVersion.cmake") set(project_config "${generated_dir}/${PROJECT_NAME}Config.cmake") set(pkg_config "${generated_dir}/${PROJECT_NAME}.pc") +set(targets_to_export benchmark benchmark_main) set(targets_export_name "${PROJECT_NAME}Targets") set(namespace "${PROJECT_NAME}::") include(CMakePackageConfigHelpers) + +configure_package_config_file ( + ${PROJECT_SOURCE_DIR}/cmake/Config.cmake.in + ${project_config} + INSTALL_DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME} + NO_SET_AND_CHECK_MACRO + NO_CHECK_REQUIRED_COMPONENTS_MACRO +) write_basic_package_version_file( "${version_config}" VERSION ${GENERIC_LIB_VERSION} COMPATIBILITY SameMajorVersion ) -configure_file("${PROJECT_SOURCE_DIR}/cmake/Config.cmake.in" "${project_config}" @ONLY) configure_file("${PROJECT_SOURCE_DIR}/cmake/benchmark.pc.in" "${pkg_config}" @ONLY) +export ( + TARGETS ${targets_to_export} + NAMESPACE "${namespace}" + FILE ${generated_dir}/${targets_export_name}.cmake +) + if (BENCHMARK_ENABLE_INSTALL) # Install target (will install the library to specified CMAKE_INSTALL_PREFIX variable) install( - TARGETS benchmark benchmark_main + TARGETS ${targets_to_export} EXPORT ${targets_export_name} ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} @@ -102,6 +113,7 @@ install( DIRECTORY "${PROJECT_SOURCE_DIR}/include/benchmark" + "${PROJECT_BINARY_DIR}/include/benchmark" DESTINATION ${CMAKE_INSTALL_INCLUDEDIR} FILES_MATCHING PATTERN "*.*h") @@ -118,3 +130,37 @@ NAMESPACE "${namespace}" DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME}") endif() + +if (BENCHMARK_ENABLE_DOXYGEN) + find_package(Doxygen REQUIRED) + set(DOXYGEN_QUIET YES) + 
set(DOXYGEN_RECURSIVE YES) + set(DOXYGEN_GENERATE_HTML YES) + set(DOXYGEN_GENERATE_MAN NO) + set(DOXYGEN_MARKDOWN_SUPPORT YES) + set(DOXYGEN_BUILTIN_STL_SUPPORT YES) + set(DOXYGEN_EXTRACT_PACKAGE YES) + set(DOXYGEN_EXTRACT_STATIC YES) + set(DOXYGEN_SHOW_INCLUDE_FILES YES) + set(DOXYGEN_BINARY_TOC YES) + set(DOXYGEN_TOC_EXPAND YES) + set(DOXYGEN_USE_MDFILE_AS_MAINPAGE "index.md") + doxygen_add_docs(benchmark_doxygen + docs + include + src + ALL + WORKING_DIRECTORY ${PROJECT_SOURCE_DIR} + COMMENT "Building documentation with Doxygen.") + if (BENCHMARK_ENABLE_INSTALL AND BENCHMARK_INSTALL_DOCS) + install( + DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}/html/" + DESTINATION ${CMAKE_INSTALL_DOCDIR}) + endif() +else() + if (BENCHMARK_ENABLE_INSTALL AND BENCHMARK_INSTALL_DOCS) + install( + DIRECTORY "${PROJECT_SOURCE_DIR}/docs/" + DESTINATION ${CMAKE_INSTALL_DOCDIR}) + endif() +endif() diff --git a/MicroBenchmarks/libs/benchmark/src/benchmark.cc b/MicroBenchmarks/libs/benchmark/src/benchmark.cc --- a/MicroBenchmarks/libs/benchmark/src/benchmark.cc +++ b/MicroBenchmarks/libs/benchmark/src/benchmark.cc @@ -63,7 +63,7 @@ // A regular expression that specifies the set of benchmarks to execute. If // this flag is empty, or if this flag is the string \"all\", all benchmarks // linked into the binary are run. -BM_DEFINE_string(benchmark_filter, "."); +BM_DEFINE_string(benchmark_filter, ""); // Minimum number of seconds we should run benchmark before results are // considered significant. For cpu-time based tests, this is the lower bound @@ -121,12 +121,16 @@ // pairs. Kept internal as it's only used for parsing from env/command line. BM_DEFINE_kvpairs(benchmark_context, {}); +// Set the default time unit to use for reports +// Valid values are 'ns', 'us', 'ms' or 's' +BM_DEFINE_string(benchmark_time_unit, ""); + // The level of verbose logging to output BM_DEFINE_int32(v, 0); namespace internal { -std::map* global_context = nullptr; +BENCHMARK_EXPORT std::map* global_context = nullptr; // FIXME: wouldn't LTO mess this up? 
void UseCharPointer(char const volatile*) {} @@ -145,14 +149,13 @@ error_occurred_(false), range_(ranges), complexity_n_(0), - counters(), - thread_index(thread_i), - threads(n_threads), + thread_index_(thread_i), + threads_(n_threads), timer_(timer), manager_(manager), perf_counters_measurement_(perf_counters_measurement) { BM_CHECK(max_iterations != 0) << "At least one iteration must be run"; - BM_CHECK_LT(thread_index, threads) + BM_CHECK_LT(thread_index_, threads_) << "thread_index must be less than threads"; // Note: The use of offsetof below is technically undefined until C++17 @@ -185,7 +188,10 @@ BM_CHECK(started_ && !finished_ && !error_occurred_); timer_->StopTimer(); if (perf_counters_measurement_) { - auto measurements = perf_counters_measurement_->StopAndGetMeasurements(); + std::vector> measurements; + if (!perf_counters_measurement_->Stop(measurements)) { + BM_CHECK(false) << "Perf counters read the value failed."; + } for (const auto& name_and_measurement : measurements) { auto name = name_and_measurement.first; auto measurement = name_and_measurement.second; @@ -363,7 +369,7 @@ additional_run_stats.begin(), additional_run_stats.end()); per_family_reports.erase( - (int)reports_for_family->Runs.front().family_index); + static_cast(reports_for_family->Runs.front().family_index)); } } @@ -378,10 +384,7 @@ // Disable deprecated warnings temporarily because we need to reference // CSVReporter but don't want to trigger -Werror=-Wdeprecated-declarations -#ifdef __GNUC__ -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wdeprecated-declarations" -#endif +BENCHMARK_DISABLE_DEPRECATED_WARNING std::unique_ptr CreateReporter( std::string const& name, ConsoleReporter::OutputOptions output_opts) { @@ -389,18 +392,16 @@ if (name == "console") { return PtrType(new ConsoleReporter(output_opts)); } else if (name == "json") { - return PtrType(new JSONReporter); + return PtrType(new JSONReporter()); } else if (name == "csv") { - return PtrType(new CSVReporter); + return PtrType(new CSVReporter()); } else { std::cerr << "Unexpected format: '" << name << "'\n"; std::exit(1); } } -#ifdef __GNUC__ -#pragma GCC diagnostic pop -#endif +BENCHMARK_RESTORE_DEPRECATED_WARNING } // end namespace @@ -434,17 +435,41 @@ } // end namespace internal +BenchmarkReporter* CreateDefaultDisplayReporter() { + static auto default_display_reporter = + internal::CreateReporter(FLAGS_benchmark_format, + internal::GetOutputOptions()) + .release(); + return default_display_reporter; +} + size_t RunSpecifiedBenchmarks() { - return RunSpecifiedBenchmarks(nullptr, nullptr); + return RunSpecifiedBenchmarks(nullptr, nullptr, FLAGS_benchmark_filter); +} + +size_t RunSpecifiedBenchmarks(std::string spec) { + return RunSpecifiedBenchmarks(nullptr, nullptr, std::move(spec)); } size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter) { - return RunSpecifiedBenchmarks(display_reporter, nullptr); + return RunSpecifiedBenchmarks(display_reporter, nullptr, + FLAGS_benchmark_filter); +} + +size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter, + std::string spec) { + return RunSpecifiedBenchmarks(display_reporter, nullptr, std::move(spec)); } size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter, BenchmarkReporter* file_reporter) { - std::string spec = FLAGS_benchmark_filter; + return RunSpecifiedBenchmarks(display_reporter, file_reporter, + FLAGS_benchmark_filter); +} + +size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter, + BenchmarkReporter* file_reporter, + std::string 
spec) { if (spec.empty() || spec == "all") spec = "."; // Regexp that matches all benchmarks @@ -453,8 +478,7 @@ std::unique_ptr default_display_reporter; std::unique_ptr default_file_reporter; if (!display_reporter) { - default_display_reporter = internal::CreateReporter( - FLAGS_benchmark_format, internal::GetOutputOptions()); + default_display_reporter.reset(CreateDefaultDisplayReporter()); display_reporter = default_display_reporter.get(); } auto& Out = display_reporter->GetOutputStream(); @@ -500,6 +524,21 @@ return benchmarks.size(); } +namespace { +// stores the time unit benchmarks use by default +TimeUnit default_time_unit = kNanosecond; +} // namespace + +TimeUnit GetDefaultTimeUnit() { return default_time_unit; } + +void SetDefaultTimeUnit(TimeUnit unit) { default_time_unit = unit; } + +std::string GetBenchmarkFilter() { return FLAGS_benchmark_filter; } + +void SetBenchmarkFilter(std::string value) { + FLAGS_benchmark_filter = std::move(value); +} + void RegisterMemoryManager(MemoryManager* manager) { internal::memory_manager = manager; } @@ -516,27 +555,47 @@ namespace internal { +void (*HelperPrintf)(); + void PrintUsageAndExit() { - fprintf(stdout, - "benchmark" - " [--benchmark_list_tests={true|false}]\n" - " [--benchmark_filter=]\n" - " [--benchmark_min_time=]\n" - " [--benchmark_repetitions=]\n" - " [--benchmark_enable_random_interleaving={true|false}]\n" - " [--benchmark_report_aggregates_only={true|false}]\n" - " [--benchmark_display_aggregates_only={true|false}]\n" - " [--benchmark_format=]\n" - " [--benchmark_out=]\n" - " [--benchmark_out_format=]\n" - " [--benchmark_color={auto|true|false}]\n" - " [--benchmark_counters_tabular={true|false}]\n" - " [--benchmark_perf_counters=,...]\n" - " [--benchmark_context==,...]\n" - " [--v=]\n"); + if (HelperPrintf) { + HelperPrintf(); + } else { + fprintf(stdout, + "benchmark" + " [--benchmark_list_tests={true|false}]\n" + " [--benchmark_filter=]\n" + " [--benchmark_min_time=]\n" + " [--benchmark_repetitions=]\n" + " [--benchmark_enable_random_interleaving={true|false}]\n" + " [--benchmark_report_aggregates_only={true|false}]\n" + " [--benchmark_display_aggregates_only={true|false}]\n" + " [--benchmark_format=]\n" + " [--benchmark_out=]\n" + " [--benchmark_out_format=]\n" + " [--benchmark_color={auto|true|false}]\n" + " [--benchmark_counters_tabular={true|false}]\n" + " [--benchmark_context==,...]\n" + " [--benchmark_time_unit={ns|us|ms|s}]\n" + " [--v=]\n"); + } exit(0); } +void SetDefaultTimeUnitFromFlag(const std::string& time_unit_flag) { + if (time_unit_flag == "s") { + return SetDefaultTimeUnit(kSecond); + } else if (time_unit_flag == "ms") { + return SetDefaultTimeUnit(kMillisecond); + } else if (time_unit_flag == "us") { + return SetDefaultTimeUnit(kMicrosecond); + } else if (time_unit_flag == "ns") { + return SetDefaultTimeUnit(kNanosecond); + } else if (!time_unit_flag.empty()) { + PrintUsageAndExit(); + } +} + void ParseCommandLineFlags(int* argc, char** argv) { using namespace benchmark; BenchmarkReporter::Context::executable_name = @@ -560,15 +619,14 @@ ParseStringFlag(argv[i], "benchmark_out_format", &FLAGS_benchmark_out_format) || ParseStringFlag(argv[i], "benchmark_color", &FLAGS_benchmark_color) || - // "color_print" is the deprecated name for "benchmark_color". - // TODO: Remove this. 
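Putting the entry points defined above together, a speculative sketch of a custom driver (PrintMyUsage and the "BM_String" spec are illustrative; the benchmark:: calls are the ones declared and defined above):

#include <cstdio>
#include "benchmark/benchmark.h"

static void PrintMyUsage() {
  std::printf("mybench [--benchmark_filter=<regex>] ...\n");  // caller's text
}

int main(int argc, char** argv) {
  // The optional third argument replaces the built-in usage text printed
  // by PrintUsageAndExit().
  benchmark::Initialize(&argc, argv, &PrintMyUsage);
  if (benchmark::ReportUnrecognizedArguments(argc, argv)) return 1;
  benchmark::SetDefaultTimeUnit(benchmark::kMillisecond);
  // Passing a spec overrides --benchmark_filter for this call.
  benchmark::RunSpecifiedBenchmarks("BM_String");
  benchmark::Shutdown();
  return 0;
}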
- ParseStringFlag(argv[i], "color_print", &FLAGS_benchmark_color) || ParseBoolFlag(argv[i], "benchmark_counters_tabular", &FLAGS_benchmark_counters_tabular) || ParseStringFlag(argv[i], "benchmark_perf_counters", &FLAGS_benchmark_perf_counters) || ParseKeyValueFlag(argv[i], "benchmark_context", &FLAGS_benchmark_context) || + ParseStringFlag(argv[i], "benchmark_time_unit", + &FLAGS_benchmark_time_unit) || ParseInt32Flag(argv[i], "v", &FLAGS_v)) { for (int j = i; j != *argc - 1; ++j) argv[j] = argv[j + 1]; @@ -584,6 +642,7 @@ PrintUsageAndExit(); } } + SetDefaultTimeUnitFromFlag(FLAGS_benchmark_time_unit); if (FLAGS_benchmark_color.empty()) { PrintUsageAndExit(); } @@ -599,14 +658,13 @@ } // end namespace internal -void Initialize(int* argc, char** argv) { +void Initialize(int* argc, char** argv, void (*HelperPrintf)()) { internal::ParseCommandLineFlags(argc, argv); internal::LogLevel() = FLAGS_v; + internal::HelperPrintf = HelperPrintf; } -void Shutdown() { - delete internal::global_context; -} +void Shutdown() { delete internal::global_context; } bool ReportUnrecognizedArguments(int argc, char** argv) { for (int i = 1; i < argc; ++i) { diff --git a/MicroBenchmarks/libs/benchmark/src/benchmark_api_internal.h b/MicroBenchmarks/libs/benchmark/src/benchmark_api_internal.h --- a/MicroBenchmarks/libs/benchmark/src/benchmark_api_internal.h +++ b/MicroBenchmarks/libs/benchmark/src/benchmark_api_internal.h @@ -38,6 +38,8 @@ double min_time() const { return min_time_; } IterationCount iterations() const { return iterations_; } int threads() const { return threads_; } + void Setup() const; + void Teardown() const; State Run(IterationCount iters, int thread_id, internal::ThreadTimer* timer, internal::ThreadManager* manager, @@ -62,6 +64,10 @@ double min_time_; IterationCount iterations_; int threads_; // Number of concurrent threads to use + + typedef void (*callback_function)(const benchmark::State&); + callback_function setup_ = nullptr; + callback_function teardown_ = nullptr; }; bool FindBenchmarksInternal(const std::string& re, @@ -70,6 +76,7 @@ bool IsZero(double n); +BENCHMARK_EXPORT ConsoleReporter::OutputOptions GetOutputOptions(bool force_no_color = false); } // end namespace internal diff --git a/MicroBenchmarks/libs/benchmark/src/benchmark_api_internal.cc b/MicroBenchmarks/libs/benchmark/src/benchmark_api_internal.cc --- a/MicroBenchmarks/libs/benchmark/src/benchmark_api_internal.cc +++ b/MicroBenchmarks/libs/benchmark/src/benchmark_api_internal.cc @@ -16,7 +16,7 @@ per_family_instance_index_(per_family_instance_idx), aggregation_report_mode_(benchmark_.aggregation_report_mode_), args_(args), - time_unit_(benchmark_.time_unit_), + time_unit_(benchmark_.GetTimeUnit()), measure_process_cpu_time_(benchmark_.measure_process_cpu_time_), use_real_time_(benchmark_.use_real_time_), use_manual_time_(benchmark_.use_manual_time_), @@ -78,6 +78,9 @@ if (!benchmark_.thread_counts_.empty()) { name_.threads = StrFormat("threads:%d", threads_); } + + setup_ = benchmark_.setup_; + teardown_ = benchmark_.teardown_; } State BenchmarkInstance::Run( @@ -90,5 +93,20 @@ return st; } +void BenchmarkInstance::Setup() const { + if (setup_) { + State st(/*iters*/ 1, args_, /*thread_id*/ 0, threads_, nullptr, nullptr, + nullptr); + setup_(st); + } +} + +void BenchmarkInstance::Teardown() const { + if (teardown_) { + State st(/*iters*/ 1, args_, /*thread_id*/ 0, threads_, nullptr, nullptr, + nullptr); + teardown_(st); + } +} } // namespace internal } // namespace benchmark diff --git
a/MicroBenchmarks/libs/benchmark/src/benchmark_main.cc b/MicroBenchmarks/libs/benchmark/src/benchmark_main.cc --- a/MicroBenchmarks/libs/benchmark/src/benchmark_main.cc +++ b/MicroBenchmarks/libs/benchmark/src/benchmark_main.cc @@ -14,4 +14,5 @@ #include "benchmark/benchmark.h" +BENCHMARK_EXPORT int main(int, char**); BENCHMARK_MAIN(); diff --git a/MicroBenchmarks/libs/benchmark/src/benchmark_register.h b/MicroBenchmarks/libs/benchmark/src/benchmark_register.h --- a/MicroBenchmarks/libs/benchmark/src/benchmark_register.h +++ b/MicroBenchmarks/libs/benchmark/src/benchmark_register.h @@ -12,8 +12,8 @@ // Append the powers of 'mult' in the closed interval [lo, hi]. // Returns iterator to the start of the inserted range. template -typename std::vector::iterator -AddPowers(std::vector* dst, T lo, T hi, int mult) { +typename std::vector::iterator AddPowers(std::vector* dst, T lo, T hi, + int mult) { BM_CHECK_GE(lo, 0); BM_CHECK_GE(hi, lo); BM_CHECK_GE(mult, 2); diff --git a/MicroBenchmarks/libs/benchmark/src/benchmark_register.cc b/MicroBenchmarks/libs/benchmark/src/benchmark_register.cc --- a/MicroBenchmarks/libs/benchmark/src/benchmark_register.cc +++ b/MicroBenchmarks/libs/benchmark/src/benchmark_register.cc @@ -202,7 +202,8 @@ Benchmark::Benchmark(const char* name) : name_(name), aggregation_report_mode_(ARM_Unspecified), - time_unit_(kNanosecond), + time_unit_(GetDefaultTimeUnit()), + use_default_time_unit_(true), range_multiplier_(kRangeMultiplier), min_time_(0), iterations_(0), @@ -211,10 +212,13 @@ use_real_time_(false), use_manual_time_(false), complexity_(oNone), - complexity_lambda_(nullptr) { + complexity_lambda_(nullptr), + setup_(nullptr), + teardown_(nullptr) { ComputeStatistics("mean", StatisticsMean); ComputeStatistics("median", StatisticsMedian); ComputeStatistics("stddev", StatisticsStdDev); + ComputeStatistics("cv", StatisticsCV, kPercentage); } Benchmark::~Benchmark() {} @@ -232,6 +236,7 @@ Benchmark* Benchmark::Unit(TimeUnit unit) { time_unit_ = unit; + use_default_time_unit_ = false; return this; } @@ -320,6 +325,18 @@ return this; } +Benchmark* Benchmark::Setup(void (*setup)(const benchmark::State&)) { + BM_CHECK(setup != nullptr); + setup_ = setup; + return this; +} + +Benchmark* Benchmark::Teardown(void (*teardown)(const benchmark::State&)) { + BM_CHECK(teardown != nullptr); + teardown_ = teardown; + return this; +} + Benchmark* Benchmark::RangeMultiplier(int multiplier) { BM_CHECK(multiplier > 1); range_multiplier_ = multiplier; @@ -398,9 +415,10 @@ return this; } -Benchmark* Benchmark::ComputeStatistics(std::string name, - StatisticsFunc* statistics) { - statistics_.emplace_back(name, statistics); +Benchmark* Benchmark::ComputeStatistics(const std::string& name, + StatisticsFunc* statistics, + StatisticUnit unit) { + statistics_.emplace_back(name, statistics, unit); return this; } @@ -446,6 +464,10 @@ return static_cast(args_.front().size()); } +TimeUnit Benchmark::GetTimeUnit() const { + return use_default_time_unit_ ? 
GetDefaultTimeUnit() : time_unit_; +} + //=============================================================================// // FunctionBenchmark //=============================================================================// @@ -464,8 +486,7 @@ return args; } -std::vector CreateDenseRange(int64_t start, int64_t limit, - int step) { +std::vector CreateDenseRange(int64_t start, int64_t limit, int step) { BM_CHECK_LE(start, limit); std::vector args; for (int64_t arg = start; arg <= limit; arg += step) { diff --git a/MicroBenchmarks/libs/benchmark/src/benchmark_runner.h b/MicroBenchmarks/libs/benchmark/src/benchmark_runner.h --- a/MicroBenchmarks/libs/benchmark/src/benchmark_runner.h +++ b/MicroBenchmarks/libs/benchmark/src/benchmark_runner.h @@ -76,6 +76,8 @@ std::vector pool; + std::vector memory_results; + IterationCount iters; // preserved between repetitions! // So only the first repetition has to find/calculate it, // the other repetitions will just use that precomputed iteration count. diff --git a/MicroBenchmarks/libs/benchmark/src/benchmark_runner.cc b/MicroBenchmarks/libs/benchmark/src/benchmark_runner.cc --- a/MicroBenchmarks/libs/benchmark/src/benchmark_runner.cc +++ b/MicroBenchmarks/libs/benchmark/src/benchmark_runner.cc @@ -67,7 +67,7 @@ const benchmark::internal::BenchmarkInstance& b, const internal::ThreadManager::Result& results, IterationCount memory_iterations, - const MemoryManager::Result& memory_result, double seconds, + const MemoryManager::Result* memory_result, double seconds, int64_t repetition_index, int64_t repeats) { // Create report about this benchmark run. BenchmarkReporter::Run report; @@ -99,12 +99,12 @@ report.counters = results.counters; if (memory_iterations > 0) { - report.has_memory_result = true; + assert(memory_result != nullptr); + report.memory_result = memory_result; report.allocs_per_iter = - memory_iterations ? static_cast(memory_result.num_allocs) / + memory_iterations ? static_cast(memory_result->num_allocs) / memory_iterations : 0; - report.max_bytes_used = memory_result.max_bytes_used; } internal::Finish(&report.counters, results.iterations, seconds, @@ -152,8 +152,7 @@ has_explicit_iteration_count(b.iterations() != 0), pool(b.threads() - 1), iters(has_explicit_iteration_count ? b.iterations() : 1), - perf_counters_measurement( - PerfCounters::Create(StrSplit(FLAGS_benchmark_perf_counters, ','))), + perf_counters_measurement(StrSplit(FLAGS_benchmark_perf_counters, ',')), perf_counters_measurement_ptr(perf_counters_measurement.IsValid() ? &perf_counters_measurement : nullptr) { @@ -239,8 +238,7 @@ // NOTE: When the last run was at least 10% of the min time the max // expansion should be 14x. bool is_significant = (i.seconds / min_time) > 0.1; - multiplier = is_significant ? multiplier : std::min(10.0, multiplier); - if (multiplier <= 1.0) multiplier = 2.0; + multiplier = is_significant ? multiplier : 10.0; // So what seems to be the sufficiently-large iteration count? Round up. const IterationCount max_next_iters = static_cast( @@ -280,7 +278,9 @@ // is *only* calculated for the *first* repetition, and other repetitions // simply use that precomputed iteration count. for (;;) { + b.Setup(); i = DoNIterations(); + b.Teardown(); // Do we consider the results to be significant? // If we are doing repetitions, and the first repetition was already done, @@ -303,21 +303,30 @@ } // Oh, one last thing, we need to also produce the 'memory measurements'.. 
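Since the runner above now calls b.Setup() and b.Teardown() around every timed run (and, just below, around the separate memory-measurement pass), per-run state is rebuilt each time. A hedged sketch of a benchmark relying on that behavior (table, DoSetup, DoTeardown, and BM_Lookup are illustrative, not library names):

#include <set>
#include "benchmark/benchmark.h"

static std::set<int64_t> table;  // illustrative shared state

static void DoSetup(const benchmark::State& state) {
  // Rebuilt before each run; the State argument exposes the benchmark args.
  for (int64_t i = 0; i < state.range(0); ++i) table.insert(i);
}

static void DoTeardown(const benchmark::State&) { table.clear(); }

static void BM_Lookup(benchmark::State& state) {
  for (auto _ : state) {
    benchmark::DoNotOptimize(table.count(state.range(0) / 2));
  }
}
BENCHMARK(BM_Lookup)->Arg(1 << 12)->Setup(DoSetup)->Teardown(DoTeardown);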
- MemoryManager::Result memory_result; + MemoryManager::Result* memory_result = nullptr; IterationCount memory_iterations = 0; if (memory_manager != nullptr) { + // TODO(vyng): Consider making BenchmarkReporter::Run::memory_result an + // optional so we don't have to own the Result here. + // Can't do it now due to cxx03. + memory_results.push_back(MemoryManager::Result()); + memory_result = &memory_results.back(); // Only run a few iterations to reduce the impact of one-time // allocations in benchmarks that are not properly managed. memory_iterations = std::min(16, iters); memory_manager->Start(); std::unique_ptr manager; manager.reset(new internal::ThreadManager(1)); + b.Setup(); RunInThread(&b, memory_iterations, 0, manager.get(), perf_counters_measurement_ptr); manager->WaitForAllThreads(); manager.reset(); + b.Teardown(); - memory_manager->Stop(&memory_result); + BENCHMARK_DISABLE_DEPRECATED_WARNING + memory_manager->Stop(memory_result); + BENCHMARK_RESTORE_DEPRECATED_WARNING } // Ok, now actually report. diff --git a/MicroBenchmarks/libs/benchmark/src/check.h b/MicroBenchmarks/libs/benchmark/src/check.h --- a/MicroBenchmarks/libs/benchmark/src/check.h +++ b/MicroBenchmarks/libs/benchmark/src/check.h @@ -5,6 +5,7 @@ #include #include +#include "benchmark/export.h" #include "internal_macros.h" #include "log.h" @@ -13,10 +14,8 @@ typedef void(AbortHandlerT)(); -inline AbortHandlerT*& GetAbortHandler() { - static AbortHandlerT* handler = &std::abort; - return handler; -} +BENCHMARK_EXPORT +AbortHandlerT*& GetAbortHandler(); BENCHMARK_NORETURN inline void CallAbortHandler() { GetAbortHandler()(); @@ -36,10 +35,17 @@ LogType& GetLog() { return log_; } +#if defined(COMPILER_MSVC) +#pragma warning(push) +#pragma warning(disable : 4722) +#endif BENCHMARK_NORETURN ~CheckHandler() BENCHMARK_NOEXCEPT_OP(false) { log_ << std::endl; CallAbortHandler(); } +#if defined(COMPILER_MSVC) +#pragma warning(pop) +#endif CheckHandler& operator=(const CheckHandler&) = delete; CheckHandler(const CheckHandler&) = delete; diff --git a/MicroBenchmarks/libs/benchmark/src/check.cc b/MicroBenchmarks/libs/benchmark/src/check.cc new file mode 100644 --- /dev/null +++ b/MicroBenchmarks/libs/benchmark/src/check.cc @@ -0,0 +1,11 @@ +#include "check.h" + +namespace benchmark { +namespace internal { + +static AbortHandlerT* handler = &std::abort; + +AbortHandlerT*& GetAbortHandler() { return handler; } + +} // namespace internal +} // namespace benchmark diff --git a/MicroBenchmarks/libs/benchmark/src/colorprint.cc b/MicroBenchmarks/libs/benchmark/src/colorprint.cc --- a/MicroBenchmarks/libs/benchmark/src/colorprint.cc +++ b/MicroBenchmarks/libs/benchmark/src/colorprint.cc @@ -25,8 +25,8 @@ #include "internal_macros.h" #ifdef BENCHMARK_OS_WINDOWS -#include #include +#include #else #include #endif // BENCHMARK_OS_WINDOWS @@ -102,10 +102,10 @@ return local_buff; else { // we did not provide a long enough buffer on our first attempt. 
- size = (size_t)ret + 1; // + 1 for the null byte + size = static_cast<size_t>(ret) + 1; // + 1 for the null byte std::unique_ptr<char[]> buff(new char[size]); ret = vsnprintf(buff.get(), size, msg, args); - BM_CHECK(ret > 0 && ((size_t)ret) < size); + BM_CHECK(ret > 0 && (static_cast<size_t>(ret)) < size); return buff.get(); } } diff --git a/MicroBenchmarks/libs/benchmark/src/commandlineflags.h b/MicroBenchmarks/libs/benchmark/src/commandlineflags.h --- a/MicroBenchmarks/libs/benchmark/src/commandlineflags.h +++ b/MicroBenchmarks/libs/benchmark/src/commandlineflags.h @@ -5,28 +5,33 @@ #include #include +#include "benchmark/export.h" + // Macro for referencing flags. #define FLAG(name) FLAGS_##name // Macros for declaring flags. -#define BM_DECLARE_bool(name) extern bool FLAG(name) -#define BM_DECLARE_int32(name) extern int32_t FLAG(name) -#define BM_DECLARE_double(name) extern double FLAG(name) -#define BM_DECLARE_string(name) extern std::string FLAG(name) +#define BM_DECLARE_bool(name) BENCHMARK_EXPORT extern bool FLAG(name) +#define BM_DECLARE_int32(name) BENCHMARK_EXPORT extern int32_t FLAG(name) +#define BM_DECLARE_double(name) BENCHMARK_EXPORT extern double FLAG(name) +#define BM_DECLARE_string(name) BENCHMARK_EXPORT extern std::string FLAG(name) #define BM_DECLARE_kvpairs(name) \ - extern std::map<std::string, std::string> FLAG(name) + BENCHMARK_EXPORT extern std::map<std::string, std::string> FLAG(name) // Macros for defining flags. #define BM_DEFINE_bool(name, default_val) \ - bool FLAG(name) = benchmark::BoolFromEnv(#name, default_val) + BENCHMARK_EXPORT bool FLAG(name) = benchmark::BoolFromEnv(#name, default_val) #define BM_DEFINE_int32(name, default_val) \ - int32_t FLAG(name) = benchmark::Int32FromEnv(#name, default_val) + BENCHMARK_EXPORT int32_t FLAG(name) = \ + benchmark::Int32FromEnv(#name, default_val) #define BM_DEFINE_double(name, default_val) \ - double FLAG(name) = benchmark::DoubleFromEnv(#name, default_val) + BENCHMARK_EXPORT double FLAG(name) = \ + benchmark::DoubleFromEnv(#name, default_val) #define BM_DEFINE_string(name, default_val) \ - std::string FLAG(name) = benchmark::StringFromEnv(#name, default_val) -#define BM_DEFINE_kvpairs(name, default_val) \ - std::map<std::string, std::string> FLAG(name) = \ + BENCHMARK_EXPORT std::string FLAG(name) = \ + benchmark::StringFromEnv(#name, default_val) +#define BM_DEFINE_kvpairs(name, default_val) \ + BENCHMARK_EXPORT std::map<std::string, std::string> FLAG(name) = \ + benchmark::KvPairsFromEnv(#name, default_val) namespace benchmark { @@ -35,6 +40,7 @@ // // If the variable exists, returns IsTruthyFlagValue() value; if not, // returns the given default value. +BENCHMARK_EXPORT bool BoolFromEnv(const char* flag, bool default_val); // Parses an Int32 from the environment variable corresponding to the given @@ -42,6 +48,7 @@ // // If the variable exists, returns ParseInt32() value; if not, returns // the given default value. +BENCHMARK_EXPORT int32_t Int32FromEnv(const char* flag, int32_t default_val); // Parses a Double from the environment variable corresponding to the given @@ -49,6 +56,7 @@ // // If the variable exists, returns ParseDouble(); if not, returns // the given default value. +BENCHMARK_EXPORT double DoubleFromEnv(const char* flag, double default_val); // Parses a string from the environment variable corresponding to the given @@ -56,6 +64,7 @@ // // If variable exists, returns its value; if not, returns // the given default value.
+BENCHMARK_EXPORT const char* StringFromEnv(const char* flag, const char* default_val); // Parses a set of kvpairs from the environment variable corresponding to the @@ -63,6 +72,7 @@ // // If variable exists, returns its value; if not, returns // the given default value. +BENCHMARK_EXPORT std::map KvPairsFromEnv( const char* flag, std::map default_val); @@ -75,40 +85,47 @@ // // On success, stores the value of the flag in *value, and returns // true. On failure, returns false without changing *value. +BENCHMARK_EXPORT bool ParseBoolFlag(const char* str, const char* flag, bool* value); // Parses a string for an Int32 flag, in the form of "--flag=value". // // On success, stores the value of the flag in *value, and returns // true. On failure, returns false without changing *value. +BENCHMARK_EXPORT bool ParseInt32Flag(const char* str, const char* flag, int32_t* value); // Parses a string for a Double flag, in the form of "--flag=value". // // On success, stores the value of the flag in *value, and returns // true. On failure, returns false without changing *value. +BENCHMARK_EXPORT bool ParseDoubleFlag(const char* str, const char* flag, double* value); // Parses a string for a string flag, in the form of "--flag=value". // // On success, stores the value of the flag in *value, and returns // true. On failure, returns false without changing *value. +BENCHMARK_EXPORT bool ParseStringFlag(const char* str, const char* flag, std::string* value); // Parses a string for a kvpairs flag in the form "--flag=key=value,key=value" // // On success, stores the value of the flag in *value and returns true. On // failure returns false, though *value may have been mutated. +BENCHMARK_EXPORT bool ParseKeyValueFlag(const char* str, const char* flag, std::map* value); // Returns true if the string matches the flag. +BENCHMARK_EXPORT bool IsFlag(const char* str, const char* flag); // Returns true unless value starts with one of: '0', 'f', 'F', 'n' or 'N', or // some non-alphanumeric character. Also returns false if the value matches // one of 'no', 'false', 'off' (case-insensitive). As a special case, also // returns true if value is the empty string. 
+BENCHMARK_EXPORT bool IsTruthyFlagValue(const std::string& value); } // end namespace benchmark diff --git a/MicroBenchmarks/libs/benchmark/src/commandlineflags.cc b/MicroBenchmarks/libs/benchmark/src/commandlineflags.cc --- a/MicroBenchmarks/libs/benchmark/src/commandlineflags.cc +++ b/MicroBenchmarks/libs/benchmark/src/commandlineflags.cc @@ -248,9 +248,8 @@ return true; } -bool ParseKeyValueFlag( - const char* str, const char* flag, - std::map* value) { +bool ParseKeyValueFlag(const char* str, const char* flag, + std::map* value) { const char* const value_str = ParseFlagValue(str, flag, false); if (value_str == nullptr) return false; diff --git a/MicroBenchmarks/libs/benchmark/src/complexity.cc b/MicroBenchmarks/libs/benchmark/src/complexity.cc --- a/MicroBenchmarks/libs/benchmark/src/complexity.cc +++ b/MicroBenchmarks/libs/benchmark/src/complexity.cc @@ -15,12 +15,13 @@ // Source project : https://github.com/ismaelJimenez/cpp.leastsq // Adapted to be used with google benchmark -#include "benchmark/benchmark.h" +#include "complexity.h" #include #include + +#include "benchmark/benchmark.h" #include "check.h" -#include "complexity.h" namespace benchmark { @@ -199,6 +200,7 @@ big_o.repetition_index = Run::no_repetition_index; big_o.threads = reports[0].threads; big_o.aggregate_name = "BigO"; + big_o.aggregate_unit = StatisticUnit::kTime; big_o.report_label = reports[0].report_label; big_o.iterations = 0; big_o.real_accumulated_time = result_real.coef; @@ -220,6 +222,7 @@ rms.per_family_instance_index = reports[0].per_family_instance_index; rms.run_type = BenchmarkReporter::Run::RT_Aggregate; rms.aggregate_name = "RMS"; + rms.aggregate_unit = StatisticUnit::kPercentage; rms.report_label = big_o.report_label; rms.iterations = 0; rms.repetition_index = Run::no_repetition_index; diff --git a/MicroBenchmarks/libs/benchmark/src/console_reporter.cc b/MicroBenchmarks/libs/benchmark/src/console_reporter.cc --- a/MicroBenchmarks/libs/benchmark/src/console_reporter.cc +++ b/MicroBenchmarks/libs/benchmark/src/console_reporter.cc @@ -45,7 +45,7 @@ GetErrorStream() << "Color printing is only supported for stdout on windows." " Disabling color printing\n"; - output_options_ = static_cast< OutputOptions >(output_options_ & ~OO_Color); + output_options_ = static_cast(output_options_ & ~OO_Color); } #endif @@ -53,11 +53,12 @@ } void ConsoleReporter::PrintHeader(const Run& run) { - std::string str = FormatString("%-*s %13s %15s %12s", static_cast(name_field_width_), - "Benchmark", "Time", "CPU", "Iterations"); - if(!run.counters.empty()) { - if(output_options_ & OO_Tabular) { - for(auto const& c : run.counters) { + std::string str = + FormatString("%-*s %13s %15s %12s", static_cast(name_field_width_), + "Benchmark", "Time", "CPU", "Iterations"); + if (!run.counters.empty()) { + if (output_options_ & OO_Tabular) { + for (auto const& c : run.counters) { str += FormatString(" %10s", c.first.c_str()); } } else { @@ -97,7 +98,6 @@ va_end(args); } - static std::string FormatTime(double time) { // Align decimal places... if (time < 1.0) { @@ -115,8 +115,9 @@ void ConsoleReporter::PrintRunData(const Run& result) { typedef void(PrinterFn)(std::ostream&, LogColor, const char*, ...); auto& Out = GetOutputStream(); - PrinterFn* printer = (output_options_ & OO_Color) ? - (PrinterFn*)ColorPrintf : IgnoreColorPrint; + PrinterFn* printer = (output_options_ & OO_Color) + ? static_cast(ColorPrintf) + : IgnoreColorPrint; auto name_color = (result.report_big_o || result.report_rms) ? 
diff --git a/MicroBenchmarks/libs/benchmark/src/console_reporter.cc b/MicroBenchmarks/libs/benchmark/src/console_reporter.cc
--- a/MicroBenchmarks/libs/benchmark/src/console_reporter.cc
+++ b/MicroBenchmarks/libs/benchmark/src/console_reporter.cc
@@ -45,7 +45,7 @@
     GetErrorStream()
         << "Color printing is only supported for stdout on windows."
            " Disabling color printing\n";
-    output_options_ = static_cast< OutputOptions >(output_options_ & ~OO_Color);
+    output_options_ = static_cast<OutputOptions>(output_options_ & ~OO_Color);
   }
 #endif

@@ -53,11 +53,12 @@
 }

 void ConsoleReporter::PrintHeader(const Run& run) {
-  std::string str = FormatString("%-*s %13s %15s %12s", static_cast<int>(name_field_width_),
-                                 "Benchmark", "Time", "CPU", "Iterations");
-  if(!run.counters.empty()) {
-    if(output_options_ & OO_Tabular) {
-      for(auto const& c : run.counters) {
+  std::string str =
+      FormatString("%-*s %13s %15s %12s", static_cast<int>(name_field_width_),
+                   "Benchmark", "Time", "CPU", "Iterations");
+  if (!run.counters.empty()) {
+    if (output_options_ & OO_Tabular) {
+      for (auto const& c : run.counters) {
         str += FormatString(" %10s", c.first.c_str());
       }
     } else {
@@ -97,7 +98,6 @@
   va_end(args);
 }

-
 static std::string FormatTime(double time) {
   // Align decimal places...
   if (time < 1.0) {
@@ -115,8 +115,9 @@
 void ConsoleReporter::PrintRunData(const Run& result) {
   typedef void(PrinterFn)(std::ostream&, LogColor, const char*, ...);
   auto& Out = GetOutputStream();
-  PrinterFn* printer = (output_options_ & OO_Color) ?
-                         (PrinterFn*)ColorPrintf : IgnoreColorPrint;
+  PrinterFn* printer = (output_options_ & OO_Color)
+                           ? static_cast<PrinterFn*>(ColorPrintf)
+                           : IgnoreColorPrint;
   auto name_color = (result.report_big_o || result.report_rms) ? COLOR_BLUE
                                                                : COLOR_GREEN;
   printer(Out, name_color, "%-*s ", name_field_width_,
@@ -134,18 +135,23 @@
   const std::string real_time_str = FormatTime(real_time);
   const std::string cpu_time_str = FormatTime(cpu_time);

-
   if (result.report_big_o) {
     std::string big_o = GetBigOString(result.complexity);
-    printer(Out, COLOR_YELLOW, "%10.2f %-4s %10.2f %-4s ", real_time, big_o.c_str(),
-            cpu_time, big_o.c_str());
+    printer(Out, COLOR_YELLOW, "%10.2f %-4s %10.2f %-4s ", real_time,
+            big_o.c_str(), cpu_time, big_o.c_str());
   } else if (result.report_rms) {
     printer(Out, COLOR_YELLOW, "%10.0f %-4s %10.0f %-4s ", real_time * 100, "%",
             cpu_time * 100, "%");
-  } else {
+  } else if (result.run_type != Run::RT_Aggregate ||
+             result.aggregate_unit == StatisticUnit::kTime) {
     const char* timeLabel = GetTimeUnitString(result.time_unit);
-    printer(Out, COLOR_YELLOW, "%s %-4s %s %-4s ", real_time_str.c_str(), timeLabel,
-            cpu_time_str.c_str(), timeLabel);
+    printer(Out, COLOR_YELLOW, "%s %-4s %s %-4s ", real_time_str.c_str(),
+            timeLabel, cpu_time_str.c_str(), timeLabel);
+  } else {
+    assert(result.aggregate_unit == StatisticUnit::kPercentage);
+    printer(Out, COLOR_YELLOW, "%10.2f %-4s %10.2f %-4s ",
+            (100. * result.real_accumulated_time), "%",
+            (100. * result.cpu_accumulated_time), "%");
   }

   if (!result.report_big_o && !result.report_rms) {
@@ -153,12 +159,19 @@
   }

   for (auto& c : result.counters) {
-    const std::size_t cNameLen = std::max(std::string::size_type(10),
-                                          c.first.length());
-    auto const& s = HumanReadableNumber(c.second.value, c.second.oneK);
+    const std::size_t cNameLen =
+        std::max(std::string::size_type(10), c.first.length());
+    std::string s;
     const char* unit = "";
-    if (c.second.flags & Counter::kIsRate)
-      unit = (c.second.flags & Counter::kInvert) ? "s" : "/s";
+    if (result.run_type == Run::RT_Aggregate &&
+        result.aggregate_unit == StatisticUnit::kPercentage) {
+      s = StrFormat("%.2f", 100. * c.second.value);
+      unit = "%";
+    } else {
+      s = HumanReadableNumber(c.second.value, c.second.oneK);
+      if (c.second.flags & Counter::kIsRate)
+        unit = (c.second.flags & Counter::kInvert) ? "s" : "/s";
+    }
     if (output_options_ & OO_Tabular) {
       printer(Out, COLOR_DEFAULT, " %*s%s", cNameLen - strlen(unit), s.c_str(),
               unit);
diff --git a/MicroBenchmarks/libs/benchmark/src/csv_reporter.cc b/MicroBenchmarks/libs/benchmark/src/csv_reporter.cc
--- a/MicroBenchmarks/libs/benchmark/src/csv_reporter.cc
+++ b/MicroBenchmarks/libs/benchmark/src/csv_reporter.cc
@@ -12,9 +12,6 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-#include "benchmark/benchmark.h"
-#include "complexity.h"
-
 #include <algorithm>
 #include <cstdint>
 #include <iostream>
@@ -22,7 +19,9 @@
 #include <tuple>
 #include <vector>

+#include "benchmark/benchmark.h"
 #include "check.h"
+#include "complexity.h"
 #include "string_util.h"
 #include "timers.h"

@@ -37,13 +36,17 @@
                                     "error_occurred", "error_message"};
 }  // namespace

-std::string CsvEscape(const std::string & s) {
+std::string CsvEscape(const std::string& s) {
   std::string tmp;
   tmp.reserve(s.size() + 2);
   for (char c : s) {
     switch (c) {
-      case '"' : tmp += "\"\""; break;
-      default  : tmp += c; break;
+      case '"':
+        tmp += "\"\"";
+        break;
+      default:
+        tmp += c;
+        break;
     }
   }
   return '"' + tmp + '"';
diff --git a/MicroBenchmarks/libs/benchmark/src/cycleclock.h b/MicroBenchmarks/libs/benchmark/src/cycleclock.h
--- a/MicroBenchmarks/libs/benchmark/src/cycleclock.h
+++ b/MicroBenchmarks/libs/benchmark/src/cycleclock.h
@@ -115,7 +115,7 @@
   // the code is being compiled with a non-ancient compiler.
   _asm rdtsc
 #elif defined(COMPILER_MSVC) && defined(_M_ARM64)
-  // See https://docs.microsoft.com/en-us/cpp/intrinsics/arm64-intrinsics?view=vs-2019
+  // See // https://docs.microsoft.com/en-us/cpp/intrinsics/arm64-intrinsics
   // and https://reviews.llvm.org/D53115
   int64_t virtual_timer_value;
   virtual_timer_value = _ReadStatusReg(ARM64_CNTVCT);
@@ -187,7 +187,7 @@
   asm("stck %0" : "=Q"(tsc) : : "cc");
 #endif
   return tsc;
-#elif defined(__riscv) // RISC-V
+#elif defined(__riscv)  // RISC-V
   // Use RDCYCLE (and RDCYCLEH on riscv32)
 #if __riscv_xlen == 32
   uint32_t cycles_lo, cycles_hi0, cycles_hi1;
diff --git a/MicroBenchmarks/libs/benchmark/src/internal_macros.h b/MicroBenchmarks/libs/benchmark/src/internal_macros.h
--- a/MicroBenchmarks/libs/benchmark/src/internal_macros.h
+++ b/MicroBenchmarks/libs/benchmark/src/internal_macros.h
@@ -44,6 +44,13 @@
   #define BENCHMARK_OS_CYGWIN 1
 #elif defined(_WIN32)
   #define BENCHMARK_OS_WINDOWS 1
+  #if defined(WINAPI_FAMILY_PARTITION)
+    #if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP)
+      #define BENCHMARK_OS_WINDOWS_WIN32 1
+    #elif WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP)
+      #define BENCHMARK_OS_WINDOWS_RT 1
+    #endif
+  #endif
   #if defined(__MINGW32__)
     #define BENCHMARK_OS_MINGW 1
   #endif
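The console-reporter change above means an aggregate row tagged StatisticUnit::kPercentage (such as the new coefficient-of-variation row) is printed as a scaled percentage instead of being run through time-unit formatting. A self-contained sketch of that dispatch, using a simplified stand-in struct rather than the real BenchmarkReporter::Run:

#include <cstdio>
#include <string>

// Simplified stand-ins for the reporter types (illustrative only).
enum class StatisticUnit { kTime, kPercentage };
struct Run {
  bool is_aggregate;
  StatisticUnit aggregate_unit;
  double real_accumulated_time;  // for kPercentage rows, a fraction: 0.0123 == 1.23%
};

// Mirrors the added branch: ordinary runs keep time-unit formatting,
// percentage aggregates print as "%.2f %" scaled by 100.
static std::string FormatValue(const Run& r) {
  char buf[64];
  if (!r.is_aggregate || r.aggregate_unit == StatisticUnit::kTime)
    std::snprintf(buf, sizeof(buf), "%10.2f ns", r.real_accumulated_time * 1e9);
  else
    std::snprintf(buf, sizeof(buf), "%10.2f %%", 100. * r.real_accumulated_time);
  return buf;
}

int main() {
  std::printf("%s\n", FormatValue({false, StatisticUnit::kTime, 2.5e-9}).c_str());
  std::printf("%s\n", FormatValue({true, StatisticUnit::kPercentage, 0.0123}).c_str());
}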
diff --git a/MicroBenchmarks/libs/benchmark/src/json_reporter.cc b/MicroBenchmarks/libs/benchmark/src/json_reporter.cc
--- a/MicroBenchmarks/libs/benchmark/src/json_reporter.cc
+++ b/MicroBenchmarks/libs/benchmark/src/json_reporter.cc
@@ -251,6 +251,15 @@
   out << indent << FormatKV("threads", run.threads) << ",\n";
   if (run.run_type == BenchmarkReporter::Run::RT_Aggregate) {
     out << indent << FormatKV("aggregate_name", run.aggregate_name) << ",\n";
+    out << indent << FormatKV("aggregate_unit", [&run]() -> const char* {
+      switch (run.aggregate_unit) {
+        case StatisticUnit::kTime:
+          return "time";
+        case StatisticUnit::kPercentage:
+          return "percentage";
+      }
+      BENCHMARK_UNREACHABLE();
+    }()) << ",\n";
   }
   if (run.error_occurred) {
     out << indent << FormatKV("error_occurred", run.error_occurred) << ",\n";
@@ -258,8 +267,17 @@
   }
   if (!run.report_big_o && !run.report_rms) {
     out << indent << FormatKV("iterations", run.iterations) << ",\n";
-    out << indent << FormatKV("real_time", run.GetAdjustedRealTime()) << ",\n";
-    out << indent << FormatKV("cpu_time", run.GetAdjustedCPUTime());
+    if (run.run_type != Run::RT_Aggregate ||
+        run.aggregate_unit == StatisticUnit::kTime) {
+      out << indent << FormatKV("real_time", run.GetAdjustedRealTime())
+          << ",\n";
+      out << indent << FormatKV("cpu_time", run.GetAdjustedCPUTime());
+    } else {
+      assert(run.aggregate_unit == StatisticUnit::kPercentage);
+      out << indent << FormatKV("real_time", run.real_accumulated_time)
+          << ",\n";
+      out << indent << FormatKV("cpu_time", run.cpu_accumulated_time);
+    }
     out << ",\n"
         << indent << FormatKV("time_unit", GetTimeUnitString(run.time_unit));
   } else if (run.report_big_o) {
@@ -277,9 +295,20 @@
     out << ",\n" << indent << FormatKV(c.first, c.second);
   }

-  if (run.has_memory_result) {
+  if (run.memory_result) {
+    const MemoryManager::Result memory_result = *run.memory_result;
     out << ",\n" << indent << FormatKV("allocs_per_iter", run.allocs_per_iter);
-    out << ",\n" << indent << FormatKV("max_bytes_used", run.max_bytes_used);
+    out << ",\n"
+        << indent << FormatKV("max_bytes_used", memory_result.max_bytes_used);
+
+    auto report_if_present = [&out, &indent](const char* label, int64_t val) {
+      if (val != MemoryManager::TombstoneValue)
+        out << ",\n" << indent << FormatKV(label, val);
+    };
+
+    report_if_present("total_allocated_bytes",
+                      memory_result.total_allocated_bytes);
+    report_if_present("net_heap_growth", memory_result.net_heap_growth);
   }

   if (!run.report_label.empty()) {
@@ -288,4 +317,7 @@
   out << '\n';
 }

+const int64_t MemoryManager::TombstoneValue =
+    std::numeric_limits<int64_t>::max();
+
 }  // end namespace benchmark
diff --git a/MicroBenchmarks/libs/benchmark/src/perf_counters.h b/MicroBenchmarks/libs/benchmark/src/perf_counters.h
--- a/MicroBenchmarks/libs/benchmark/src/perf_counters.h
+++ b/MicroBenchmarks/libs/benchmark/src/perf_counters.h
@@ -17,16 +17,24 @@

 #include <array>
 #include <cstdint>
+#include <memory>
 #include <vector>

 #include "benchmark/benchmark.h"
 #include "check.h"
 #include "log.h"
+#include "mutex.h"

 #ifndef BENCHMARK_OS_WINDOWS
 #include <unistd.h>
 #endif

+#if defined(_MSC_VER)
+#pragma warning(push)
+// C4251: <symbol> needs to have dll-interface to be used by clients of class
+#pragma warning(disable : 4251)
+#endif
+
 namespace benchmark {
 namespace internal {

@@ -66,17 +74,19 @@
 // Collect PMU counters. The object, once constructed, is ready to be used by
 // calling read(). PMU counter collection is enabled from the time create() is
 // called, to obtain the object, until the object's destructor is called.
-class PerfCounters final {
+class BENCHMARK_EXPORT PerfCounters final {
  public:
   // True iff this platform supports performance counters.
   static const bool kSupported;

-  bool IsValid() const { return is_valid_; }
+  bool IsValid() const { return !counter_names_.empty(); }
   static PerfCounters NoCounters() { return PerfCounters(); }

-  ~PerfCounters();
+  ~PerfCounters() { CloseCounters(); }
   PerfCounters(PerfCounters&&) = default;
   PerfCounters(const PerfCounters&) = delete;
+  PerfCounters& operator=(PerfCounters&&) noexcept;
+  PerfCounters& operator=(const PerfCounters&) = delete;

   // Platform-specific implementations may choose to do some library
   // initialization here.
@@ -111,55 +121,66 @@
  private:
   PerfCounters(const std::vector<std::string>& counter_names,
                std::vector<int>&& counter_ids)
-      : counter_ids_(std::move(counter_ids)),
-        counter_names_(counter_names),
-        is_valid_(true) {}
-  PerfCounters() : is_valid_(false) {}
+      : counter_ids_(std::move(counter_ids)), counter_names_(counter_names) {}
+  PerfCounters() = default;
+
+  void CloseCounters() const;

   std::vector<int> counter_ids_;
-  const std::vector<std::string> counter_names_;
-  const bool is_valid_;
+  std::vector<std::string> counter_names_;
 };

 // Typical usage of the above primitives.
-class PerfCountersMeasurement final {
+class BENCHMARK_EXPORT PerfCountersMeasurement final {
  public:
-  PerfCountersMeasurement(PerfCounters&& c)
-      : counters_(std::move(c)),
-        start_values_(counters_.IsValid() ? counters_.names().size() : 0),
-        end_values_(counters_.IsValid() ? counters_.names().size() : 0) {}
-
-  bool IsValid() const { return counters_.IsValid(); }
+  PerfCountersMeasurement(const std::vector<std::string>& counter_names);
+  ~PerfCountersMeasurement();
+
+  // The only way to get to `counters_` is after ctor-ing a
+  // `PerfCountersMeasurement`, which means that `counters_`'s state is, here,
+  // decided (either invalid or valid) and won't change again even if a ctor is
+  // concurrently running with this. This is preferring efficiency to
+  // maintainability, because the address of the static can be known at compile
+  // time.
+  bool IsValid() const {
+    MutexLock l(mutex_);
+    return counters_.IsValid();
+  }

   BENCHMARK_ALWAYS_INLINE void Start() {
     assert(IsValid());
+    MutexLock l(mutex_);
     // Tell the compiler to not move instructions above/below where we take
     // the snapshot.
     ClobberMemory();
-    counters_.Snapshot(&start_values_);
+    valid_read_ &= counters_.Snapshot(&start_values_);
     ClobberMemory();
   }

-  BENCHMARK_ALWAYS_INLINE std::vector<std::pair<std::string, double>>
-  StopAndGetMeasurements() {
+  BENCHMARK_ALWAYS_INLINE bool Stop(
+      std::vector<std::pair<std::string, double>>& measurements) {
     assert(IsValid());
+    MutexLock l(mutex_);
     // Tell the compiler to not move instructions above/below where we take
     // the snapshot.
     ClobberMemory();
-    counters_.Snapshot(&end_values_);
+    valid_read_ &= counters_.Snapshot(&end_values_);
     ClobberMemory();

-    std::vector<std::pair<std::string, double>> ret;
     for (size_t i = 0; i < counters_.names().size(); ++i) {
       double measurement = static_cast<double>(end_values_[i]) -
                            static_cast<double>(start_values_[i]);
-      ret.push_back({counters_.names()[i], measurement});
+      measurements.push_back({counters_.names()[i], measurement});
     }
-    return ret;
+
+    return valid_read_;
   }

  private:
-  PerfCounters counters_;
+  static Mutex mutex_;
+  GUARDED_BY(mutex_) static int ref_count_;
+  GUARDED_BY(mutex_) static PerfCounters counters_;
+  bool valid_read_ = true;
   PerfCounterValues start_values_;
   PerfCounterValues end_values_;
 };
@@ -169,4 +190,8 @@
 }  // namespace internal
 }  // namespace benchmark

+#if defined(_MSC_VER)
+#pragma warning(pop)
+#endif
+
 #endif  // BENCHMARK_PERF_COUNTERS_H
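The refactor above replaces StopAndGetMeasurements() with a bool-returning Stop(measurements) and moves the counters into a mutex-guarded, reference-counted static. A sketch of the calling convention this implies — the real call sites live in the benchmark runner, which is not part of this diff, and the counter names here are example libpfm event names:

#include <cstdio>
#include <string>
#include <utility>
#include <vector>

#include "perf_counters.h"  // internal header; sketch assumes in-tree build

using benchmark::internal::PerfCountersMeasurement;

void MeasureOnce() {
  // First instance creates the shared counters; later instances reuse them.
  PerfCountersMeasurement pcm({"CYCLES", "INSTRUCTIONS"});
  if (!pcm.IsValid()) return;  // counters unavailable on this platform/config

  pcm.Start();
  // ... timed region under measurement ...
  std::vector<std::pair<std::string, double>> deltas;
  if (pcm.Stop(deltas)) {  // false means at least one Snapshot() read failed
    for (const auto& kv : deltas)
      std::printf("%s: %f\n", kv.first.c_str(), kv.second);
  }
}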
diff --git a/MicroBenchmarks/libs/benchmark/src/perf_counters.cc b/MicroBenchmarks/libs/benchmark/src/perf_counters.cc
--- a/MicroBenchmarks/libs/benchmark/src/perf_counters.cc
+++ b/MicroBenchmarks/libs/benchmark/src/perf_counters.cc
@@ -15,6 +15,7 @@
 #include "perf_counters.h"

 #include <cstring>
+#include <memory>
 #include <vector>

 #if defined HAVE_LIBPFM
@@ -49,7 +50,7 @@
   const int mode = PFM_PLM3;  // user mode only
   for (size_t i = 0; i < counter_names.size(); ++i) {
     const bool is_first = i == 0;
-    struct perf_event_attr attr{};
+    struct perf_event_attr attr {};
     attr.size = sizeof(attr);
     const int group_id = !is_first ? counter_ids[0] : -1;
     const auto& name = counter_names[i];
@@ -104,7 +105,7 @@
   return PerfCounters(counter_names, std::move(counter_ids));
 }

-PerfCounters::~PerfCounters() {
+void PerfCounters::CloseCounters() const {
   if (counter_ids_.empty()) {
     return;
   }
@@ -126,7 +127,44 @@
   return NoCounters();
 }

-PerfCounters::~PerfCounters() = default;
+void PerfCounters::CloseCounters() const {}
 #endif  // defined HAVE_LIBPFM
+
+Mutex PerfCountersMeasurement::mutex_;
+int PerfCountersMeasurement::ref_count_ = 0;
+PerfCounters PerfCountersMeasurement::counters_ = PerfCounters::NoCounters();
+
+PerfCountersMeasurement::PerfCountersMeasurement(
+    const std::vector<std::string>& counter_names)
+    : start_values_(counter_names.size()), end_values_(counter_names.size()) {
+  MutexLock l(mutex_);
+  if (ref_count_ == 0) {
+    counters_ = PerfCounters::Create(counter_names);
+  }
+  // We chose to increment it even if `counters_` ends up invalid,
+  // so that we don't keep trying to create, and also since the dtor
+  // will decrement regardless of `counters_`'s validity
+  ++ref_count_;
+
+  BM_CHECK(!counters_.IsValid() || counters_.names() == counter_names);
+}
+
+PerfCountersMeasurement::~PerfCountersMeasurement() {
+  MutexLock l(mutex_);
+  --ref_count_;
+  if (ref_count_ == 0) {
+    counters_ = PerfCounters::NoCounters();
+  }
+}
+
+PerfCounters& PerfCounters::operator=(PerfCounters&& other) noexcept {
+  if (this != &other) {
+    CloseCounters();
+
+    counter_ids_ = std::move(other.counter_ids_);
+    counter_names_ = std::move(other.counter_names_);
+  }
+  return *this;
+}
 }  // namespace internal
 }  // namespace benchmark
diff --git a/MicroBenchmarks/libs/benchmark/src/reporter.cc b/MicroBenchmarks/libs/benchmark/src/reporter.cc
--- a/MicroBenchmarks/libs/benchmark/src/reporter.cc
+++ b/MicroBenchmarks/libs/benchmark/src/reporter.cc
@@ -12,23 +12,21 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-#include "benchmark/benchmark.h"
-#include "timers.h"
-
 #include <cstdlib>
-
 #include <iostream>
 #include <map>
 #include <string>
 #include <tuple>
 #include <vector>

+#include "benchmark/benchmark.h"
 #include "check.h"
 #include "string_util.h"
+#include "timers.h"

 namespace benchmark {

 namespace internal {
-extern std::map<std::string, std::string>* global_context;
+extern std::map<std::string, std::string> *global_context;
 }

 BenchmarkReporter::BenchmarkReporter()
@@ -70,7 +68,7 @@
   }

   if (internal::global_context != nullptr) {
-    for (const auto& kv: *internal::global_context) {
+    for (const auto &kv : *internal::global_context) {
       Out << kv.first << ": " << kv.second << "\n";
     }
   }
diff --git a/MicroBenchmarks/libs/benchmark/src/sleep.cc b/MicroBenchmarks/libs/benchmark/src/sleep.cc
--- a/MicroBenchmarks/libs/benchmark/src/sleep.cc
+++ b/MicroBenchmarks/libs/benchmark/src/sleep.cc
@@ -35,7 +35,7 @@
 void SleepForSeconds(double seconds) {
   SleepForMilliseconds(static_cast<int>(kNumMillisPerSecond * seconds));
 }
-#else   // BENCHMARK_OS_WINDOWS
+#else  // BENCHMARK_OS_WINDOWS
 void SleepForMicroseconds(int microseconds) {
 #ifdef BENCHMARK_OS_ZOS
   // z/OS does not support nanosleep. Instead call sleep() and then usleep() to
@@ -43,8 +43,7 @@
   // sleep for the remaining microseconds because usleep will fail if its
   // argument is greater than 1000000.
   div_t sleepTime = div(microseconds, kNumMicrosPerSecond);
   int seconds = sleepTime.quot;
-  while (seconds != 0)
-    seconds = sleep(seconds);
+  while (seconds != 0) seconds = sleep(seconds);
   while (usleep(sleepTime.rem) == -1 && errno == EINTR)
     ;
 #else
diff --git a/MicroBenchmarks/libs/benchmark/src/statistics.h b/MicroBenchmarks/libs/benchmark/src/statistics.h
--- a/MicroBenchmarks/libs/benchmark/src/statistics.h
+++ b/MicroBenchmarks/libs/benchmark/src/statistics.h
@@ -25,12 +25,18 @@
 // Return a vector containing the mean, median and standard devation information
 // (and any user-specified info) for the specified list of reports. If 'reports'
 // contains less than two non-errored runs an empty vector is returned
+BENCHMARK_EXPORT
 std::vector<BenchmarkReporter::Run> ComputeStats(
     const std::vector<BenchmarkReporter::Run>& reports);

+BENCHMARK_EXPORT
 double StatisticsMean(const std::vector<double>& v);
+BENCHMARK_EXPORT
 double StatisticsMedian(const std::vector<double>& v);
+BENCHMARK_EXPORT
 double StatisticsStdDev(const std::vector<double>& v);
+BENCHMARK_EXPORT
+double StatisticsCV(const std::vector<double>& v);

 }  // end namespace benchmark
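The StatisticsCV declaration above (implemented in statistics.cc below) computes the coefficient of variation, CV = stddev / mean, over the per-repetition timings; it is dimensionless, which is why it is reported through the new percentage path rather than as a time. A quick standalone check of the arithmetic, with helper names of my own choosing:

#include <cmath>
#include <cstdio>
#include <vector>

// CV = stddev / mean, over per-repetition timings (e.g. ns per iteration).
double Mean(const std::vector<double>& v) {
  double sum = 0.0;
  for (double x : v) sum += x;
  return sum / v.size();
}

double StdDev(const std::vector<double>& v) {
  const double mean = Mean(v);
  double sq = 0.0;
  for (double x : v) sq += (x - mean) * (x - mean);
  return std::sqrt(sq / (v.size() - 1.0));  // Bessel-corrected, as in the library
}

int main() {
  std::vector<double> times = {10.0, 11.0, 9.5, 10.5};
  // Prints roughly "cv = 6.30%": the "<name>_cv" row the reporters now emit.
  std::printf("cv = %.2f%%\n", 100.0 * StdDev(times) / Mean(times));
}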
diff --git a/MicroBenchmarks/libs/benchmark/src/statistics.cc b/MicroBenchmarks/libs/benchmark/src/statistics.cc
--- a/MicroBenchmarks/libs/benchmark/src/statistics.cc
+++ b/MicroBenchmarks/libs/benchmark/src/statistics.cc
@@ -13,15 +13,16 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-#include "benchmark/benchmark.h"
+#include "statistics.h"

 #include <algorithm>
 #include <cmath>
 #include <numeric>
 #include <string>
 #include <vector>
+
+#include "benchmark/benchmark.h"
 #include "check.h"
-#include "statistics.h"

 namespace benchmark {

@@ -74,6 +75,15 @@
   return Sqrt(v.size() / (v.size() - 1.0) * (avg_squares - Sqr(mean)));
 }

+double StatisticsCV(const std::vector<double>& v) {
+  if (v.size() < 2) return 0.0;
+
+  const auto stddev = StatisticsStdDev(v);
+  const auto mean = StatisticsMean(v);
+
+  return stddev / mean;
+}
+
 std::vector<BenchmarkReporter::Run> ComputeStats(
     const std::vector<BenchmarkReporter::Run>& reports) {
   typedef BenchmarkReporter::Run Run;
@@ -155,6 +165,7 @@
     data.repetitions = reports[0].repetitions;
     data.repetition_index = Run::no_repetition_index;
     data.aggregate_name = Stat.name_;
+    data.aggregate_unit = Stat.unit_;
     data.report_label = report_label;

     // It is incorrect to say that an aggregate is computed over
@@ -167,13 +178,15 @@
     data.real_accumulated_time = Stat.compute_(real_accumulated_time_stat);
     data.cpu_accumulated_time = Stat.compute_(cpu_accumulated_time_stat);

-    // We will divide these times by data.iterations when reporting, but the
-    // data.iterations is not nessesairly the scale of these measurements,
-    // because in each repetition, these timers are sum over all the iterations.
-    // And if we want to say that the stats are over N repetitions and not
-    // M iterations, we need to multiply these by (N/M).
-    data.real_accumulated_time *= iteration_rescale_factor;
-    data.cpu_accumulated_time *= iteration_rescale_factor;
+    if (data.aggregate_unit == StatisticUnit::kTime) {
+      // We will divide these times by data.iterations when reporting, but the
+      // data.iterations is not necessarily the scale of these measurements,
+      // because in each repetition, these timers are sum over all the iters.
+      // And if we want to say that the stats are over N repetitions and not
+      // M iterations, we need to multiply these by (N/M).
+      data.real_accumulated_time *= iteration_rescale_factor;
+      data.cpu_accumulated_time *= iteration_rescale_factor;
+    }

     data.time_unit = reports[0].time_unit;
diff --git a/MicroBenchmarks/libs/benchmark/src/string_util.h b/MicroBenchmarks/libs/benchmark/src/string_util.h
--- a/MicroBenchmarks/libs/benchmark/src/string_util.h
+++ b/MicroBenchmarks/libs/benchmark/src/string_util.h
@@ -4,6 +4,8 @@
 #include <sstream>
 #include <string>
 #include <utility>
+
+#include "benchmark/export.h"
 #include "internal_macros.h"

 namespace benchmark {
@@ -12,6 +14,7 @@

 std::string HumanReadableNumber(double n, double one_k = 1024.0);

+BENCHMARK_EXPORT
 #if defined(__MINGW32__)
 __attribute__((format(__MINGW_PRINTF_FORMAT, 1, 2)))
 #elif defined(__GNUC__)
@@ -37,8 +40,11 @@
   return ss.str();
 }

+BENCHMARK_EXPORT
 std::vector<std::string> StrSplit(const std::string& str, char delim);

+// Disable lint checking for this block since it re-implements C functions.
+// NOLINTBEGIN
 #ifdef BENCHMARK_STL_ANDROID_GNUSTL
 /*
  * GNU STL in Android NDK lacks support for some C++11 functions, including
@@ -47,14 +53,15 @@
  * namespace, not std:: namespace.
  */
 unsigned long stoul(const std::string& str, size_t* pos = nullptr,
-                   int base = 10);
+                    int base = 10);
 int stoi(const std::string& str, size_t* pos = nullptr, int base = 10);
 double stod(const std::string& str, size_t* pos = nullptr);
 #else
-using std::stoul;
-using std::stoi;
-using std::stod;
+using std::stod;   // NOLINT(misc-unused-using-decls)
+using std::stoi;   // NOLINT(misc-unused-using-decls)
+using std::stoul;  // NOLINT(misc-unused-using-decls)
 #endif
+// NOLINTEND

 }  // end namespace benchmark
diff --git a/MicroBenchmarks/libs/benchmark/src/string_util.cc b/MicroBenchmarks/libs/benchmark/src/string_util.cc
--- a/MicroBenchmarks/libs/benchmark/src/string_util.cc
+++ b/MicroBenchmarks/libs/benchmark/src/string_util.cc
@@ -151,7 +151,7 @@
   auto buff_ptr = std::unique_ptr<char[]>(new char[size]);
   // 2015-10-08: vsnprintf is used instead of snd::vsnprintf due to a limitation
   // in the android-ndk
-  ret = vsnprintf(buff_ptr.get(), size, msg, args);
+  vsnprintf(buff_ptr.get(), size, msg, args);
   return std::string(buff_ptr.get());
 }

@@ -198,11 +198,10 @@

   /* Check for errors and return */
   if (strtoulErrno == ERANGE) {
-    throw std::out_of_range(
-        "stoul failed: " + str + " is outside of range of unsigned long");
+    throw std::out_of_range("stoul failed: " + str +
+                            " is outside of range of unsigned long");
   } else if (strEnd == strStart || strtoulErrno != 0) {
-    throw std::invalid_argument(
-        "stoul failed: " + str + " is not an integer");
+    throw std::invalid_argument("stoul failed: " + str + " is not an integer");
   }
   if (pos != nullptr) {
     *pos = static_cast<size_t>(strEnd - strStart);
@@ -225,11 +224,10 @@

   /* Check for errors and return */
   if (strtolErrno == ERANGE || long(int(result)) != result) {
-    throw std::out_of_range(
-        "stoul failed: " + str + " is outside of range of int");
+    throw std::out_of_range("stoul failed: " + str +
+                            " is outside of range of int");
   } else if (strEnd == strStart || strtolErrno != 0) {
-    throw std::invalid_argument(
-        "stoul failed: " + str + " is not an integer");
+    throw std::invalid_argument("stoul failed: " + str + " is not an integer");
   }
   if (pos != nullptr) {
     *pos = static_cast<size_t>(strEnd - strStart);
@@ -252,11 +250,10 @@

   /* Check for errors and return */
   if (strtodErrno == ERANGE) {
-    throw std::out_of_range(
-        "stoul failed: " + str + " is outside of range of int");
+    throw std::out_of_range("stoul failed: " + str +
+                            " is outside of range of int");
   } else if (strEnd == strStart || strtodErrno != 0) {
-    throw std::invalid_argument(
-        "stoul failed: " + str + " is not an integer");
+    throw std::invalid_argument("stoul failed: " + str + " is not an integer");
   }
   if (pos != nullptr) {
     *pos = static_cast<size_t>(strEnd - strStart);
diff --git a/MicroBenchmarks/libs/benchmark/src/sysinfo.cc b/MicroBenchmarks/libs/benchmark/src/sysinfo.cc
--- a/MicroBenchmarks/libs/benchmark/src/sysinfo.cc
+++ b/MicroBenchmarks/libs/benchmark/src/sysinfo.cc
@@ -19,6 +19,7 @@
 #undef StrCat  // Don't let StrCat in string_util.h be renamed to lstrcatA
 #include <versionhelpers.h>
 #include <windows.h>
+
 #include <codecvt>
 #else
 #include <fcntl.h>
@@ -55,9 +56,9 @@
 #include <iostream>
 #include <iterator>
 #include <limits>
+#include <locale>
 #include <memory>
 #include <sstream>
-#include <locale>
 #include <string>
 #include <utility>

 #include "check.h"
@@ -147,7 +148,7 @@

   int mib[2];
   mib[0] = CTL_HW;
-  if ((Name == "hw.ncpu") || (Name == "hw.cpuspeed")){
+  if ((Name == "hw.ncpu") || (Name == "hw.cpuspeed")) {
     ValueUnion buff(sizeof(int));

     if (Name == "hw.ncpu") {
@@ -214,10 +215,9 @@
 CPUInfo::Scaling CpuScaling(int num_cpus) {
   // We don't have a valid CPU count, so don't even bother.
   if (num_cpus <= 0) return CPUInfo::Scaling::UNKNOWN;
-#ifdef BENCHMARK_OS_QNX
+#if defined(BENCHMARK_OS_QNX)
   return CPUInfo::Scaling::UNKNOWN;
-#endif
-#ifndef BENCHMARK_OS_WINDOWS
+#elif !defined(BENCHMARK_OS_WINDOWS)
   // On Linux, the CPUfreq subsystem exposes CPU information as files on the
   // local file system. If reading the exported files fails, then we may not be
   // running on Linux, so we silently ignore all the read errors.
@@ -225,11 +225,13 @@
   for (int cpu = 0; cpu < num_cpus; ++cpu) {
     std::string governor_file =
         StrCat("/sys/devices/system/cpu/cpu", cpu, "/cpufreq/scaling_governor");
-    if (ReadFromFile(governor_file, &res) && res != "performance") return CPUInfo::Scaling::ENABLED;
+    if (ReadFromFile(governor_file, &res) && res != "performance")
+      return CPUInfo::Scaling::ENABLED;
   }
   return CPUInfo::Scaling::DISABLED;
-#endif
+#else
   return CPUInfo::Scaling::UNKNOWN;
+#endif
 }

 int CountSetBitsInCPUMap(std::string Val) {
@@ -344,6 +346,7 @@
       C.num_sharing = static_cast<int>(B.count());
       C.level = Cache->Level;
       C.size = Cache->Size;
+      C.type = "Unknown";
       switch (Cache->Type) {
         case CacheUnified:
           C.type = "Unified";
@@ -357,9 +360,6 @@
         case CacheTrace:
           C.type = "Trace";
           break;
-        default:
-          C.type = "Unknown";
-          break;
       }
       res.push_back(C);
     }
@@ -368,29 +368,29 @@
 #elif BENCHMARK_OS_QNX
 std::vector<CPUInfo::CacheInfo> GetCacheSizesQNX() {
   std::vector<CPUInfo::CacheInfo> res;
-  struct cacheattr_entry *cache = SYSPAGE_ENTRY(cacheattr);
+  struct cacheattr_entry* cache = SYSPAGE_ENTRY(cacheattr);
   uint32_t const elsize = SYSPAGE_ELEMENT_SIZE(cacheattr);
-  int num = SYSPAGE_ENTRY_SIZE(cacheattr) / elsize ;
-  for(int i = 0; i < num; ++i ) {
+  int num = SYSPAGE_ENTRY_SIZE(cacheattr) / elsize;
+  for (int i = 0; i < num; ++i) {
     CPUInfo::CacheInfo info;
-    switch (cache->flags){
-      case CACHE_FLAG_INSTR :
+    switch (cache->flags) {
+      case CACHE_FLAG_INSTR:
         info.type = "Instruction";
         info.level = 1;
         break;
-      case CACHE_FLAG_DATA :
+      case CACHE_FLAG_DATA:
         info.type = "Data";
         info.level = 1;
         break;
-      case CACHE_FLAG_UNIFIED :
+      case CACHE_FLAG_UNIFIED:
         info.type = "Unified";
         info.level = 2;
         break;
-      case CACHE_FLAG_SHARED :
+      case CACHE_FLAG_SHARED:
         info.type = "Shared";
         info.level = 3;
         break;
-      default :
+      default:
         continue;
         break;
     }
@@ -418,24 +418,23 @@
 std::string GetSystemName() {
 #if defined(BENCHMARK_OS_WINDOWS)
   std::string str;
-  const unsigned COUNT = MAX_COMPUTERNAME_LENGTH+1;
-  TCHAR  hostname[COUNT] = {'\0'};
+  const unsigned COUNT = MAX_COMPUTERNAME_LENGTH + 1;
+  TCHAR hostname[COUNT] = {'\0'};
   DWORD DWCOUNT = COUNT;
-  if (!GetComputerName(hostname, &DWCOUNT))
-    return std::string("");
+  if (!GetComputerName(hostname, &DWCOUNT)) return std::string("");
 #ifndef UNICODE
   str = std::string(hostname, DWCOUNT);
 #else
-  //Using wstring_convert, Is deprecated in C++17
+  // Using wstring_convert, Is deprecated in C++17
   using convert_type = std::codecvt_utf8<wchar_t>;
   std::wstring_convert<convert_type, wchar_t> converter;
   std::wstring wStr(hostname, DWCOUNT);
   str = converter.to_bytes(wStr);
 #endif
   return str;
-#else  // defined(BENCHMARK_OS_WINDOWS)
+#else   // defined(BENCHMARK_OS_WINDOWS)
 #ifndef HOST_NAME_MAX
-#ifdef BENCHMARK_HAS_SYSCTL // BSD/Mac Doesnt have HOST_NAME_MAX defined
+#ifdef BENCHMARK_HAS_SYSCTL  // BSD/Mac Doesnt have HOST_NAME_MAX defined
 #define HOST_NAME_MAX 64
 #elif defined(BENCHMARK_OS_NACL)
 #define HOST_NAME_MAX 64
@@ -444,15 +443,15 @@
 #elif defined(BENCHMARK_OS_RTEMS)
 #define HOST_NAME_MAX 256
 #else
-#warning "HOST_NAME_MAX not defined. using 64"
+#pragma message("HOST_NAME_MAX not defined. using 64")
 #define HOST_NAME_MAX 64
 #endif
-#endif // def HOST_NAME_MAX
+#endif  // def HOST_NAME_MAX
   char hostname[HOST_NAME_MAX];
   int retVal = gethostname(hostname, HOST_NAME_MAX);
   if (retVal != 0) return std::string("");
   return std::string(hostname);
-#endif // Catch-all POSIX block.
+#endif  // Catch-all POSIX block.
 }

 int GetNumCPUs() {
@@ -474,8 +473,7 @@
   // Returns -1 in case of a failure.
   int NumCPU = sysconf(_SC_NPROCESSORS_ONLN);
   if (NumCPU < 0) {
-    fprintf(stderr,
-            "sysconf(_SC_NPROCESSORS_ONLN) failed with error: %s\n",
+    fprintf(stderr, "sysconf(_SC_NPROCESSORS_ONLN) failed with error: %s\n",
             strerror(errno));
   }
   return NumCPU;
@@ -498,7 +496,8 @@
 #if defined(__s390__)
       // s390 has another format in /proc/cpuinfo
       // it needs to be parsed differently
-      if (SplitIdx != std::string::npos) value = ln.substr(Key.size()+1,SplitIdx-Key.size()-1);
+      if (SplitIdx != std::string::npos)
+        value = ln.substr(Key.size() + 1, SplitIdx - Key.size() - 1);
 #else
       if (SplitIdx != std::string::npos) value = ln.substr(SplitIdx + 1);
 #endif
@@ -633,7 +632,7 @@
     fprintf(stderr, "Unable to determine clock rate from sysctl: %s: %s\n",
             FreqStr, strerror(errno));
-#elif defined BENCHMARK_OS_WINDOWS
+#elif defined BENCHMARK_OS_WINDOWS_WIN32
   // In NT, read MHz from the registry. If we fail to do so or we're in win9x
   // then make a crude estimate.
   DWORD data, data_size = sizeof(data);
@@ -644,13 +643,13 @@
                        "~MHz", nullptr, &data, &data_size)))
     return static_cast<double>((int64_t)data *
                                (int64_t)(1000 * 1000));  // was mhz
-#elif defined (BENCHMARK_OS_SOLARIS)
-  kstat_ctl_t *kc = kstat_open();
+#elif defined(BENCHMARK_OS_SOLARIS)
+  kstat_ctl_t* kc = kstat_open();
   if (!kc) {
     std::cerr << "failed to open /dev/kstat\n";
     return -1;
   }
-  kstat_t *ksp = kstat_lookup(kc, (char*)"cpu_info", -1, (char*)"cpu_info0");
+  kstat_t* ksp = kstat_lookup(kc, (char*)"cpu_info", -1, (char*)"cpu_info0");
   if (!ksp) {
     std::cerr << "failed to lookup in /dev/kstat\n";
     return -1;
@@ -659,7 +658,7 @@
     std::cerr << "failed to read from /dev/kstat\n";
     return -1;
   }
-  kstat_named_t *knp =
+  kstat_named_t* knp =
       (kstat_named_t*)kstat_data_lookup(ksp, (char*)"current_clock_Hz");
   if (!knp) {
     std::cerr << "failed to lookup data in /dev/kstat\n";
@@ -673,7 +672,7 @@
   double clock_hz = knp->value.ui64;
   kstat_close(kc);
   return clock_hz;
-#elif defined (BENCHMARK_OS_QNX)
+#elif defined(BENCHMARK_OS_QNX)
   return static_cast<double>((int64_t)(SYSPAGE_ENTRY(cpuinfo)->speed) *
                              (int64_t)(1000 * 1000));
 #endif
diff --git a/MicroBenchmarks/libs/benchmark/src/thread_manager.h b/MicroBenchmarks/libs/benchmark/src/thread_manager.h
--- a/MicroBenchmarks/libs/benchmark/src/thread_manager.h
+++ b/MicroBenchmarks/libs/benchmark/src/thread_manager.h
@@ -36,7 +36,6 @@
         [this]() { return alive_threads_ == 0; });
   }

- public:
   struct Result {
     IterationCount iterations = 0;
     double real_time_used = 0;
diff --git a/MicroBenchmarks/libs/benchmark/src/timers.cc b/MicroBenchmarks/libs/benchmark/src/timers.cc
--- a/MicroBenchmarks/libs/benchmark/src/timers.cc
+++ b/MicroBenchmarks/libs/benchmark/src/timers.cc
@@ -13,6 +13,7 @@
 // limitations under the License.

 #include "timers.h"
+
 #include "internal_macros.h"

 #ifdef BENCHMARK_OS_WINDOWS
@@ -125,8 +126,8 @@
   // syncronous system calls in Emscripten.
   return emscripten_get_now() * 1e-3;
 #elif defined(CLOCK_PROCESS_CPUTIME_ID) && !defined(BENCHMARK_OS_MACOSX)
-  // FIXME We want to use clock_gettime, but its not available in MacOS 10.11. See
-  // https://github.com/google/benchmark/pull/292
+  // FIXME We want to use clock_gettime, but its not available in MacOS 10.11.
+  // See https://github.com/google/benchmark/pull/292
   struct timespec spec;
   if (clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &spec) == 0)
     return MakeTime(spec);
@@ -149,13 +150,14 @@
                 &user_time);
   return MakeTime(kernel_time, user_time);
 #elif defined(BENCHMARK_OS_MACOSX)
-  // FIXME We want to use clock_gettime, but its not available in MacOS 10.11. See
-  // https://github.com/google/benchmark/pull/292
+  // FIXME We want to use clock_gettime, but its not available in MacOS 10.11.
+  // See https://github.com/google/benchmark/pull/292
   mach_msg_type_number_t count = THREAD_BASIC_INFO_COUNT;
   thread_basic_info_data_t info;
   mach_port_t thread = pthread_mach_thread_np(pthread_self());
-  if (thread_info(thread, THREAD_BASIC_INFO, (thread_info_t)&info, &count) ==
-      KERN_SUCCESS) {
+  if (thread_info(thread, THREAD_BASIC_INFO,
+                  reinterpret_cast<thread_info_t>(&info),
+                  &count) == KERN_SUCCESS) {
     return MakeTime(info);
   }
   DiagnoseAndExit("ThreadCPUUsage() failed when evaluating thread_info");
@@ -191,11 +193,14 @@
   long int offset_minutes;
   char tz_offset_sign = '+';
   // tz_offset is set in one of three ways:
-  // * strftime with %z - This either returns empty or the ISO 8601 time.  The maximum length an
+  // * strftime with %z - This either returns empty or the ISO 8601 time. The
+  // maximum length an
   //   ISO 8601 string can be is 7 (e.g. -03:30, plus trailing zero).
-  // * snprintf with %c%02li:%02li - The maximum length is 41 (one for %c, up to 19 for %02li,
+  // * snprintf with %c%02li:%02li - The maximum length is 41 (one for %c, up to
+  // 19 for %02li,
   //   one for :, up to 19 %02li, plus trailing zero).
-  // * A fixed string of "-00:00".  The maximum length is 7 (-00:00, plus trailing zero).
+  // * A fixed string of "-00:00". The maximum length is 7 (-00:00, plus
+  // trailing zero).
   //
   // Thus, the maximum size this needs to be is 41.
   char tz_offset[41];
@@ -203,10 +208,10 @@
   char storage[128];

 #if defined(BENCHMARK_OS_WINDOWS)
-  std::tm *timeinfo_p = ::localtime(&now);
+  std::tm* timeinfo_p = ::localtime(&now);
 #else
   std::tm timeinfo;
-  std::tm *timeinfo_p = &timeinfo;
+  std::tm* timeinfo_p = &timeinfo;
   ::localtime_r(&now, &timeinfo);
 #endif

@@ -223,10 +228,11 @@
       tz_offset_sign = '-';
     }

-    tz_len = ::snprintf(tz_offset, sizeof(tz_offset), "%c%02li:%02li",
-                        tz_offset_sign, offset_minutes / 100, offset_minutes % 100);
+    tz_len =
+        ::snprintf(tz_offset, sizeof(tz_offset), "%c%02li:%02li",
+                   tz_offset_sign, offset_minutes / 100, offset_minutes % 100);
     BM_CHECK(tz_len == kTzOffsetLen);
-    ((void)tz_len); // Prevent unused variable warning in optimized build.
+    ((void)tz_len);  // Prevent unused variable warning in optimized build.
   } else {
     // Unknown offset. RFC3339 specifies that unknown local offsets should be
     // written as UTC time with -00:00 timezone.
@@ -240,8 +246,8 @@
     strncpy(tz_offset, "-00:00", kTzOffsetLen + 1);
   }

-  timestamp_len = std::strftime(storage, sizeof(storage), "%Y-%m-%dT%H:%M:%S",
-                                timeinfo_p);
+  timestamp_len =
+      std::strftime(storage, sizeof(storage), "%Y-%m-%dT%H:%M:%S", timeinfo_p);
   BM_CHECK(timestamp_len == kTimestampLen);
   // Prevent unused variable warning in optimized build.
   ((void)kTimestampLen);
diff --git a/MicroBenchmarks/libs/benchmark/test/AssemblyTests.cmake b/MicroBenchmarks/libs/benchmark/test/AssemblyTests.cmake
--- a/MicroBenchmarks/libs/benchmark/test/AssemblyTests.cmake
+++ b/MicroBenchmarks/libs/benchmark/test/AssemblyTests.cmake
@@ -23,6 +23,7 @@
 macro(add_filecheck_test name)
   cmake_parse_arguments(ARG "" "" "CHECK_PREFIXES" ${ARGV})
   add_library(${name} OBJECT ${name}.cc)
+  target_link_libraries(${name} PRIVATE benchmark::benchmark)
   set_target_properties(${name} PROPERTIES COMPILE_FLAGS "-S ${ASM_TEST_FLAGS}")
   set(ASM_OUTPUT_FILE "${CMAKE_CURRENT_BINARY_DIR}/${name}.s")
   add_custom_target(copy_${name} ALL
diff --git a/MicroBenchmarks/libs/benchmark/test/BUILD b/MicroBenchmarks/libs/benchmark/test/BUILD
--- a/MicroBenchmarks/libs/benchmark/test/BUILD
+++ b/MicroBenchmarks/libs/benchmark/test/BUILD
@@ -21,6 +21,7 @@
 PER_SRC_TEST_ARGS = ({
     "user_counters_tabular_test.cc": ["--benchmark_counters_tabular=true"],
     "repetitions_test.cc": [" --benchmark_repetitions=3"],
+    "spec_arg_test.cc" : ["--benchmark_filter=BM_NotChosen"],
 })

 load("@rules_cc//cc:defs.bzl", "cc_library", "cc_test")
@@ -49,9 +50,8 @@
             "//:benchmark",
             "//:benchmark_internal_headers",
             "@com_google_googletest//:gtest",
-        ] + (
-            ["@com_google_googletest//:gtest_main"] if (test_src[-len("gtest.cc"):] == "gtest.cc") else []
-        ),
+            "@com_google_googletest//:gtest_main",
+        ]

 # FIXME: Add support for assembly tests to bazel.
 # See Issue #556
 # https://github.com/google/benchmark/issues/556
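The test/CMakeLists.txt change that follows switches the test executables from benchmark::benchmark to benchmark::benchmark_main, so a test translation unit no longer supplies its own entry point. A minimal sketch of what such a test source then looks like (BM_Noop is an illustrative name, not a test from this patch):

// With benchmark_main linked in, the file only registers benchmarks;
// main() comes from the library target.
#include "benchmark/benchmark.h"

static void BM_Noop(benchmark::State& state) {
  for (auto _ : state) {
    benchmark::DoNotOptimize(state.iterations());
  }
}
BENCHMARK(BM_Noop);
// Note: no BENCHMARK_MAIN() here — the linked benchmark_main provides it.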
diff --git a/MicroBenchmarks/libs/benchmark/test/CMakeLists.txt b/MicroBenchmarks/libs/benchmark/test/CMakeLists.txt
--- a/MicroBenchmarks/libs/benchmark/test/CMakeLists.txt
+++ b/MicroBenchmarks/libs/benchmark/test/CMakeLists.txt
@@ -1,5 +1,7 @@
 # Enable the tests

+set(THREADS_PREFER_PTHREAD_FLAG ON)
+
 find_package(Threads REQUIRED)
 include(CheckCXXCompilerFlag)

@@ -35,10 +37,11 @@
 endif()

 add_library(output_test_helper STATIC output_test_helper.cc output_test.h)
+target_link_libraries(output_test_helper PRIVATE benchmark::benchmark)

 macro(compile_benchmark_test name)
   add_executable(${name} "${name}.cc")
-  target_link_libraries(${name} benchmark::benchmark ${CMAKE_THREAD_LIBS_INIT})
+  target_link_libraries(${name} benchmark::benchmark_main ${CMAKE_THREAD_LIBS_INIT})
 endmacro(compile_benchmark_test)

 macro(compile_benchmark_test_with_main name)
@@ -48,7 +51,7 @@

 macro(compile_output_test name)
   add_executable(${name} "${name}.cc" output_test.h)
-  target_link_libraries(${name} output_test_helper benchmark::benchmark
+  target_link_libraries(${name} output_test_helper benchmark::benchmark_main
                         ${BENCHMARK_CXX_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT})
 endmacro(compile_output_test)

@@ -56,6 +59,12 @@
 compile_benchmark_test(benchmark_test)
 add_test(NAME benchmark COMMAND benchmark_test --benchmark_min_time=0.01)

+compile_benchmark_test(spec_arg_test)
+add_test(NAME spec_arg COMMAND spec_arg_test --benchmark_filter=BM_NotChosen)
+
+compile_benchmark_test(benchmark_setup_teardown_test)
+add_test(NAME benchmark_setup_teardown COMMAND benchmark_setup_teardown_test)
+
 compile_benchmark_test(filter_test)
 macro(add_filter_test name filter expect)
   add_test(NAME ${name} COMMAND filter_test --benchmark_min_time=0.01 --benchmark_filter=${filter} ${expect})
@@ -152,8 +161,8 @@
 compile_output_test(memory_manager_test)
 add_test(NAME memory_manager_test COMMAND memory_manager_test --benchmark_min_time=0.01)

-check_cxx_compiler_flag(-std=c++03 BENCHMARK_HAS_CXX03_FLAG)
-if (BENCHMARK_HAS_CXX03_FLAG)
+# MSVC does not allow to set the language standard to C++98/03.
+if(NOT CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
   compile_benchmark_test(cxx03_test)
   set_target_properties(cxx03_test
       PROPERTIES
@@ -164,11 +173,17 @@
   # causing the test to fail to compile. To prevent this we explicitly disable
   # the warning.
   check_cxx_compiler_flag(-Wno-odr BENCHMARK_HAS_WNO_ODR)
-  if (BENCHMARK_ENABLE_LTO AND BENCHMARK_HAS_WNO_ODR)
-    set_target_properties(cxx03_test
-        PROPERTIES
-        LINK_FLAGS "-Wno-odr")
+  check_cxx_compiler_flag(-Wno-lto-type-mismatch BENCHMARK_HAS_WNO_LTO_TYPE_MISMATCH)
+  # Cannot set_target_properties multiple times here because the warnings will
+  # be overwritten on each call
+  set (DISABLE_LTO_WARNINGS "")
+  if (BENCHMARK_HAS_WNO_ODR)
+    set(DISABLE_LTO_WARNINGS "${DISABLE_LTO_WARNINGS} -Wno-odr")
+  endif()
+  if (BENCHMARK_HAS_WNO_LTO_TYPE_MISMATCH)
+    set(DISABLE_LTO_WARNINGS "${DISABLE_LTO_WARNINGS} -Wno-lto-type-mismatch")
   endif()
+  set_target_properties(cxx03_test PROPERTIES LINK_FLAGS "${DISABLE_LTO_WARNINGS}")
   add_test(NAME cxx03 COMMAND cxx03_test --benchmark_min_time=0.01)
 endif()

@@ -204,6 +219,7 @@
   add_gtest(statistics_gtest)
   add_gtest(string_util_gtest)
   add_gtest(perf_counters_gtest)
+  add_gtest(time_unit_gtest)
 endif(BENCHMARK_ENABLE_GTEST_TESTS)

 ###############################################################################
diff --git a/MicroBenchmarks/libs/benchmark/test/args_product_test.cc b/MicroBenchmarks/libs/benchmark/test/args_product_test.cc
--- a/MicroBenchmarks/libs/benchmark/test/args_product_test.cc
+++ b/MicroBenchmarks/libs/benchmark/test/args_product_test.cc
@@ -1,10 +1,10 @@
-#include "benchmark/benchmark.h"
-
 #include <cassert>
 #include <iostream>
 #include <set>
 #include <vector>

+#include "benchmark/benchmark.h"
+
 class ArgsProductFixture : public ::benchmark::Fixture {
  public:
   ArgsProductFixture()
@@ -37,7 +37,7 @@
   virtual ~ArgsProductFixture() {
     if (actualValues != expectedValues) {
       std::cout << "EXPECTED\n";
-      for (auto v : expectedValues) {
+      for (const auto& v : expectedValues) {
         std::cout << "{";
         for (int64_t iv : v) {
           std::cout << iv << ", ";
@@ -45,7 +45,7 @@
         std::cout << "}\n";
       }
       std::cout << "ACTUAL\n";
-      for (auto v : actualValues) {
+      for (const auto& v : actualValues) {
         std::cout << "{";
         for (int64_t iv : v) {
           std::cout << iv << ", ";
diff --git a/MicroBenchmarks/libs/benchmark/test/basic_test.cc b/MicroBenchmarks/libs/benchmark/test/basic_test.cc
--- a/MicroBenchmarks/libs/benchmark/test/basic_test.cc
+++ b/MicroBenchmarks/libs/benchmark/test/basic_test.cc
@@ -13,7 +13,7 @@

 void BM_spin_empty(benchmark::State& state) {
   for (auto _ : state) {
-    for (int x = 0; x < state.range(0); ++x) {
+    for (auto x = 0; x < state.range(0); ++x) {
       benchmark::DoNotOptimize(x);
     }
   }
@@ -22,11 +22,11 @@
 BASIC_BENCHMARK_TEST(BM_spin_empty)->ThreadPerCpu();

 void BM_spin_pause_before(benchmark::State& state) {
-  for (int i = 0; i < state.range(0); ++i) {
+  for (auto i = 0; i < state.range(0); ++i) {
     benchmark::DoNotOptimize(i);
   }
   for (auto _ : state) {
-    for (int i = 0; i < state.range(0); ++i) {
+    for (auto i = 0; i < state.range(0); ++i) {
       benchmark::DoNotOptimize(i);
     }
   }
@@ -37,11 +37,11 @@
 void BM_spin_pause_during(benchmark::State& state) {
   for (auto _ : state) {
     state.PauseTiming();
-    for (int i = 0; i < state.range(0); ++i) {
+    for (auto i = 0; i < state.range(0); ++i) {
       benchmark::DoNotOptimize(i);
     }
     state.ResumeTiming();
-    for (int i = 0; i < state.range(0); ++i) {
+    for (auto i = 0; i < state.range(0); ++i) {
       benchmark::DoNotOptimize(i);
     }
   }
@@ -62,11 +62,11 @@

 void BM_spin_pause_after(benchmark::State& state) {
   for (auto _ : state) {
-    for (int i = 0; i < state.range(0); ++i) {
+    for (auto i = 0; i < state.range(0); ++i) {
       benchmark::DoNotOptimize(i);
     }
   }
-  for (int i = 0; i < state.range(0); ++i) {
+  for (auto i = 0; i < state.range(0); ++i) {
     benchmark::DoNotOptimize(i);
   }
 }
@@ -74,15 +74,15 @@
 BASIC_BENCHMARK_TEST(BM_spin_pause_after)->ThreadPerCpu();

 void BM_spin_pause_before_and_after(benchmark::State& state) {
-  for (int i = 0; i < state.range(0); ++i) {
+  for (auto i = 0; i < state.range(0); ++i) {
     benchmark::DoNotOptimize(i);
   }
   for (auto _ : state) {
-    for (int i = 0; i < state.range(0); ++i) {
+    for (auto i = 0; i < state.range(0); ++i) {
       benchmark::DoNotOptimize(i);
     }
   }
-  for (int i = 0; i < state.range(0); ++i) {
+  for (auto i = 0; i < state.range(0); ++i) {
     benchmark::DoNotOptimize(i);
   }
 }
@@ -96,7 +96,6 @@
 BENCHMARK(BM_empty_stop_start);
 BENCHMARK(BM_empty_stop_start)->ThreadPerCpu();

-
 void BM_KeepRunning(benchmark::State& state) {
   benchmark::IterationCount iter_count = 0;
   assert(iter_count == state.iterations());
@@ -142,10 +141,39 @@
 }
 BENCHMARK(BM_RangedFor);

+#ifdef BENCHMARK_HAS_CXX11
+template <typename T>
+void BM_OneTemplateFunc(benchmark::State& state) {
+  auto arg = state.range(0);
+  T sum = 0;
+  for (auto _ : state) {
+    sum += static_cast<T>(arg);
+  }
+}
+BENCHMARK(BM_OneTemplateFunc<int>)->Arg(1);
+BENCHMARK(BM_OneTemplateFunc<double>)->Arg(1);
+
+template <typename A, typename B>
+void BM_TwoTemplateFunc(benchmark::State& state) {
+  auto arg = state.range(0);
+  A sum = 0;
+  B prod = 1;
+  for (auto _ : state) {
+    sum += static_cast<A>(arg);
+    prod *= static_cast<B>(arg);
+  }
+}
+BENCHMARK(BM_TwoTemplateFunc<int, double>)->Arg(1);
+BENCHMARK(BM_TwoTemplateFunc<double, int>)->Arg(1);
+
+#endif  // BENCHMARK_HAS_CXX11
+
 // Ensure that StateIterator provides all the necessary typedefs required to
 // instantiate std::iterator_traits.
-static_assert(std::is_same<
-  typename std::iterator_traits<benchmark::State::StateIterator>::value_type,
-  typename benchmark::State::StateIterator::value_type>::value, "");
+static_assert(
+    std::is_same<typename std::iterator_traits<
+                     benchmark::State::StateIterator>::value_type,
+                 typename benchmark::State::StateIterator::value_type>::value,
+    "");

 BENCHMARK_MAIN();
diff --git a/MicroBenchmarks/libs/benchmark/test/benchmark_gtest.cc b/MicroBenchmarks/libs/benchmark/test/benchmark_gtest.cc
--- a/MicroBenchmarks/libs/benchmark/test/benchmark_gtest.cc
+++ b/MicroBenchmarks/libs/benchmark/test/benchmark_gtest.cc
@@ -8,7 +8,7 @@
 namespace benchmark {
 namespace internal {

-extern std::map<std::string, std::string>* global_context;
+BENCHMARK_EXPORT extern std::map<std::string, std::string>* global_context;

 namespace {
diff --git a/MicroBenchmarks/libs/benchmark/test/benchmark_random_interleaving_gtest.cc b/MicroBenchmarks/libs/benchmark/test/benchmark_random_interleaving_gtest.cc
--- a/MicroBenchmarks/libs/benchmark/test/benchmark_random_interleaving_gtest.cc
+++ b/MicroBenchmarks/libs/benchmark/test/benchmark_random_interleaving_gtest.cc
@@ -51,10 +51,9 @@

   void Execute(const std::string& pattern) {
     queue->Clear();
-    BenchmarkReporter* reporter = new NullReporter;
+    std::unique_ptr<BenchmarkReporter> reporter(new NullReporter());
     FLAGS_benchmark_filter = pattern;
-    RunSpecifiedBenchmarks(reporter);
-    delete reporter;
+    RunSpecifiedBenchmarks(reporter.get());

     queue->Put("DONE");  // End marker
   }
@@ -111,8 +110,8 @@
     std::vector<std::string> interleaving;
     interleaving.push_back(queue->Get());
     interleaving.push_back(queue->Get());
-    element_count[interleaving[0].c_str()]++;
-    element_count[interleaving[1].c_str()]++;
+    element_count[interleaving[0]]++;
+    element_count[interleaving[1]]++;
     interleaving_count[StrFormat("%s,%s", interleaving[0].c_str(),
                                  interleaving[1].c_str())]++;
   }
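The new test file that follows exercises the Setup()/Teardown() hooks added to the benchmark registration API. As a compact usage sketch (MySetup, MyTeardown, and BM_Work are illustrative names): each hook runs once per benchmark run — that is, once per argument set and per repetition, always on thread 0 — bracketing all threads of that run.

#include <cassert>

#include "benchmark/benchmark.h"

static void MySetup(const benchmark::State& state) {
  assert(state.thread_index() == 0);  // hooks never run on worker threads
}
static void MyTeardown(const benchmark::State&) {}

static void BM_Work(benchmark::State& state) {
  for (auto _ : state) {
  }
}
BENCHMARK(BM_Work)->Arg(8)->Setup(MySetup)->Teardown(MyTeardown);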
diff --git a/MicroBenchmarks/libs/benchmark/test/benchmark_setup_teardown_test.cc b/MicroBenchmarks/libs/benchmark/test/benchmark_setup_teardown_test.cc
new file mode 100644
--- /dev/null
+++ b/MicroBenchmarks/libs/benchmark/test/benchmark_setup_teardown_test.cc
@@ -0,0 +1,157 @@
+#include <atomic>
+#include <cassert>
+#include <cstdlib>
+#include <map>
+#include <memory>
+#include <mutex>
+#include <set>
+
+#include "benchmark/benchmark.h"
+
+// Test that Setup() and Teardown() are called exactly once
+// for each benchmark run (single-threaded).
+namespace single {
+static int setup_call = 0;
+static int teardown_call = 0;
+}  // namespace single
+
+static void DoSetup1(const benchmark::State& state) {
+  ++single::setup_call;
+
+  // Setup/Teardown should never be called with any thread_idx != 0.
+  assert(state.thread_index() == 0);
+}
+
+static void DoTeardown1(const benchmark::State& state) {
+  ++single::teardown_call;
+  assert(state.thread_index() == 0);
+}
+
+static void BM_with_setup(benchmark::State& state) {
+  for (auto s : state) {
+  }
+}
+BENCHMARK(BM_with_setup)
+    ->Arg(1)
+    ->Arg(3)
+    ->Arg(5)
+    ->Arg(7)
+    ->Iterations(100)
+    ->Setup(DoSetup1)
+    ->Teardown(DoTeardown1);
+
+// Test that Setup() and Teardown() are called once for each group of threads.
+namespace concurrent {
+static std::atomic<int> setup_call(0);
+static std::atomic<int> teardown_call(0);
+static std::atomic<int> func_call(0);
+}  // namespace concurrent
+
+static void DoSetup2(const benchmark::State& state) {
+  concurrent::setup_call.fetch_add(1, std::memory_order_acquire);
+  assert(state.thread_index() == 0);
+}
+
+static void DoTeardown2(const benchmark::State& state) {
+  concurrent::teardown_call.fetch_add(1, std::memory_order_acquire);
+  assert(state.thread_index() == 0);
+}
+
+static void BM_concurrent(benchmark::State& state) {
+  for (auto s : state) {
+  }
+  concurrent::func_call.fetch_add(1, std::memory_order_acquire);
+}
+
+BENCHMARK(BM_concurrent)
+    ->Setup(DoSetup2)
+    ->Teardown(DoTeardown2)
+    ->Iterations(100)
+    ->Threads(5)
+    ->Threads(10)
+    ->Threads(15);
+
+// Testing interaction with Fixture::Setup/Teardown
+namespace fixture_interaction {
+int setup = 0;
+int fixture_setup = 0;
+}  // namespace fixture_interaction
+
+#define FIXTURE_BECHMARK_NAME MyFixture
+
+class FIXTURE_BECHMARK_NAME : public ::benchmark::Fixture {
+ public:
+  void SetUp(const ::benchmark::State&) BENCHMARK_OVERRIDE {
+    fixture_interaction::fixture_setup++;
+  }
+
+  ~FIXTURE_BECHMARK_NAME() {}
+};
+
+BENCHMARK_F(FIXTURE_BECHMARK_NAME, BM_WithFixture)(benchmark::State& st) {
+  for (auto _ : st) {
+  }
+}
+
+static void DoSetupWithFixture(const benchmark::State&) {
+  fixture_interaction::setup++;
+}
+
+BENCHMARK_REGISTER_F(FIXTURE_BECHMARK_NAME, BM_WithFixture)
+    ->Arg(1)
+    ->Arg(3)
+    ->Arg(5)
+    ->Arg(7)
+    ->Setup(DoSetupWithFixture)
+    ->Repetitions(1)
+    ->Iterations(100);
+
+// Testing repetitions.
+namespace repetitions {
+int setup = 0;
+}
+
+static void DoSetupWithRepetitions(const benchmark::State&) {
+  repetitions::setup++;
+}
+
+static void BM_WithRep(benchmark::State& state) {
+  for (auto _ : state) {
+  }
+}
+
+BENCHMARK(BM_WithRep)
+    ->Arg(1)
+    ->Arg(3)
+    ->Arg(5)
+    ->Arg(7)
+    ->Setup(DoSetupWithRepetitions)
+    ->Iterations(100)
+    ->Repetitions(4);
+
+int main(int argc, char** argv) {
+  benchmark::Initialize(&argc, argv);
+
+  size_t ret = benchmark::RunSpecifiedBenchmarks(".");
+  assert(ret > 0);
+
+  // Setup/Teardown is called once for each arg group (1,3,5,7).
+  assert(single::setup_call == 4);
+  assert(single::teardown_call == 4);
+
+  // 3 group of threads calling this function (3,5,10).
+  assert(concurrent::setup_call.load(std::memory_order_relaxed) == 3);
+  assert(concurrent::teardown_call.load(std::memory_order_relaxed) == 3);
+  assert((5 + 10 + 15) ==
+         concurrent::func_call.load(std::memory_order_relaxed));
+
+  // Setup is called 4 times, once for each arg group (1,3,5,7)
+  assert(fixture_interaction::setup == 4);
+  // Fixture::Setup is called everytime the bm routine is run.
+  // The exact number is indeterministic, so we just assert that
+  // it's more than setup.
+  assert(fixture_interaction::fixture_setup > fixture_interaction::setup);
+
+  // Setup is call once for each repetition * num_arg = 4 * 4 = 16.
+  assert(repetitions::setup == 16);
+
+  return 0;
+}
diff --git a/MicroBenchmarks/libs/benchmark/test/benchmark_test.cc b/MicroBenchmarks/libs/benchmark/test/benchmark_test.cc
--- a/MicroBenchmarks/libs/benchmark/test/benchmark_test.cc
+++ b/MicroBenchmarks/libs/benchmark/test/benchmark_test.cc
@@ -93,8 +93,9 @@
   state.SetItemsProcessed(state.iterations() * state.range(1));
   state.SetBytesProcessed(state.iterations() * state.range(1) * sizeof(int));
 }
-// Test many inserts at once to reduce the total iterations needed. Otherwise, the slower,
-// non-timed part of each iteration will make the benchmark take forever.
+// Test many inserts at once to reduce the total iterations needed. Otherwise,
+// the slower, non-timed part of each iteration will make the benchmark take
+// forever.
 BENCHMARK(BM_SetInsert)->Ranges({{1 << 10, 8 << 10}, {128, 512}});

 template <typename Container,
@@ ... @@
 BENCHMARK_TEMPLATE2(BM_Sequential, std::vector<int>, int)->Range(1, 1 << 20);

 static void BM_SetupTeardown(benchmark::State& state) {
-  if (state.thread_index == 0) {
+  if (state.thread_index() == 0) {
     // No need to lock test_vector_mu here as this is running single-threaded.
     test_vector = new std::vector<int>();
   }
@@ -139,7 +140,7 @@
     test_vector->pop_back();
     ++i;
   }
-  if (state.thread_index == 0) {
+  if (state.thread_index() == 0) {
     delete test_vector;
   }
 }
@@ -156,11 +157,11 @@

 static void BM_ParallelMemset(benchmark::State& state) {
   int64_t size = state.range(0) / static_cast<int64_t>(sizeof(int));
-  int thread_size = static_cast<int>(size) / state.threads;
-  int from = thread_size * state.thread_index;
+  int thread_size = static_cast<int>(size) / state.threads();
+  int from = thread_size * state.thread_index();
   int to = from + thread_size;

-  if (state.thread_index == 0) {
+  if (state.thread_index() == 0) {
     test_vector = new std::vector<int>(static_cast<size_t>(size));
   }

@@ -172,7 +173,7 @@
     }
   }

-  if (state.thread_index == 0) {
+  if (state.thread_index() == 0) {
     delete test_vector;
   }
 }
@@ -214,7 +215,8 @@
                   std::pair<int, double>(42, 3.8));

 void BM_non_template_args(benchmark::State& state, int, double) {
-  while(state.KeepRunning()) {}
+  while (state.KeepRunning()) {
+  }
 }
 BENCHMARK_CAPTURE(BM_non_template_args, basic_test, 0, 0);

@@ -223,14 +225,14 @@
 static void BM_DenseThreadRanges(benchmark::State& st) {
   switch (st.range(0)) {
     case 1:
-      assert(st.threads == 1 || st.threads == 2 || st.threads == 3);
+      assert(st.threads() == 1 || st.threads() == 2 || st.threads() == 3);
       break;
     case 2:
-      assert(st.threads == 1 || st.threads == 3 || st.threads == 4);
+      assert(st.threads() == 1 || st.threads() == 3 || st.threads() == 4);
       break;
     case 3:
-      assert(st.threads == 5 || st.threads == 8 || st.threads == 11 ||
-             st.threads == 14);
+      assert(st.threads() == 5 || st.threads() == 8 || st.threads() == 11 ||
+             st.threads() == 14);
       break;
     default:
       assert(false && "Invalid test case number");
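A recurring change in the tests above (and in fixture_test.cc below) is the migration from the public data members state.thread_index / state.threads to the accessor functions introduced in v1.6.x. A before/after sketch of the pattern, with a hypothetical BM_PerThread benchmark:

#include "benchmark/benchmark.h"

static void BM_PerThread(benchmark::State& state) {
  if (state.thread_index() == 0) {
    // one-time setup; previously guarded by `state.thread_index == 0`
  }
  for (auto _ : state) {
  }
  if (state.thread_index() == state.threads() - 1) {
    // last thread; previously compared against `state.threads`
  }
}
BENCHMARK(BM_PerThread)->Threads(4);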
diff --git a/MicroBenchmarks/libs/benchmark/test/clobber_memory_assembly_test.cc b/MicroBenchmarks/libs/benchmark/test/clobber_memory_assembly_test.cc
--- a/MicroBenchmarks/libs/benchmark/test/clobber_memory_assembly_test.cc
+++ b/MicroBenchmarks/libs/benchmark/test/clobber_memory_assembly_test.cc
@@ -9,7 +9,6 @@
 extern int ExternInt;
 extern int ExternInt2;
 extern int ExternInt3;
-
 }

 // CHECK-LABEL: test_basic:
diff --git a/MicroBenchmarks/libs/benchmark/test/complexity_test.cc b/MicroBenchmarks/libs/benchmark/test/complexity_test.cc
--- a/MicroBenchmarks/libs/benchmark/test/complexity_test.cc
+++ b/MicroBenchmarks/libs/benchmark/test/complexity_test.cc
@@ -4,6 +4,7 @@
 #include <cmath>
 #include <cstdlib>
 #include <vector>
+
 #include "benchmark/benchmark.h"
 #include "output_test.h"

@@ -12,9 +13,10 @@
 #define ADD_COMPLEXITY_CASES(...) \
   int CONCAT(dummy, __LINE__) = AddComplexityTest(__VA_ARGS__)

-int AddComplexityTest(std::string test_name, std::string big_o_test_name,
-                      std::string rms_test_name, std::string big_o,
-                      int family_index) {
+int AddComplexityTest(const std::string &test_name,
+                      const std::string &big_o_test_name,
+                      const std::string &rms_test_name,
+                      const std::string &big_o, int family_index) {
   SetSubstitutions({{"%name", test_name},
                     {"%bigo_name", big_o_test_name},
                     {"%rms_name", rms_test_name},
@@ -36,6 +38,7 @@
                      {"\"repetitions\": %int,$", MR_Next},
                      {"\"threads\": 1,$", MR_Next},
                      {"\"aggregate_name\": \"BigO\",$", MR_Next},
+                     {"\"aggregate_unit\": \"time\",$", MR_Next},
                      {"\"cpu_coefficient\": %float,$", MR_Next},
                      {"\"real_coefficient\": %float,$", MR_Next},
                      {"\"big_o\": \"%bigo\",$", MR_Next},
@@ -49,6 +52,7 @@
                      {"\"repetitions\": %int,$", MR_Next},
                      {"\"threads\": 1,$", MR_Next},
                      {"\"aggregate_name\": \"RMS\",$", MR_Next},
+                     {"\"aggregate_unit\": \"percentage\",$", MR_Next},
                      {"\"rms\": %float$", MR_Next},
                      {"}", MR_Next}});
   AddCases(TC_CSVOut, {{"^\"%bigo_name\",,%float,%float,%bigo,,,,,$"},
@@ -63,7 +67,7 @@
 // --------------------------- Testing BigO O(1) --------------------------- //
 // ========================================================================= //

-void BM_Complexity_O1(benchmark::State& state) {
+void BM_Complexity_O1(benchmark::State &state) {
   for (auto _ : state) {
     for (int i = 0; i < 1024; ++i) {
       benchmark::DoNotOptimize(&i);
@@ -112,7 +116,7 @@
   return v;
 }

-void BM_Complexity_O_N(benchmark::State& state) {
+void BM_Complexity_O_N(benchmark::State &state) {
   auto v = ConstructRandomVector(state.range(0));
   // Test worst case scenario (item not in vector)
   const int64_t item_not_in_vector = state.range(0) * 2;
@@ -154,7 +158,7 @@
 // ------------------------- Testing BigO O(N*lgN) ------------------------- //
 // ========================================================================= //

-static void BM_Complexity_O_N_log_N(benchmark::State& state) {
+static void BM_Complexity_O_N_log_N(benchmark::State &state) {
   auto v = ConstructRandomVector(state.range(0));
   for (auto _ : state) {
     std::sort(v.begin(), v.end());
@@ -197,7 +201,7 @@
 // -------- Testing formatting of Complexity with captured args ------------ //
 // ========================================================================= //

-void BM_ComplexityCaptureArgs(benchmark::State& state, int n) {
+void BM_ComplexityCaptureArgs(benchmark::State &state, int n) {
   for (auto _ : state) {
     // This test requires a non-zero CPU time to avoid divide-by-zero
     benchmark::DoNotOptimize(state.iterations());
diff --git a/MicroBenchmarks/libs/benchmark/test/cxx03_test.cc b/MicroBenchmarks/libs/benchmark/test/cxx03_test.cc
--- a/MicroBenchmarks/libs/benchmark/test/cxx03_test.cc
+++ b/MicroBenchmarks/libs/benchmark/test/cxx03_test.cc
@@ -44,8 +44,7 @@
 BENCHMARK_TEMPLATE1(BM_template1, int);

 template <class T>
-struct BM_Fixture : public ::benchmark::Fixture {
-};
+struct BM_Fixture : public ::benchmark::Fixture {};

 BENCHMARK_TEMPLATE_F(BM_Fixture, BM_template1, long)(benchmark::State& state) {
   BM_empty(state);
@@ -55,8 +54,8 @@
 }

 void BM_counters(benchmark::State& state) {
-    BM_empty(state);
-    state.counters["Foo"] = 2;
+  BM_empty(state);
+  state.counters["Foo"] = 2;
 }
 BENCHMARK(BM_counters);
diff --git a/MicroBenchmarks/libs/benchmark/test/diagnostics_test.cc b/MicroBenchmarks/libs/benchmark/test/diagnostics_test.cc
--- a/MicroBenchmarks/libs/benchmark/test/diagnostics_test.cc
+++ b/MicroBenchmarks/libs/benchmark/test/diagnostics_test.cc
@@ -26,7 +26,8 @@
 }

 void try_invalid_pause_resume(benchmark::State& state) {
-#if !defined(TEST_BENCHMARK_LIBRARY_HAS_NO_ASSERTIONS) && !defined(TEST_HAS_NO_EXCEPTIONS)
+#if !defined(TEST_BENCHMARK_LIBRARY_HAS_NO_ASSERTIONS) && \
+    !defined(TEST_HAS_NO_EXCEPTIONS)
   try {
     state.PauseTiming();
     std::abort();
@@ -57,13 +58,12 @@
 }
 BENCHMARK(BM_diagnostic_test);

-
 void BM_diagnostic_test_keep_running(benchmark::State& state) {
   static bool called_once = false;

   if (called_once == false) try_invalid_pause_resume(state);

-  while(state.KeepRunning()) {
+  while (state.KeepRunning()) {
     benchmark::DoNotOptimize(state.iterations());
   }
Expected to only find 8 " "occurrences of \"BM_SummaryRepeat/repeats:3\" substring:\n" "\"name\": \"BM_SummaryRepeat/repeats:3\", " "\"name\": \"BM_SummaryRepeat/repeats:3\", " "\"name\": \"BM_SummaryRepeat/repeats:3\", " "\"name\": \"BM_SummaryRepeat/repeats:3_mean\", " "\"name\": \"BM_SummaryRepeat/repeats:3_median\", " - "\"name\": \"BM_SummaryRepeat/repeats:3_stddev\"\nThe entire " + "\"name\": \"BM_SummaryRepeat/repeats:3_stddev\", " + "\"name\": \"BM_SummaryRepeat/repeats:3_cv\"\nThe entire " "output:\n"; std::cout << output; return 1; diff --git a/MicroBenchmarks/libs/benchmark/test/donotoptimize_assembly_test.cc b/MicroBenchmarks/libs/benchmark/test/donotoptimize_assembly_test.cc --- a/MicroBenchmarks/libs/benchmark/test/donotoptimize_assembly_test.cc +++ b/MicroBenchmarks/libs/benchmark/test/donotoptimize_assembly_test.cc @@ -15,7 +15,7 @@ struct NotTriviallyCopyable { NotTriviallyCopyable(); explicit NotTriviallyCopyable(int x) : value(x) {} - NotTriviallyCopyable(NotTriviallyCopyable const&); + NotTriviallyCopyable(NotTriviallyCopyable const &); int value; }; @@ -23,7 +23,6 @@ int value; int data[2]; }; - } // CHECK-LABEL: test_with_rvalue: extern "C" void test_with_rvalue() { @@ -118,8 +117,7 @@ // CHECK-LABEL: test_inc_integer: extern "C" int test_inc_integer() { int x = 0; - for (int i=0; i < 5; ++i) - benchmark::DoNotOptimize(++x); + for (int i = 0; i < 5; ++i) benchmark::DoNotOptimize(++x); // CHECK: movl $1, [[DEST:.*]] // CHECK: {{(addl \$1,|incl)}} [[DEST]] // CHECK: {{(addl \$1,|incl)}} [[DEST]] @@ -147,7 +145,7 @@ // CHECK-CLANG: movq %rax, -{{[0-9]+}}(%[[REG:[a-z]+]]) // CHECK: ret int x = 42; - int * const xp = &x; + int *const xp = &x; benchmark::DoNotOptimize(xp); } diff --git a/MicroBenchmarks/libs/benchmark/test/donotoptimize_test.cc b/MicroBenchmarks/libs/benchmark/test/donotoptimize_test.cc --- a/MicroBenchmarks/libs/benchmark/test/donotoptimize_test.cc +++ b/MicroBenchmarks/libs/benchmark/test/donotoptimize_test.cc @@ -1,27 +1,28 @@ -#include "benchmark/benchmark.h" - #include +#include "benchmark/benchmark.h" + namespace { #if defined(__GNUC__) std::uint64_t double_up(const std::uint64_t x) __attribute__((const)); #endif std::uint64_t double_up(const std::uint64_t x) { return x * 2; } -} +} // namespace // Using DoNotOptimize on types like BitRef seem to cause a lot of problems // with the inline assembly on both GCC and Clang. 
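// (Editorial sketch, hedged: the well-supported pattern keeps DoNotOptimize
// on plain values rather than on reference-holding proxy types. BM_Sketch
// below is a hypothetical illustration, not part of this test file:
//
//   static void BM_Sketch(benchmark::State& state) {
//     for (auto _ : state) {
//       int x = 0;                    // a plain value is safe to pin
//       benchmark::DoNotOptimize(x);  // compiler must keep x observable
//     }
//   }
//
// Proxy types such as BitRef below are exactly what the inline assembly
// behind DoNotOptimize struggles with.)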
 struct BitRef {
   int index;
-  unsigned char &byte;
+  unsigned char& byte;

-public:
+ public:
   static BitRef Make() {
     static unsigned char arr[2] = {};
     BitRef b(1, arr[0]);
     return b;
   }
-private:
+
+ private:
   BitRef(int i, unsigned char& b) : index(i), byte(b) {}
 };
diff --git a/MicroBenchmarks/libs/benchmark/test/filter_test.cc b/MicroBenchmarks/libs/benchmark/test/filter_test.cc
--- a/MicroBenchmarks/libs/benchmark/test/filter_test.cc
+++ b/MicroBenchmarks/libs/benchmark/test/filter_test.cc
@@ -70,7 +70,7 @@
 }
 BENCHMARK(BM_FooBa);

-int main(int argc, char **argv) {
+int main(int argc, char** argv) {
   bool list_only = false;
   for (int i = 0; i < argc; ++i)
     list_only |= std::string(argv[i]).find("--benchmark_list_tests") !=
diff --git a/MicroBenchmarks/libs/benchmark/test/fixture_test.cc b/MicroBenchmarks/libs/benchmark/test/fixture_test.cc
--- a/MicroBenchmarks/libs/benchmark/test/fixture_test.cc
+++ b/MicroBenchmarks/libs/benchmark/test/fixture_test.cc
@@ -1,22 +1,22 @@
-#include "benchmark/benchmark.h"
-
 #include <cassert>
 #include <memory>

+#include "benchmark/benchmark.h"
+
 #define FIXTURE_BECHMARK_NAME MyFixture

 class FIXTURE_BECHMARK_NAME : public ::benchmark::Fixture {
  public:
   void SetUp(const ::benchmark::State& state) BENCHMARK_OVERRIDE {
-    if (state.thread_index == 0) {
+    if (state.thread_index() == 0) {
       assert(data.get() == nullptr);
       data.reset(new int(42));
     }
   }

   void TearDown(const ::benchmark::State& state) BENCHMARK_OVERRIDE {
-    if (state.thread_index == 0) {
+    if (state.thread_index() == 0) {
       assert(data.get() != nullptr);
       data.reset();
     }
@@ -27,7 +27,7 @@
   std::unique_ptr<int> data;
 };

-BENCHMARK_F(FIXTURE_BECHMARK_NAME, Foo)(benchmark::State &st) {
+BENCHMARK_F(FIXTURE_BECHMARK_NAME, Foo)(benchmark::State& st) {
   assert(data.get() != nullptr);
   assert(*data == 42);
   for (auto _ : st) {
@@ -35,7 +35,7 @@
 }

 BENCHMARK_DEFINE_F(FIXTURE_BECHMARK_NAME, Bar)(benchmark::State& st) {
-  if (st.thread_index == 0) {
+  if (st.thread_index() == 0) {
     assert(data.get() != nullptr);
     assert(*data == 42);
   }
diff --git a/MicroBenchmarks/libs/benchmark/test/internal_threading_test.cc b/MicroBenchmarks/libs/benchmark/test/internal_threading_test.cc
--- a/MicroBenchmarks/libs/benchmark/test/internal_threading_test.cc
+++ b/MicroBenchmarks/libs/benchmark/test/internal_threading_test.cc
@@ -3,6 +3,7 @@
 #include <chrono>
 #include <thread>
+
 #include "../src/timers.h"
 #include "benchmark/benchmark.h"
 #include "output_test.h"
diff --git a/MicroBenchmarks/libs/benchmark/test/map_test.cc b/MicroBenchmarks/libs/benchmark/test/map_test.cc
--- a/MicroBenchmarks/libs/benchmark/test/map_test.cc
+++ b/MicroBenchmarks/libs/benchmark/test/map_test.cc
@@ -1,8 +1,8 @@
-#include "benchmark/benchmark.h"
-
 #include <cstdlib>
 #include <map>

+#include "benchmark/benchmark.h"
+
 namespace {

 std::map<int, int> ConstructRandomMap(int size) {
diff --git a/MicroBenchmarks/libs/benchmark/test/multiple_ranges_test.cc b/MicroBenchmarks/libs/benchmark/test/multiple_ranges_test.cc
--- a/MicroBenchmarks/libs/benchmark/test/multiple_ranges_test.cc
+++ b/MicroBenchmarks/libs/benchmark/test/multiple_ranges_test.cc
@@ -1,10 +1,10 @@
-#include "benchmark/benchmark.h"
-
 #include <cassert>
 #include <iostream>
 #include <set>
 #include <vector>

+#include "benchmark/benchmark.h"
+
 class MultipleRangesFixture : public ::benchmark::Fixture {
  public:
   MultipleRangesFixture()
@@ -42,7 +42,7 @@
   virtual ~MultipleRangesFixture() {
     if (actualValues != expectedValues) {
       std::cout << "EXPECTED\n";
-      for (auto v : expectedValues) {
+      for (const auto& v : expectedValues) {
         std::cout << "{";
         for (int64_t iv : v) {
           std::cout << iv << ", ";
@@ -50,7 +50,7 @@
         std::cout << "}\n";
       }
       std::cout << "ACTUAL\n";
-      for (auto v : actualValues) {
+      for (const auto& v : actualValues) {
         std::cout << "{";
         for (int64_t iv : v) {
           std::cout << iv << ", ";
diff --git a/MicroBenchmarks/libs/benchmark/test/options_test.cc b/MicroBenchmarks/libs/benchmark/test/options_test.cc
--- a/MicroBenchmarks/libs/benchmark/test/options_test.cc
+++ b/MicroBenchmarks/libs/benchmark/test/options_test.cc
@@ -1,7 +1,8 @@
-#include "benchmark/benchmark.h"
 #include <chrono>
 #include <thread>

+#include "benchmark/benchmark.h"
+
 #if defined(NDEBUG)
 #undef NDEBUG
 #endif
@@ -65,11 +66,9 @@
   // Test that the requested iteration count is respected.
   assert(state.max_iterations == 42);
   size_t actual_iterations = 0;
-  for (auto _ : state)
-    ++actual_iterations;
+  for (auto _ : state) ++actual_iterations;
   assert(state.iterations() == state.max_iterations);
   assert(state.iterations() == 42);
-
 }
 BENCHMARK(BM_explicit_iteration_count)->Iterations(42);
diff --git a/MicroBenchmarks/libs/benchmark/test/output_test.h b/MicroBenchmarks/libs/benchmark/test/output_test.h
--- a/MicroBenchmarks/libs/benchmark/test/output_test.h
+++ b/MicroBenchmarks/libs/benchmark/test/output_test.h
@@ -85,7 +85,7 @@
 struct Results;
 typedef std::function<void(Results const&)> ResultsCheckFn;

-size_t AddChecker(const char* bm_name_pattern, ResultsCheckFn fn);
+size_t AddChecker(const char* bm_name_pattern, const ResultsCheckFn& fn);

 // Class holding the results of a benchmark.
 // It is passed in calls to checker functions.
@@ -113,9 +113,7 @@
     return NumIterations() * GetTime(kRealTime);
   }
   // get the cpu_time duration of the benchmark in seconds
-  double DurationCPUTime() const {
-    return NumIterations() * GetTime(kCpuTime);
-  }
+  double DurationCPUTime() const { return NumIterations() * GetTime(kCpuTime); }

   // get the string for a result by name, or nullptr if the name
   // is not found
diff --git a/MicroBenchmarks/libs/benchmark/test/output_test_helper.cc b/MicroBenchmarks/libs/benchmark/test/output_test_helper.cc
--- a/MicroBenchmarks/libs/benchmark/test/output_test_helper.cc
+++ b/MicroBenchmarks/libs/benchmark/test/output_test_helper.cc
@@ -41,14 +41,17 @@
 // clang-format off
 static std::string safe_dec_re = "[0-9]*[.]?[0-9]+([eE][-+][0-9]+)?";
 static std::string time_re = "([0-9]+[.])?[0-9]+";
+ static std::string percentage_re = "[0-9]+[.][0-9]{2}";
 static SubMap map = {
 {"%float", "[0-9]*[.]?[0-9]+([eE][-+][0-9]+)?"},
 // human-readable float
 {"%hrfloat", "[0-9]*[.]?[0-9]+([eE][-+][0-9]+)?[kMGTPEZYmunpfazy]?"},
+ {"%percentage", percentage_re},
 {"%int", "[ ]*[0-9]+"},
 {" %s ", "[ ]+"},
 {"%time", "[ ]*" + time_re + "[ ]+ns"},
 {"%console_report", "[ ]*" + time_re + "[ ]+ns [ ]*" + time_re + "[ ]+ns [ ]*[0-9]+"},
+ {"%console_percentage_report", "[ ]*" + percentage_re + "[ ]+% [ ]*" + percentage_re + "[ ]+% [ ]*[0-9]+"},
 {"%console_us_report", "[ ]*" + time_re + "[ ]+us [ ]*" + time_re + "[ ]+us [ ]*[0-9]+"},
 {"%console_ms_report", "[ ]*" + time_re + "[ ]+ms [ ]*" + time_re + "[ ]+ms [ ]*[0-9]+"},
 {"%console_s_report", "[ ]*" + time_re + "[ ]+s [ ]*" + time_re + "[ ]+s [ ]*[0-9]+"},
@@ -138,7 +141,7 @@
 class TestReporter : public benchmark::BenchmarkReporter {
  public:
   TestReporter(std::vector<benchmark::BenchmarkReporter*> reps)
-      : reporters_(reps) {}
+      : reporters_(std::move(reps)) {}

   virtual bool ReportContext(const Context& context) BENCHMARK_OVERRIDE {
     bool last_ret = false;
@@ -180,7 +183,7 @@
  public:
   struct PatternAndFn : public TestCase {  // reusing TestCase for its regexes
     PatternAndFn(const std::string& rx, ResultsCheckFn fn_)
-        : TestCase(rx), fn(fn_) {}
+        : TestCase(rx), fn(std::move(fn_)) {}
     ResultsCheckFn fn;
   };

@@ -188,7 +191,7 @@
   std::vector<Results> results;
   std::vector<std::string> field_names;

-  void Add(const std::string& entry_pattern, ResultsCheckFn fn);
+  void Add(const std::string& entry_pattern, const ResultsCheckFn& fn);

   void CheckResults(std::stringstream& output);

@@ -207,7 +210,8 @@
 }

 // add a results checker for a benchmark
-void ResultsChecker::Add(const std::string& entry_pattern, ResultsCheckFn fn) {
+void ResultsChecker::Add(const std::string& entry_pattern,
+                         const ResultsCheckFn& fn) {
   check_patterns.emplace_back(entry_pattern, fn);
 }

@@ -296,7 +300,7 @@
 }  // end namespace internal

-size_t AddChecker(const char* bm_name, ResultsCheckFn fn) {
+size_t AddChecker(const char* bm_name, const ResultsCheckFn& fn) {
   auto& rc = internal::GetResultsChecker();
   rc.Add(bm_name, fn);
   return rc.results.size();
@@ -314,9 +318,7 @@
   return num;
 }

-double Results::NumIterations() const {
-  return GetAs<double>("iterations");
-}
+double Results::NumIterations() const { return GetAs<double>("iterations"); }

 double Results::GetTime(BenchmarkTime which) const {
   BM_CHECK(which == kCpuTime || which == kRealTime);
@@ -381,10 +383,8 @@
 // Disable deprecated warnings temporarily because we need to reference
 // CSVReporter but don't want to trigger -Werror=-Wdeprecated-declarations
-#ifdef __GNUC__
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
-#endif
+BENCHMARK_DISABLE_DEPRECATED_WARNING
+
 void RunOutputTests(int argc, char* argv[]) {
   using internal::GetTestCaseList;
   benchmark::Initialize(&argc, argv);
@@ -443,9 +443,7 @@
   internal::GetResultsChecker().CheckResults(csv.out_stream);
 }

-#ifdef __GNUC__
-#pragma GCC diagnostic pop
-#endif
+BENCHMARK_RESTORE_DEPRECATED_WARNING

 int SubstrCnt(const std::string& haystack, const std::string& pat) {
   if (pat.length() == 0) return 0;
@@ -469,9 +467,8 @@
 static std::string GetRandomFileName() {
   std::string model = "test.%%%%%%";
-  for (auto & ch : model) {
-    if (ch == '%')
-      ch = RandomHexChar();
+  for (auto& ch : model) {
+    if (ch == '%') ch = RandomHexChar();
   }
   return model;
 }
@@ -488,8 +485,7 @@
   int retries = 3;
   while (--retries) {
     std::string name = GetRandomFileName();
-    if (!FileExists(name))
-      return name;
+    if (!FileExists(name)) return name;
   }
   std::cerr << "Failed to create unique temporary file name" << std::endl;
   std::abort();
diff --git a/MicroBenchmarks/libs/benchmark/test/perf_counters_gtest.cc b/MicroBenchmarks/libs/benchmark/test/perf_counters_gtest.cc
--- a/MicroBenchmarks/libs/benchmark/test/perf_counters_gtest.cc
+++ b/MicroBenchmarks/libs/benchmark/test/perf_counters_gtest.cc
@@ -5,12 +5,13 @@
 #ifndef GTEST_SKIP
 struct MsgHandler {
-  void operator=(std::ostream&){}
+  void operator=(std::ostream&) {}
 };
 #define GTEST_SKIP() return MsgHandler() = std::cout
 #endif

 using benchmark::internal::PerfCounters;
+using benchmark::internal::PerfCountersMeasurement;
 using benchmark::internal::PerfCounterValues;

 namespace {
@@ -95,6 +96,53 @@
   EXPECT_GT(values2[1], 0);
 }

+TEST(PerfCountersTest, ReopenExistingCounters) {
+  // The test works (i.e. causes read to fail) for the assumptions
+  // about hardware capabilities (i.e. small number (3-4) hardware
+  // counters) at this date.
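+  // (Editorial gloss, hedged: on such hardware only the first few of the
+  // six counter sets created below map onto real PMU slots, so a read
+  // through the earliest handle succeeds while Snapshot() on the later,
+  // oversubscribed handles is expected to fail.)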
+  if (!PerfCounters::kSupported) {
+    GTEST_SKIP() << "Test skipped because libpfm is not supported.\n";
+  }
+  EXPECT_TRUE(PerfCounters::Initialize());
+  std::vector<PerfCounters> counters;
+  counters.reserve(6);
+  for (int i = 0; i < 6; i++)
+    counters.push_back(PerfCounters::Create({kGenericPerfEvent1}));
+  PerfCounterValues values(1);
+  EXPECT_TRUE(counters[0].Snapshot(&values));
+  EXPECT_FALSE(counters[4].Snapshot(&values));
+  EXPECT_FALSE(counters[5].Snapshot(&values));
+}
+
+TEST(PerfCountersTest, CreateExistingMeasurements) {
+  // The test works (i.e. causes read to fail) for the assumptions
+  // about hardware capabilities (i.e. small number (3-4) hardware
+  // counters) at this date,
+  // the same as previous test ReopenExistingCounters.
+  if (!PerfCounters::kSupported) {
+    GTEST_SKIP() << "Test skipped because libpfm is not supported.\n";
+  }
+  EXPECT_TRUE(PerfCounters::Initialize());
+  std::vector<PerfCountersMeasurement> perf_counter_measurements;
+  std::vector<std::pair<std::string, double>> measurements;
+
+  perf_counter_measurements.reserve(10);
+  for (int i = 0; i < 10; i++)
+    perf_counter_measurements.emplace_back(
+        std::vector<std::string>{kGenericPerfEvent1});
+
+  perf_counter_measurements[0].Start();
+  EXPECT_TRUE(perf_counter_measurements[0].Stop(measurements));
+
+  measurements.clear();
+  perf_counter_measurements[8].Start();
+  EXPECT_FALSE(perf_counter_measurements[8].Stop(measurements));
+
+  measurements.clear();
+  perf_counter_measurements[9].Start();
+  EXPECT_FALSE(perf_counter_measurements[9].Stop(measurements));
+}
+
 size_t do_work() {
   size_t res = 0;
   for (size_t i = 0; i < 100000000; ++i) res += i * i;
diff --git a/MicroBenchmarks/libs/benchmark/test/register_benchmark_test.cc b/MicroBenchmarks/libs/benchmark/test/register_benchmark_test.cc
--- a/MicroBenchmarks/libs/benchmark/test/register_benchmark_test.cc
+++ b/MicroBenchmarks/libs/benchmark/test/register_benchmark_test.cc
@@ -36,7 +36,7 @@
       BM_CHECK(run.report_label == label) << "expected " << label << " got "
                                           << run.report_label;
     } else {
-      BM_CHECK(run.report_label == "");
+      BM_CHECK(run.report_label.empty());
     }
     // clang-format on
   }
@@ -45,7 +45,7 @@
 std::vector<TestCase> ExpectedResults;

 int AddCases(std::initializer_list<TestCase> const& v) {
-  for (auto N : v) {
+  for (const auto& N : v) {
     ExpectedResults.push_back(N);
   }
   return 0;
diff --git a/MicroBenchmarks/libs/benchmark/test/repetitions_test.cc b/MicroBenchmarks/libs/benchmark/test/repetitions_test.cc
--- a/MicroBenchmarks/libs/benchmark/test/repetitions_test.cc
+++ b/MicroBenchmarks/libs/benchmark/test/repetitions_test.cc
@@ -59,6 +59,7 @@
 {"\"repetitions\": 2,$", MR_Next},
 {"\"threads\": 1,$", MR_Next},
 {"\"aggregate_name\": \"mean\",$", MR_Next},
+ {"\"aggregate_unit\": \"time\",$", MR_Next},
 {"\"iterations\": %int,$", MR_Next},
 {"\"real_time\": %float,$", MR_Next},
 {"\"cpu_time\": %float,$", MR_Next},
@@ -73,6 +74,7 @@
 {"\"repetitions\": 2,$", MR_Next},
 {"\"threads\": 1,$", MR_Next},
 {"\"aggregate_name\": \"median\",$", MR_Next},
+ {"\"aggregate_unit\": \"time\",$", MR_Next},
 {"\"iterations\": %int,$", MR_Next},
 {"\"real_time\": %float,$", MR_Next},
 {"\"cpu_time\": %float,$", MR_Next},
@@ -87,6 +89,7 @@
 {"\"repetitions\": 2,$", MR_Next},
 {"\"threads\": 1,$", MR_Next},
 {"\"aggregate_name\": \"stddev\",$", MR_Next},
+ {"\"aggregate_unit\": \"time\",$", MR_Next},
 {"\"iterations\": %int,$", MR_Next},
 {"\"real_time\": %float,$", MR_Next},
 {"\"cpu_time\": %float,$", MR_Next},
@@ -164,6 +167,7 @@
 {"\"repetitions\": 3,$", MR_Next},
 {"\"threads\": 1,$", MR_Next},
 {"\"aggregate_name\": \"mean\",$", MR_Next},
+ {"\"aggregate_unit\": \"time\",$", MR_Next},
 {"\"iterations\": %int,$", MR_Next},
 {"\"real_time\": %float,$", MR_Next},
 {"\"cpu_time\": %float,$", MR_Next},
@@ -177,6 +181,7 @@
 {"\"repetitions\": 3,$", MR_Next},
 {"\"threads\": 1,$", MR_Next},
 {"\"aggregate_name\": \"median\",$", MR_Next},
+ {"\"aggregate_unit\": \"time\",$", MR_Next},
 {"\"iterations\": %int,$", MR_Next},
 {"\"real_time\": %float,$", MR_Next},
 {"\"cpu_time\": %float,$", MR_Next},
@@ -190,6 +195,7 @@
 {"\"repetitions\": 3,$", MR_Next},
 {"\"threads\": 1,$", MR_Next},
 {"\"aggregate_name\": \"stddev\",$", MR_Next},
+ {"\"aggregate_unit\": \"time\",$", MR_Next},
 {"\"iterations\": %int,$", MR_Next},
 {"\"real_time\": %float,$", MR_Next},
 {"\"cpu_time\": %float,$", MR_Next},
diff --git a/MicroBenchmarks/libs/benchmark/test/report_aggregates_only_test.cc b/MicroBenchmarks/libs/benchmark/test/report_aggregates_only_test.cc
--- a/MicroBenchmarks/libs/benchmark/test/report_aggregates_only_test.cc
+++ b/MicroBenchmarks/libs/benchmark/test/report_aggregates_only_test.cc
@@ -19,17 +19,19 @@
 int main(int argc, char* argv[]) {
   const std::string output = GetFileReporterOutput(argc, argv);

-  if (SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3") != 3 ||
+  if (SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3") != 4 ||
       SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_mean\"") != 1 ||
       SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_median\"") != 1 ||
       SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_stddev\"") !=
-          1) {
-    std::cout << "Precondition mismatch. Expected to only find three "
+          1 ||
+      SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_cv\"") != 1) {
+    std::cout << "Precondition mismatch. Expected to only find four "
                  "occurrences of \"BM_SummaryRepeat/repeats:3\" substring:\n"
                  "\"name\": \"BM_SummaryRepeat/repeats:3_mean\", "
                  "\"name\": \"BM_SummaryRepeat/repeats:3_median\", "
-                 "\"name\": \"BM_SummaryRepeat/repeats:3_stddev\"\nThe entire "
+                 "\"name\": \"BM_SummaryRepeat/repeats:3_stddev\", "
+                 "\"name\": \"BM_SummaryRepeat/repeats:3_cv\"\nThe entire "
                  "output:\n";
     std::cout << output;
     return 1;
diff --git a/MicroBenchmarks/libs/benchmark/test/reporter_output_test.cc b/MicroBenchmarks/libs/benchmark/test/reporter_output_test.cc
--- a/MicroBenchmarks/libs/benchmark/test/reporter_output_test.cc
+++ b/MicroBenchmarks/libs/benchmark/test/reporter_output_test.cc
@@ -1,5 +1,6 @@
 #undef NDEBUG
+#include <numeric>
 #include <utility>

 #include "benchmark/benchmark.h"
@@ -454,6 +455,7 @@
 {"\"repetitions\": 2,$", MR_Next},
 {"\"threads\": 1,$", MR_Next},
 {"\"aggregate_name\": \"mean\",$", MR_Next},
+ {"\"aggregate_unit\": \"time\",$", MR_Next},
 {"\"iterations\": 2,$", MR_Next},
 {"\"name\": \"BM_Repeat/repeats:2_median\",$"},
 {"\"family_index\": 15,$", MR_Next},
@@ -463,6 +465,7 @@
 {"\"repetitions\": 2,$", MR_Next},
 {"\"threads\": 1,$", MR_Next},
 {"\"aggregate_name\": \"median\",$", MR_Next},
+ {"\"aggregate_unit\": \"time\",$", MR_Next},
 {"\"iterations\": 2,$", MR_Next},
 {"\"name\": \"BM_Repeat/repeats:2_stddev\",$"},
 {"\"family_index\": 15,$", MR_Next},
@@ -472,6 +475,7 @@
 {"\"repetitions\": 2,$", MR_Next},
 {"\"threads\": 1,$", MR_Next},
 {"\"aggregate_name\": \"stddev\",$", MR_Next},
+ {"\"aggregate_unit\": \"time\",$", MR_Next},
 {"\"iterations\": 2,$", MR_Next}});
 ADD_CASES(TC_CSVOut, {{"^\"BM_Repeat/repeats:2\",%csv_report$"},
 {"^\"BM_Repeat/repeats:2\",%csv_report$"},
@@ -519,6 +523,7 @@
 {"\"repetitions\": 3,$", MR_Next},
 {"\"threads\": 1,$", MR_Next},
 {"\"aggregate_name\": \"mean\",$", MR_Next},
+ {"\"aggregate_unit\": \"time\",$", MR_Next},
{"\"iterations\": 3,$", MR_Next}, {"\"name\": \"BM_Repeat/repeats:3_median\",$"}, {"\"family_index\": 16,$", MR_Next}, @@ -528,6 +533,7 @@ {"\"repetitions\": 3,$", MR_Next}, {"\"threads\": 1,$", MR_Next}, {"\"aggregate_name\": \"median\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, {"\"iterations\": 3,$", MR_Next}, {"\"name\": \"BM_Repeat/repeats:3_stddev\",$"}, {"\"family_index\": 16,$", MR_Next}, @@ -537,6 +543,7 @@ {"\"repetitions\": 3,$", MR_Next}, {"\"threads\": 1,$", MR_Next}, {"\"aggregate_name\": \"stddev\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, {"\"iterations\": 3,$", MR_Next}}); ADD_CASES(TC_CSVOut, {{"^\"BM_Repeat/repeats:3\",%csv_report$"}, {"^\"BM_Repeat/repeats:3\",%csv_report$"}, @@ -594,6 +601,7 @@ {"\"repetitions\": 4,$", MR_Next}, {"\"threads\": 1,$", MR_Next}, {"\"aggregate_name\": \"mean\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, {"\"iterations\": 4,$", MR_Next}, {"\"name\": \"BM_Repeat/repeats:4_median\",$"}, {"\"family_index\": 17,$", MR_Next}, @@ -603,6 +611,7 @@ {"\"repetitions\": 4,$", MR_Next}, {"\"threads\": 1,$", MR_Next}, {"\"aggregate_name\": \"median\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, {"\"iterations\": 4,$", MR_Next}, {"\"name\": \"BM_Repeat/repeats:4_stddev\",$"}, {"\"family_index\": 17,$", MR_Next}, @@ -612,6 +621,7 @@ {"\"repetitions\": 4,$", MR_Next}, {"\"threads\": 1,$", MR_Next}, {"\"aggregate_name\": \"stddev\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, {"\"iterations\": 4,$", MR_Next}}); ADD_CASES(TC_CSVOut, {{"^\"BM_Repeat/repeats:4\",%csv_report$"}, {"^\"BM_Repeat/repeats:4\",%csv_report$"}, @@ -661,6 +671,7 @@ {"\"repetitions\": 3,$", MR_Next}, {"\"threads\": 1,$", MR_Next}, {"\"aggregate_name\": \"mean\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, {"\"iterations\": 3,$", MR_Next}, {"\"name\": \"BM_SummaryRepeat/repeats:3_median\",$"}, {"\"family_index\": 19,$", MR_Next}, @@ -670,6 +681,7 @@ {"\"repetitions\": 3,$", MR_Next}, {"\"threads\": 1,$", MR_Next}, {"\"aggregate_name\": \"median\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, {"\"iterations\": 3,$", MR_Next}, {"\"name\": \"BM_SummaryRepeat/repeats:3_stddev\",$"}, {"\"family_index\": 19,$", MR_Next}, @@ -679,6 +691,7 @@ {"\"repetitions\": 3,$", MR_Next}, {"\"threads\": 1,$", MR_Next}, {"\"aggregate_name\": \"stddev\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, {"\"iterations\": 3,$", MR_Next}}); ADD_CASES(TC_CSVOut, {{".*BM_SummaryRepeat/repeats:3 ", MR_Not}, {"^\"BM_SummaryRepeat/repeats:3_mean\",%csv_report$"}, @@ -709,6 +722,7 @@ {"\"repetitions\": 2,$", MR_Next}, {"\"threads\": 1,$", MR_Next}, {"\"aggregate_name\": \"mean\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, {"\"iterations\": 2,$", MR_Next}, {"\"name\": \"BM_SummaryDisplay/repeats:2_median\",$"}, {"\"family_index\": 20,$", MR_Next}, @@ -718,6 +732,7 @@ {"\"repetitions\": 2,$", MR_Next}, {"\"threads\": 1,$", MR_Next}, {"\"aggregate_name\": \"median\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, {"\"iterations\": 2,$", MR_Next}, {"\"name\": \"BM_SummaryDisplay/repeats:2_stddev\",$"}, {"\"family_index\": 20,$", MR_Next}, @@ -727,6 +742,7 @@ {"\"repetitions\": 2,$", MR_Next}, {"\"threads\": 1,$", MR_Next}, {"\"aggregate_name\": \"stddev\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, {"\"iterations\": 2,$", MR_Next}}); ADD_CASES(TC_CSVOut, {{".*BM_SummaryDisplay/repeats:2 ", MR_Not}, @@ -761,6 +777,7 @@ {"\"repetitions\": 3,$", 
MR_Next}, {"\"threads\": 1,$", MR_Next}, {"\"aggregate_name\": \"mean\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, {"\"iterations\": 3,$", MR_Next}, {"\"time_unit\": \"us\",?$"}, {"\"name\": \"BM_RepeatTimeUnit/repeats:3_median\",$"}, @@ -771,6 +788,7 @@ {"\"repetitions\": 3,$", MR_Next}, {"\"threads\": 1,$", MR_Next}, {"\"aggregate_name\": \"median\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, {"\"iterations\": 3,$", MR_Next}, {"\"time_unit\": \"us\",?$"}, {"\"name\": \"BM_RepeatTimeUnit/repeats:3_stddev\",$"}, @@ -781,6 +799,7 @@ {"\"repetitions\": 3,$", MR_Next}, {"\"threads\": 1,$", MR_Next}, {"\"aggregate_name\": \"stddev\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, {"\"iterations\": 3,$", MR_Next}, {"\"time_unit\": \"us\",?$"}}); ADD_CASES(TC_CSVOut, @@ -869,6 +888,7 @@ {"\"repetitions\": 3,$", MR_Next}, {"\"threads\": 1,$", MR_Next}, {"\"aggregate_name\": \"mean\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, {"\"iterations\": 3,$", MR_Next}, {"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next}, {"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time_median\",$"}, @@ -880,6 +900,7 @@ {"\"repetitions\": 3,$", MR_Next}, {"\"threads\": 1,$", MR_Next}, {"\"aggregate_name\": \"median\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, {"\"iterations\": 3,$", MR_Next}, {"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next}, {"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time_stddev\",$"}, @@ -891,6 +912,7 @@ {"\"repetitions\": 3,$", MR_Next}, {"\"threads\": 1,$", MR_Next}, {"\"aggregate_name\": \"stddev\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, {"\"iterations\": 3,$", MR_Next}, {"\"real_time\": %float,$", MR_Next}, {"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time_\",$"}, @@ -902,6 +924,7 @@ {"\"repetitions\": 3,$", MR_Next}, {"\"threads\": 1,$", MR_Next}, {"\"aggregate_name\": \"\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, {"\"iterations\": 3,$", MR_Next}, {"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next}}); ADD_CASES( @@ -916,6 +939,154 @@ "manual_time_stddev\",%csv_report$"}, {"^\"BM_UserStats/iterations:5/repeats:3/manual_time_\",%csv_report$"}}); +// ========================================================================= // +// ------------- Testing relative standard deviation statistics ------------ // +// ========================================================================= // + +const auto UserPercentStatistics = [](const std::vector&) { + return 1. 
/ 100.; +}; +void BM_UserPercentStats(benchmark::State& state) { + for (auto _ : state) { + state.SetIterationTime(150 / 10e8); + } +} +// clang-format off +BENCHMARK(BM_UserPercentStats) + ->Repetitions(3) + ->Iterations(5) + ->UseManualTime() + ->Unit(benchmark::TimeUnit::kNanosecond) + ->ComputeStatistics("", UserPercentStatistics, benchmark::StatisticUnit::kPercentage); +// clang-format on + +// check that UserPercent-provided stats is calculated, and is after the +// default-ones empty string as name is intentional, it would sort before +// anything else +ADD_CASES(TC_ConsoleOut, + {{"^BM_UserPercentStats/iterations:5/repeats:3/manual_time [ " + "]* 150 ns %time [ ]*5$"}, + {"^BM_UserPercentStats/iterations:5/repeats:3/manual_time [ " + "]* 150 ns %time [ ]*5$"}, + {"^BM_UserPercentStats/iterations:5/repeats:3/manual_time [ " + "]* 150 ns %time [ ]*5$"}, + {"^BM_UserPercentStats/iterations:5/repeats:3/" + "manual_time_mean [ ]* 150 ns %time [ ]*3$"}, + {"^BM_UserPercentStats/iterations:5/repeats:3/" + "manual_time_median [ ]* 150 ns %time [ ]*3$"}, + {"^BM_UserPercentStats/iterations:5/repeats:3/" + "manual_time_stddev [ ]* 0.000 ns %time [ ]*3$"}, + {"^BM_UserPercentStats/iterations:5/repeats:3/manual_time_ " + "[ ]* 1.00 % [ ]* 1.00 %[ ]*3$"}}); +ADD_CASES( + TC_JSONOut, + {{"\"name\": \"BM_UserPercentStats/iterations:5/repeats:3/manual_time\",$"}, + {"\"family_index\": 23,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": " + "\"BM_UserPercentStats/iterations:5/repeats:3/manual_time\",$", + MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 3,$", MR_Next}, + {"\"repetition_index\": 0,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"iterations\": 5,$", MR_Next}, + {"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next}, + {"\"name\": \"BM_UserPercentStats/iterations:5/repeats:3/manual_time\",$"}, + {"\"family_index\": 23,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": " + "\"BM_UserPercentStats/iterations:5/repeats:3/manual_time\",$", + MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 3,$", MR_Next}, + {"\"repetition_index\": 1,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"iterations\": 5,$", MR_Next}, + {"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next}, + {"\"name\": \"BM_UserPercentStats/iterations:5/repeats:3/manual_time\",$"}, + {"\"family_index\": 23,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": " + "\"BM_UserPercentStats/iterations:5/repeats:3/manual_time\",$", + MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 3,$", MR_Next}, + {"\"repetition_index\": 2,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"iterations\": 5,$", MR_Next}, + {"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next}, + {"\"name\": " + "\"BM_UserPercentStats/iterations:5/repeats:3/manual_time_mean\",$"}, + {"\"family_index\": 23,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": " + "\"BM_UserPercentStats/iterations:5/repeats:3/manual_time\",$", + MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": 3,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"aggregate_name\": \"mean\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, + {"\"iterations\": 3,$", MR_Next}, + {"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next}, + {"\"name\": " + "\"BM_UserPercentStats/iterations:5/repeats:3/manual_time_median\",$"}, + {"\"family_index\": 23,$", MR_Next}, + 
{"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": " + "\"BM_UserPercentStats/iterations:5/repeats:3/manual_time\",$", + MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": 3,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"aggregate_name\": \"median\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, + {"\"iterations\": 3,$", MR_Next}, + {"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next}, + {"\"name\": " + "\"BM_UserPercentStats/iterations:5/repeats:3/manual_time_stddev\",$"}, + {"\"family_index\": 23,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": " + "\"BM_UserPercentStats/iterations:5/repeats:3/manual_time\",$", + MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": 3,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"aggregate_name\": \"stddev\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, + {"\"iterations\": 3,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"name\": " + "\"BM_UserPercentStats/iterations:5/repeats:3/manual_time_\",$"}, + {"\"family_index\": 23,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": " + "\"BM_UserPercentStats/iterations:5/repeats:3/manual_time\",$", + MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": 3,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"aggregate_name\": \"\",$", MR_Next}, + {"\"aggregate_unit\": \"percentage\",$", MR_Next}, + {"\"iterations\": 3,$", MR_Next}, + {"\"real_time\": 1\\.(0)*e-(0)*2,$", MR_Next}}); +ADD_CASES(TC_CSVOut, {{"^\"BM_UserPercentStats/iterations:5/repeats:3/" + "manual_time\",%csv_report$"}, + {"^\"BM_UserPercentStats/iterations:5/repeats:3/" + "manual_time\",%csv_report$"}, + {"^\"BM_UserPercentStats/iterations:5/repeats:3/" + "manual_time\",%csv_report$"}, + {"^\"BM_UserPercentStats/iterations:5/repeats:3/" + "manual_time_mean\",%csv_report$"}, + {"^\"BM_UserPercentStats/iterations:5/repeats:3/" + "manual_time_median\",%csv_report$"}, + {"^\"BM_UserPercentStats/iterations:5/repeats:3/" + "manual_time_stddev\",%csv_report$"}, + {"^\"BM_UserPercentStats/iterations:5/repeats:3/" + "manual_time_\",%csv_report$"}}); + // ========================================================================= // // ------------------------- Testing StrEscape JSON ------------------------ // // ========================================================================= // diff --git a/MicroBenchmarks/libs/benchmark/test/skip_with_error_test.cc b/MicroBenchmarks/libs/benchmark/test/skip_with_error_test.cc --- a/MicroBenchmarks/libs/benchmark/test/skip_with_error_test.cc +++ b/MicroBenchmarks/libs/benchmark/test/skip_with_error_test.cc @@ -97,7 +97,7 @@ void BM_error_during_running(benchmark::State& state) { int first_iter = true; while (state.KeepRunning()) { - if (state.range(0) == 1 && state.thread_index <= (state.threads / 2)) { + if (state.range(0) == 1 && state.thread_index() <= (state.threads() / 2)) { assert(first_iter); first_iter = false; state.SkipWithError("error message"); @@ -119,12 +119,13 @@ void BM_error_during_running_ranged_for(benchmark::State& state) { assert(state.max_iterations > 3 && "test requires at least a few iterations"); - int first_iter = true; + bool first_iter = true; // NOTE: Users should not write the for loop explicitly. 
   for (auto It = state.begin(), End = state.end(); It != End; ++It) {
     if (state.range(0) == 1) {
       assert(first_iter);
       first_iter = false;
+      (void)first_iter;
       state.SkipWithError("error message");
       // Test the unfortunate but documented behavior that the ranged-for loop
       // doesn't automatically terminate when SkipWithError is set.
@@ -142,7 +143,7 @@
   for (auto _ : state) {
     benchmark::DoNotOptimize(state.iterations());
   }
-  if (state.thread_index <= (state.threads / 2))
+  if (state.thread_index() <= (state.threads() / 2))
     state.SkipWithError("error message");
 }
 BENCHMARK(BM_error_after_running)->ThreadRange(1, 8);
@@ -154,7 +155,7 @@
 void BM_error_while_paused(benchmark::State& state) {
   bool first_iter = true;
   while (state.KeepRunning()) {
-    if (state.range(0) == 1 && state.thread_index <= (state.threads / 2)) {
+    if (state.range(0) == 1 && state.thread_index() <= (state.threads() / 2)) {
       assert(first_iter);
       first_iter = false;
       state.PauseTiming();
diff --git a/MicroBenchmarks/libs/benchmark/test/spec_arg_test.cc b/MicroBenchmarks/libs/benchmark/test/spec_arg_test.cc
new file mode 100644
--- /dev/null
+++ b/MicroBenchmarks/libs/benchmark/test/spec_arg_test.cc
@@ -0,0 +1,105 @@
+#include <algorithm>
+#include <cassert>
+#include <cstdint>
+#include <cstdlib>
+#include <cstring>
+#include <iostream>
+#include <limits>
+#include <string>
+#include <vector>
+
+#include "benchmark/benchmark.h"
+
+// Tests that we can override benchmark-spec value from FLAGS_benchmark_filter
+// with argument to RunSpecifiedBenchmarks(...).
+
+namespace {
+
+class TestReporter : public benchmark::ConsoleReporter {
+ public:
+  virtual bool ReportContext(const Context& context) BENCHMARK_OVERRIDE {
+    return ConsoleReporter::ReportContext(context);
+  };
+
+  virtual void ReportRuns(const std::vector<Run>& report) BENCHMARK_OVERRIDE {
+    assert(report.size() == 1);
+    matched_functions.push_back(report[0].run_name.function_name);
+    ConsoleReporter::ReportRuns(report);
+  };
+
+  TestReporter() {}
+
+  virtual ~TestReporter() {}
+
+  const std::vector<std::string>& GetMatchedFunctions() const {
+    return matched_functions;
+  }
+
+ private:
+  std::vector<std::string> matched_functions;
+};
+
+}  // end namespace
+
+static void BM_NotChosen(benchmark::State& state) {
+  assert(false && "SHOULD NOT BE CALLED");
+  for (auto _ : state) {
+  }
+}
+BENCHMARK(BM_NotChosen);
+
+static void BM_Chosen(benchmark::State& state) {
+  for (auto _ : state) {
+  }
+}
+BENCHMARK(BM_Chosen);
+
+int main(int argc, char** argv) {
+  const std::string flag = "BM_NotChosen";
+
+  // Verify that argv specify --benchmark_filter=BM_NotChosen.
+  bool found = false;
+  for (int i = 0; i < argc; ++i) {
+    if (strcmp("--benchmark_filter=BM_NotChosen", argv[i]) == 0) {
+      found = true;
+      break;
+    }
+  }
+  assert(found);
+
+  benchmark::Initialize(&argc, argv);
+
+  // Check that the current flag value is reported accurately via the
+  // GetBenchmarkFilter() function.
+  if (flag != benchmark::GetBenchmarkFilter()) {
+    std::cerr
+        << "Seeing different value for flags. GetBenchmarkFilter() returns ["
+        << benchmark::GetBenchmarkFilter() << "] expected flag=[" << flag
+        << "]\n";
+    return 1;
+  }
+  TestReporter test_reporter;
+  const char* const spec = "BM_Chosen";
+  const size_t returned_count =
+      benchmark::RunSpecifiedBenchmarks(&test_reporter, spec);
+  assert(returned_count == 1);
+  const std::vector<std::string> matched_functions =
+      test_reporter.GetMatchedFunctions();
+  assert(matched_functions.size() == 1);
+  if (strcmp(spec, matched_functions.front().c_str()) != 0) {
+    std::cerr << "Expected benchmark [" << spec << "] to run, but got ["
+              << matched_functions.front() << "]\n";
+    return 2;
+  }
+
+  // Test that SetBenchmarkFilter works.
+  const std::string golden_value = "golden_value";
+  benchmark::SetBenchmarkFilter(golden_value);
+  std::string current_value = benchmark::GetBenchmarkFilter();
+  if (golden_value != current_value) {
+    std::cerr << "Expected [" << golden_value
+              << "] for --benchmark_filter but got [" << current_value << "]\n";
+    return 3;
+  }
+  return 0;
+}
diff --git a/MicroBenchmarks/libs/benchmark/test/statistics_gtest.cc b/MicroBenchmarks/libs/benchmark/test/statistics_gtest.cc
--- a/MicroBenchmarks/libs/benchmark/test/statistics_gtest.cc
+++ b/MicroBenchmarks/libs/benchmark/test/statistics_gtest.cc
@@ -25,4 +25,11 @@
                    1.151086443322134);
 }

+TEST(StatisticsTest, CV) {
+  EXPECT_DOUBLE_EQ(benchmark::StatisticsCV({101, 101, 101, 101}), 0.0);
+  EXPECT_DOUBLE_EQ(benchmark::StatisticsCV({1, 2, 3}), 1. / 2.);
+  EXPECT_DOUBLE_EQ(benchmark::StatisticsCV({2.5, 2.4, 3.3, 4.2, 5.1}),
+                   0.32888184094918121);
+}
+
 }  // end namespace
diff --git a/MicroBenchmarks/libs/benchmark/test/string_util_gtest.cc b/MicroBenchmarks/libs/benchmark/test/string_util_gtest.cc
--- a/MicroBenchmarks/libs/benchmark/test/string_util_gtest.cc
+++ b/MicroBenchmarks/libs/benchmark/test/string_util_gtest.cc
@@ -2,8 +2,8 @@
 // statistics_test - Unit tests for src/statistics.cc
 //===---------------------------------------------------------------------===//

-#include "../src/string_util.h"
 #include "../src/internal_macros.h"
+#include "../src/string_util.h"
 #include "gtest/gtest.h"

 namespace {
@@ -32,7 +32,8 @@
 #elif ULONG_MAX == 0xFFFFFFFFFFFFFFFFul
   {
     size_t pos = 0;
-    EXPECT_EQ(0xFFFFFFFFFFFFFFFFul, benchmark::stoul("18446744073709551615", &pos));
+    EXPECT_EQ(0xFFFFFFFFFFFFFFFFul,
+              benchmark::stoul("18446744073709551615", &pos));
     EXPECT_EQ(20ul, pos);
   }
 #endif
@@ -62,91 +63,81 @@
     EXPECT_EQ(4ul, pos);
   }
 #ifndef BENCHMARK_HAS_NO_EXCEPTIONS
-  {
-    ASSERT_THROW(benchmark::stoul("this is a test"), std::invalid_argument);
-  }
+  { ASSERT_THROW(benchmark::stoul("this is a test"), std::invalid_argument); }
 #endif
 }

-TEST(StringUtilTest, stoi) {
-  {
-    size_t pos = 0;
-    EXPECT_EQ(0, benchmark::stoi("0", &pos));
-    EXPECT_EQ(1ul, pos);
-  }
-  {
-    size_t pos = 0;
-    EXPECT_EQ(-17, benchmark::stoi("-17", &pos));
-    EXPECT_EQ(3ul, pos);
-  }
-  {
-    size_t pos = 0;
-    EXPECT_EQ(1357, benchmark::stoi("1357", &pos));
-    EXPECT_EQ(4ul, pos);
-  }
-  {
-    size_t pos = 0;
-    EXPECT_EQ(10, benchmark::stoi("1010", &pos, 2));
-    EXPECT_EQ(4ul, pos);
-  }
-  {
-    size_t pos = 0;
-    EXPECT_EQ(520, benchmark::stoi("1010", &pos, 8));
-    EXPECT_EQ(4ul, pos);
-  }
-  {
-    size_t pos = 0;
-    EXPECT_EQ(1010, benchmark::stoi("1010", &pos, 10));
-    EXPECT_EQ(4ul, pos);
-  }
-  {
-    size_t pos = 0;
-    EXPECT_EQ(4112, benchmark::stoi("1010", &pos, 16));
-    EXPECT_EQ(4ul, pos);
-  }
-  {
-    size_t pos = 0;
-    EXPECT_EQ(0xBEEF, benchmark::stoi("BEEF", &pos, 16));
-    EXPECT_EQ(4ul, pos);
-  }
+TEST(StringUtilTest, stoi){{size_t pos = 0;
+EXPECT_EQ(0, benchmark::stoi("0", &pos));
+EXPECT_EQ(1ul, pos);
+}  // namespace
+{
+  size_t pos = 0;
+  EXPECT_EQ(-17, benchmark::stoi("-17", &pos));
+  EXPECT_EQ(3ul, pos);
+}
+{
+  size_t pos = 0;
+  EXPECT_EQ(1357, benchmark::stoi("1357", &pos));
+  EXPECT_EQ(4ul, pos);
+}
+{
+  size_t pos = 0;
+  EXPECT_EQ(10, benchmark::stoi("1010", &pos, 2));
+  EXPECT_EQ(4ul, pos);
+}
+{
+  size_t pos = 0;
+  EXPECT_EQ(520, benchmark::stoi("1010", &pos, 8));
+  EXPECT_EQ(4ul, pos);
+}
+{
+  size_t pos = 0;
+  EXPECT_EQ(1010, benchmark::stoi("1010", &pos, 10));
+  EXPECT_EQ(4ul, pos);
+}
+{
+  size_t pos = 0;
+  EXPECT_EQ(4112, benchmark::stoi("1010", &pos, 16));
+  EXPECT_EQ(4ul, pos);
+}
+{
+  size_t pos = 0;
+  EXPECT_EQ(0xBEEF, benchmark::stoi("BEEF", &pos, 16));
+  EXPECT_EQ(4ul, pos);
+}
 #ifndef BENCHMARK_HAS_NO_EXCEPTIONS
-  {
-    ASSERT_THROW(benchmark::stoi("this is a test"), std::invalid_argument);
-  }
+{ ASSERT_THROW(benchmark::stoi("this is a test"), std::invalid_argument); }
 #endif
 }

-TEST(StringUtilTest, stod) {
-  {
-    size_t pos = 0;
-    EXPECT_EQ(0.0, benchmark::stod("0", &pos));
-    EXPECT_EQ(1ul, pos);
-  }
-  {
-    size_t pos = 0;
-    EXPECT_EQ(-84.0, benchmark::stod("-84", &pos));
-    EXPECT_EQ(3ul, pos);
-  }
-  {
-    size_t pos = 0;
-    EXPECT_EQ(1234.0, benchmark::stod("1234", &pos));
-    EXPECT_EQ(4ul, pos);
-  }
-  {
-    size_t pos = 0;
-    EXPECT_EQ(1.5, benchmark::stod("1.5", &pos));
-    EXPECT_EQ(3ul, pos);
-  }
-  {
-    size_t pos = 0;
-    /* Note: exactly representable as double */
-    EXPECT_EQ(-1.25e+9, benchmark::stod("-1.25e+9", &pos));
-    EXPECT_EQ(8ul, pos);
-  }
+TEST(StringUtilTest, stod){{size_t pos = 0;
+EXPECT_EQ(0.0, benchmark::stod("0", &pos));
+EXPECT_EQ(1ul, pos);
+}
+{
+  size_t pos = 0;
+  EXPECT_EQ(-84.0, benchmark::stod("-84", &pos));
+  EXPECT_EQ(3ul, pos);
+}
+{
+  size_t pos = 0;
+  EXPECT_EQ(1234.0, benchmark::stod("1234", &pos));
+  EXPECT_EQ(4ul, pos);
+}
+{
+  size_t pos = 0;
+  EXPECT_EQ(1.5, benchmark::stod("1.5", &pos));
+  EXPECT_EQ(3ul, pos);
+}
+{
+  size_t pos = 0;
+  /* Note: exactly representable as double */
+  EXPECT_EQ(-1.25e+9, benchmark::stod("-1.25e+9", &pos));
+  EXPECT_EQ(8ul, pos);
+}
 #ifndef BENCHMARK_HAS_NO_EXCEPTIONS
-  {
-    ASSERT_THROW(benchmark::stod("this is a test"), std::invalid_argument);
-  }
+{ ASSERT_THROW(benchmark::stod("this is a test"), std::invalid_argument); }
 #endif
 }
diff --git a/MicroBenchmarks/libs/benchmark/test/templated_fixture_test.cc b/MicroBenchmarks/libs/benchmark/test/templated_fixture_test.cc
--- a/MicroBenchmarks/libs/benchmark/test/templated_fixture_test.cc
+++ b/MicroBenchmarks/libs/benchmark/test/templated_fixture_test.cc
@@ -1,9 +1,9 @@
-#include "benchmark/benchmark.h"
-
 #include <cassert>
 #include <memory>

+#include "benchmark/benchmark.h"
+
 template <typename T>
 class MyFixture : public ::benchmark::Fixture {
  public:
diff --git a/MicroBenchmarks/libs/benchmark/test/time_unit_gtest.cc b/MicroBenchmarks/libs/benchmark/test/time_unit_gtest.cc
new file mode 100644
--- /dev/null
+++ b/MicroBenchmarks/libs/benchmark/test/time_unit_gtest.cc
@@ -0,0 +1,37 @@
+#include "../include/benchmark/benchmark.h"
+#include "gtest/gtest.h"
+
+namespace benchmark {
+namespace internal {
+
+namespace {
+
+class DummyBenchmark : public Benchmark {
+ public:
+  DummyBenchmark() : Benchmark("dummy") {}
+  virtual void Run(State&) override {}
+};
+
+TEST(DefaultTimeUnitTest, TimeUnitIsNotSet) {
+  DummyBenchmark benchmark;
+  EXPECT_EQ(benchmark.GetTimeUnit(), kNanosecond);
+}
+
+TEST(DefaultTimeUnitTest, DefaultIsSet) {
+  DummyBenchmark benchmark;
+  EXPECT_EQ(benchmark.GetTimeUnit(), kNanosecond);
+  SetDefaultTimeUnit(kMillisecond);
+  EXPECT_EQ(benchmark.GetTimeUnit(), kMillisecond);
+}
+
+TEST(DefaultTimeUnitTest, DefaultAndExplicitUnitIsSet) {
+  DummyBenchmark benchmark;
+  benchmark.Unit(kMillisecond);
+  SetDefaultTimeUnit(kMicrosecond);
+
+  EXPECT_EQ(benchmark.GetTimeUnit(), kMillisecond);
+}
+
+}  // namespace
+}  // namespace internal
+}  // namespace benchmark
diff --git a/MicroBenchmarks/libs/benchmark/test/user_counters_tabular_test.cc b/MicroBenchmarks/libs/benchmark/test/user_counters_tabular_test.cc
--- a/MicroBenchmarks/libs/benchmark/test/user_counters_tabular_test.cc
+++ b/MicroBenchmarks/libs/benchmark/test/user_counters_tabular_test.cc
@@ -18,12 +18,14 @@
 {"^BM_Counters_Tabular/repeats:2/threads:1 %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
 {"^BM_Counters_Tabular/repeats:2/threads:1_mean %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
 {"^BM_Counters_Tabular/repeats:2/threads:1_median %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
- {"^BM_Counters_Tabular/repeats:2/threads:1_stddev %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
+ {"^BM_Counters_Tabular/repeats:2/threads:1_stddev %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
+ {"^BM_Counters_Tabular/repeats:2/threads:1_cv %console_percentage_report [ ]*%percentage[ ]*% [ ]*%percentage[ ]*% [ ]*%percentage[ ]*% [ ]*%percentage[ ]*% [ ]*%percentage[ ]*% [ ]*%percentage[ ]*%$", MR_Next},
 {"^BM_Counters_Tabular/repeats:2/threads:2 %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
 {"^BM_Counters_Tabular/repeats:2/threads:2 %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
 {"^BM_Counters_Tabular/repeats:2/threads:2_mean %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
 {"^BM_Counters_Tabular/repeats:2/threads:2_median %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
- {"^BM_Counters_Tabular/repeats:2/threads:2_stddev %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
+ {"^BM_Counters_Tabular/repeats:2/threads:2_stddev %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
+ {"^BM_Counters_Tabular/repeats:2/threads:2_cv %console_percentage_report [ ]*%percentage[ ]*% [ ]*%percentage[ ]*% [ ]*%percentage[ ]*% [ ]*%percentage[ ]*% [ ]*%percentage[ ]*% [ ]*%percentage[ ]*%$", MR_Next},
 {"^BM_CounterRates_Tabular/threads:%int %console_report [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s$", MR_Next},
 {"^BM_CounterRates_Tabular/threads:%int %console_report [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s$", MR_Next},
 {"^BM_CounterRates_Tabular/threads:%int %console_report [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s$", MR_Next},
@@ -125,6 +127,7 @@
 {"\"repetitions\": 2,$", MR_Next},
 {"\"threads\": 1,$", MR_Next},
 {"\"aggregate_name\": \"mean\",$", MR_Next},
+ {"\"aggregate_unit\": \"time\",$", MR_Next},
 {"\"iterations\": %int,$", MR_Next},
 {"\"real_time\": %float,$", MR_Next},
 {"\"cpu_time\": %float,$", MR_Next},
@@ -146,6 +149,7 @@
 {"\"repetitions\": 2,$", MR_Next},
 {"\"threads\": 1,$", MR_Next},
 {"\"aggregate_name\": \"median\",$", MR_Next},
+ {"\"aggregate_unit\": \"time\",$", MR_Next},
 {"\"iterations\": %int,$", MR_Next},
 {"\"real_time\": %float,$", MR_Next},
 {"\"cpu_time\": %float,$", MR_Next},
@@ -167,6 +171,29 @@
 {"\"repetitions\": 2,$", MR_Next},
 {"\"threads\": 1,$", MR_Next},
 {"\"aggregate_name\": \"stddev\",$", MR_Next},
+ {"\"aggregate_unit\": \"time\",$", MR_Next},
+ {"\"iterations\": %int,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
+ {"\"time_unit\": \"ns\",$", MR_Next},
+ {"\"Bar\": %float,$", MR_Next},
+ {"\"Bat\": %float,$", MR_Next},
+ {"\"Baz\": %float,$", MR_Next},
+ {"\"Foo\": %float,$", MR_Next},
+ {"\"Frob\": %float,$", MR_Next},
+ {"\"Lob\": %float$", MR_Next},
+ {"}", MR_Next}});
+ADD_CASES(TC_JSONOut,
+          {{"\"name\": \"BM_Counters_Tabular/repeats:2/threads:1_cv\",$"},
+           {"\"family_index\": 0,$", MR_Next},
+           {"\"per_family_instance_index\": 0,$", MR_Next},
+           {"\"run_name\": \"BM_Counters_Tabular/repeats:2/threads:1\",$",
+            MR_Next},
+           {"\"run_type\": \"aggregate\",$", MR_Next},
+           {"\"repetitions\": 2,$", MR_Next},
+           {"\"threads\": 1,$", MR_Next},
+           {"\"aggregate_name\": \"cv\",$", MR_Next},
+           {"\"aggregate_unit\": \"percentage\",$", MR_Next},
 {"\"iterations\": %int,$", MR_Next},
 {"\"real_time\": %float,$", MR_Next},
 {"\"cpu_time\": %float,$", MR_Next},
@@ -231,6 +258,7 @@
 {"\"repetitions\": 2,$", MR_Next},
 {"\"threads\": 2,$", MR_Next},
 {"\"aggregate_name\": \"median\",$", MR_Next},
+ {"\"aggregate_unit\": \"time\",$", MR_Next},
 {"\"iterations\": %int,$", MR_Next},
 {"\"real_time\": %float,$", MR_Next},
 {"\"cpu_time\": %float,$", MR_Next},
@@ -252,6 +280,29 @@
 {"\"repetitions\": 2,$", MR_Next},
 {"\"threads\": 2,$", MR_Next},
 {"\"aggregate_name\": \"stddev\",$", MR_Next},
+ {"\"aggregate_unit\": \"time\",$", MR_Next},
+ {"\"iterations\": %int,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
+ {"\"time_unit\": \"ns\",$", MR_Next},
+ {"\"Bar\": %float,$", MR_Next},
+ {"\"Bat\": %float,$", MR_Next},
+ {"\"Baz\": %float,$", MR_Next},
+ {"\"Foo\": %float,$", MR_Next},
+ {"\"Frob\": %float,$", MR_Next},
+ {"\"Lob\": %float$", MR_Next},
+ {"}", MR_Next}});
+ADD_CASES(TC_JSONOut,
+          {{"\"name\": \"BM_Counters_Tabular/repeats:2/threads:2_cv\",$"},
+           {"\"family_index\": 0,$", MR_Next},
+           {"\"per_family_instance_index\": 1,$", MR_Next},
+           {"\"run_name\": \"BM_Counters_Tabular/repeats:2/threads:2\",$",
+            MR_Next},
+           {"\"run_type\": \"aggregate\",$", MR_Next},
+           {"\"repetitions\": 2,$", MR_Next},
+           {"\"threads\": 2,$", MR_Next},
+           {"\"aggregate_name\": \"cv\",$", MR_Next},
+           {"\"aggregate_unit\": \"percentage\",$", MR_Next},
 {"\"iterations\": %int,$", MR_Next},
 {"\"real_time\": %float,$", MR_Next},
 {"\"cpu_time\": %float,$", MR_Next},
@@ -278,6 +329,9 @@
 ADD_CASES(TC_CSVOut,
 {{"^\"BM_Counters_Tabular/repeats:2/threads:1_stddev\",%csv_report,"
 "%float,%float,%float,%float,%float,%float$"}});
+ADD_CASES(TC_CSVOut,
+          {{"^\"BM_Counters_Tabular/repeats:2/threads:1_cv\",%csv_report,"
+            "%float,%float,%float,%float,%float,%float$"}});
 ADD_CASES(TC_CSVOut,
 {{"^\"BM_Counters_Tabular/repeats:2/threads:2\",%csv_report,"
 "%float,%float,%float,%float,%float,%float$"}});
@@ -293,6 +347,9 @@
 ADD_CASES(TC_CSVOut,
 {{"^\"BM_Counters_Tabular/repeats:2/threads:2_stddev\",%csv_report,"
 "%float,%float,%float,%float,%float,%float$"}});
+ADD_CASES(TC_CSVOut,
+          {{"^\"BM_Counters_Tabular/repeats:2/threads:2_cv\",%csv_report,"
+            "%float,%float,%float,%float,%float,%float$"}});
 // VS2013 does not allow this function to be passed as a lambda argument
 // to CHECK_BENCHMARK_RESULTS()
 void CheckTabular(Results const& e) {
diff --git a/MicroBenchmarks/libs/benchmark/test/user_counters_test.cc b/MicroBenchmarks/libs/benchmark/test/user_counters_test.cc
--- a/MicroBenchmarks/libs/benchmark/test/user_counters_test.cc
+++ b/MicroBenchmarks/libs/benchmark/test/user_counters_test.cc
@@ -26,7 +26,7 @@
   for (auto _ : state) {
   }
   state.counters["foo"] = 1;
-  state.counters["bar"] = 2 * (double)state.iterations();
+  state.counters["bar"] = 2 * static_cast<double>(state.iterations());
 }
 BENCHMARK(BM_Counters_Simple);
 ADD_CASES(TC_ConsoleOut,
diff --git a/MicroBenchmarks/libs/benchmark/test/user_counters_thousands_test.cc b/MicroBenchmarks/libs/benchmark/test/user_counters_thousands_test.cc
--- a/MicroBenchmarks/libs/benchmark/test/user_counters_thousands_test.cc
+++ b/MicroBenchmarks/libs/benchmark/test/user_counters_thousands_test.cc
@@ -96,6 +96,7 @@
 {"\"repetitions\": 2,$", MR_Next},
 {"\"threads\": 1,$", MR_Next},
 {"\"aggregate_name\": \"mean\",$", MR_Next},
+ {"\"aggregate_unit\": \"time\",$", MR_Next},
 {"\"iterations\": 2,$", MR_Next},
 {"\"real_time\": %float,$", MR_Next},
 {"\"cpu_time\": %float,$", MR_Next},
@@ -115,6 +116,7 @@
 {"\"repetitions\": 2,$", MR_Next},
 {"\"threads\": 1,$", MR_Next},
 {"\"aggregate_name\": \"median\",$", MR_Next},
+ {"\"aggregate_unit\": \"time\",$", MR_Next},
 {"\"iterations\": 2,$", MR_Next},
 {"\"real_time\": %float,$", MR_Next},
 {"\"cpu_time\": %float,$", MR_Next},
@@ -134,6 +136,7 @@
 {"\"repetitions\": 2,$", MR_Next},
 {"\"threads\": 1,$", MR_Next},
 {"\"aggregate_name\": \"stddev\",$", MR_Next},
+ {"\"aggregate_unit\": \"time\",$", MR_Next},
 {"\"iterations\": 2,$", MR_Next},
 {"\"real_time\": %float,$", MR_Next},
 {"\"cpu_time\": %float,$", MR_Next},
diff --git a/MicroBenchmarks/libs/benchmark/tools/gbench/Inputs/test4_run0.json b/MicroBenchmarks/libs/benchmark/tools/gbench/Inputs/test4_run0.json
new file mode 100644
--- /dev/null
+++ b/MicroBenchmarks/libs/benchmark/tools/gbench/Inputs/test4_run0.json
@@ -0,0 +1,21 @@
+{
+  "context": {
+    "date": "2016-08-02 17:44:46",
+    "num_cpus": 4,
+    "mhz_per_cpu": 4228,
+    "cpu_scaling_enabled": false,
+    "library_build_type": "release"
+  },
+  "benchmarks": [
+    {
+      "name": "whocares",
+      "run_type": "aggregate",
+      "aggregate_name": "zz",
+      "aggregate_unit": "percentage",
+      "iterations": 1000,
+      "real_time": 0.01,
+      "cpu_time": 0.10,
+      "time_unit": "ns"
+    }
+  ]
+}
diff --git a/MicroBenchmarks/libs/benchmark/tools/gbench/Inputs/test4_run1.json b/MicroBenchmarks/libs/benchmark/tools/gbench/Inputs/test4_run1.json
new file mode 100644
--- /dev/null
+++ b/MicroBenchmarks/libs/benchmark/tools/gbench/Inputs/test4_run1.json
@@ -0,0 +1,21 @@
+{
+  "context": {
+    "date": "2016-08-02 17:44:46",
+    "num_cpus": 4,
+    "mhz_per_cpu": 4228,
+    "cpu_scaling_enabled": false,
+    "library_build_type": "release"
+  },
+  "benchmarks": [
+    {
+      "name": "whocares",
+      "run_type": "aggregate",
+      "aggregate_name": "zz",
+      "aggregate_unit": "percentage",
+      "iterations": 1000,
+      "real_time": 0.005,
+      "cpu_time": 0.15,
+      "time_unit": "ns"
+    }
+  ]
+}
diff --git a/MicroBenchmarks/libs/benchmark/tools/gbench/report.py b/MicroBenchmarks/libs/benchmark/tools/gbench/report.py
--- a/MicroBenchmarks/libs/benchmark/tools/gbench/report.py
+++ b/MicroBenchmarks/libs/benchmark/tools/gbench/report.py
@@ -7,7 +7,9 @@
 import copy
 import random

-from scipy.stats import mannwhitneyu
+from scipy.stats import mannwhitneyu, gmean
+from numpy import array +from pandas import Timedelta class BenchmarkColor(object): @@ -150,6 +152,30 @@ return partitions +def get_timedelta_field_as_seconds(benchmark, field_name): + """ + Get value of field_name field of benchmark, which is time with time unit + time_unit, as time in seconds. + """ + time_unit = benchmark['time_unit'] if 'time_unit' in benchmark else 's' + dt = Timedelta(benchmark[field_name], time_unit) + return dt / Timedelta(1, 's') + + +def calculate_geomean(json): + """ + Extract all real/cpu times from all the benchmarks as seconds, + and calculate their geomean. + """ + times = [] + for benchmark in json['benchmarks']: + if 'run_type' in benchmark and benchmark['run_type'] == 'aggregate': + continue + times.append([get_timedelta_field_as_seconds(benchmark, 'real_time'), + get_timedelta_field_as_seconds(benchmark, 'cpu_time')]) + return gmean(times) if times else array([]) + + def extract_field(partition, field_name): # The count of elements may be different. We want *all* of them. lhs = [x[field_name] for x in partition[0]] @@ -174,6 +200,7 @@ return (min_rep_cnt >= UTEST_OPTIMAL_REPETITIONS), cpu_pvalue, time_pvalue + def print_utest(bc_name, utest, utest_alpha, first_col_width, use_color=True): def get_utest_color(pval): return BC_FAIL if pval >= utest_alpha else BC_OKGREEN @@ -242,7 +269,8 @@ if utest: timings_cpu = extract_field(partition, 'cpu_time') timings_time = extract_field(partition, 'real_time') - have_optimal_repetitions, cpu_pvalue, time_pvalue = calc_utest(timings_cpu, timings_time) + have_optimal_repetitions, cpu_pvalue, time_pvalue = calc_utest( + timings_cpu, timings_time) if cpu_pvalue and time_pvalue: utest_results = { 'have_optimal_repetitions': have_optimal_repetitions, @@ -268,6 +296,25 @@ 'utest': utest_results }) + lhs_gmean = calculate_geomean(json1) + rhs_gmean = calculate_geomean(json2) + if lhs_gmean.any() and rhs_gmean.any(): + diff_report.append({ + 'name': 'OVERALL_GEOMEAN', + 'measurements': [{ + 'real_time': lhs_gmean[0], + 'cpu_time': lhs_gmean[1], + 'real_time_other': rhs_gmean[0], + 'cpu_time_other': rhs_gmean[1], + 'time': calculate_change(lhs_gmean[0], rhs_gmean[0]), + 'cpu': calculate_change(lhs_gmean[1], rhs_gmean[1]) + }], + 'time_unit': 's', + 'run_type': 'aggregate', + 'aggregate_name': 'geomean', + 'utest': {} + }) + return diff_report @@ -307,19 +354,19 @@ if not include_aggregates_only or not 'run_type' in benchmark or benchmark['run_type'] == 'aggregate': for measurement in benchmark['measurements']: output_strs += [color_format(use_color, - fmt_str, - BC_HEADER, - benchmark['name'], - first_col_width, - get_color(measurement['time']), - measurement['time'], - get_color(measurement['cpu']), - measurement['cpu'], - measurement['real_time'], - measurement['real_time_other'], - measurement['cpu_time'], - measurement['cpu_time_other'], - endc=BC_ENDC)] + fmt_str, + BC_HEADER, + benchmark['name'], + first_col_width, + get_color(measurement['time']), + measurement['time'], + get_color(measurement['cpu']), + measurement['cpu'], + measurement['real_time'], + measurement['real_time_other'], + measurement['cpu_time'], + measurement['cpu_time_other'], + endc=BC_ENDC)] # After processing the measurements, if requested and # if applicable (e.g. 
u-test exists for given benchmark), @@ -403,6 +450,7 @@ '-0.1000', '100', '110', '100', '90'], ['BM_ThirdFaster', '-0.3333', '-0.3334', '100', '67', '100', '67'], ['BM_NotBadTimeUnit', '-0.9000', '+0.2000', '0', '0', '0', '1'], + ['OVERALL_GEOMEAN', '-0.8344', '-0.8026', '0', '0', '0', '0'] ] output_lines_with_header = print_difference_report( self.json_diff_report, use_color=False) @@ -489,6 +537,15 @@ 'time_unit': 's', 'utest': {} }, + { + 'name': 'OVERALL_GEOMEAN', + 'measurements': [{'real_time': 1.193776641714438e-06, 'cpu_time': 1.2144445585302297e-06, + 'real_time_other': 1.9768988699420897e-07, 'cpu_time_other': 2.397447755209533e-07, + 'time': -0.834399601997324, 'cpu': -0.8025889499549471}], + 'time_unit': 's', + 'run_type': 'aggregate', + 'aggregate_name': 'geomean', 'utest': {} + }, ] self.assertEqual(len(self.json_diff_report), len(expected_output)) for out, expected in zip( @@ -524,6 +581,7 @@ ['./4', '-0.5000', '-0.5000', '40', '20', '40', '20'], ['Prefix/.', '-0.5000', '-0.5000', '20', '10', '20', '10'], ['Prefix/./3', '-0.5000', '-0.5000', '30', '15', '30', '15'], + ['OVERALL_GEOMEAN', '-0.5000', '-0.5000', '0', '0', '0', '0'] ] output_lines_with_header = print_difference_report( self.json_diff_report, use_color=False) @@ -561,6 +619,16 @@ 'measurements': [{'time': -0.5, 'cpu': -0.5, 'real_time': 30, 'real_time_other': 15, 'cpu_time': 30, 'cpu_time_other': 15}], 'time_unit': 'ns', 'utest': {} + }, + { + 'name': 'OVERALL_GEOMEAN', + 'measurements': [{'real_time': 2.213363839400641e-08, 'cpu_time': 2.213363839400641e-08, + 'real_time_other': 1.1066819197003185e-08, 'cpu_time_other': 1.1066819197003185e-08, + 'time': -0.5000000000000009, 'cpu': -0.5000000000000009}], + 'time_unit': 's', + 'run_type': 'aggregate', + 'aggregate_name': 'geomean', + 'utest': {} } ] self.assertEqual(len(self.json_diff_report), len(expected_output)) @@ -599,8 +667,8 @@ ['BM_Two', '+0.1111', '-0.0111', '9', '10', '90', '89'], ['BM_Two', '-0.1250', '-0.1628', '8', '7', '86', '72'], ['BM_Two_pvalue', - '0.6985', - '0.6985', + '1.0000', + '0.6667', 'U', 'Test,', 'Repetitions:', @@ -617,7 +685,7 @@ ['short', '-0.4325', '-0.1351', '8', '5', '77', '67'], ['short_pvalue', '0.7671', - '0.1489', + '0.2000', 'U', 'Test,', 'Repetitions:', @@ -631,6 +699,7 @@ 'repetitions', 'recommended.'], ['medium', '-0.3750', '-0.3375', '8', '5', '80', '53'], + ['OVERALL_GEOMEAN', '+1.6405', '-0.6985', '0', '0', '0', '0'] ] output_lines_with_header = print_difference_report( self.json_diff_report, utest=True, utest_alpha=0.05, use_color=False) @@ -646,8 +715,8 @@ expect_lines = [ ['BM_One', '-0.1000', '+0.1000', '10', '9', '100', '110'], ['BM_Two_pvalue', - '0.6985', - '0.6985', + '1.0000', + '0.6667', 'U', 'Test,', 'Repetitions:', @@ -664,7 +733,7 @@ ['short', '-0.4325', '-0.1351', '8', '5', '77', '67'], ['short_pvalue', '0.7671', - '0.1489', + '0.2000', 'U', 'Test,', 'Repetitions:', @@ -677,6 +746,7 @@ '9+', 'repetitions', 'recommended.'], + ['OVERALL_GEOMEAN', '+1.6405', '-0.6985', '0', '0', '0', '0'] ] output_lines_with_header = print_difference_report( self.json_diff_report, include_aggregates_only=True, utest=True, utest_alpha=0.05, use_color=False) @@ -717,7 +787,7 @@ ], 'time_unit': 'ns', 'utest': { - 'have_optimal_repetitions': False, 'cpu_pvalue': 0.6985353583033387, 'time_pvalue': 0.6985353583033387 + 'have_optimal_repetitions': False, 'cpu_pvalue': 0.6666666666666666, 'time_pvalue': 1.0 } }, { @@ -738,7 +808,7 @@ ], 'time_unit': 'ns', 'utest': { - 'have_optimal_repetitions': False, 'cpu_pvalue': 
@@ -738,7 +808,7 @@
             ],
             'time_unit': 'ns',
             'utest': {
-                'have_optimal_repetitions': False, 'cpu_pvalue': 0.14891467317876572, 'time_pvalue': 0.7670968684102772
+                'have_optimal_repetitions': False, 'cpu_pvalue': 0.2, 'time_pvalue': 0.7670968684102772
             }
         },
         {
@@ -753,6 +823,16 @@
             ],
             'time_unit': 'ns',
             'utest': {}
+            },
+            {
+                'name': 'OVERALL_GEOMEAN',
+                'measurements': [{'real_time': 8.48528137423858e-09, 'cpu_time': 8.441336246629233e-08,
+                                  'real_time_other': 2.2405267593145244e-08, 'cpu_time_other': 2.5453661413660466e-08,
+                                  'time': 1.6404861082353634, 'cpu': -0.6984640740519662}],
+                'time_unit': 's',
+                'run_type': 'aggregate',
+                'aggregate_name': 'geomean',
+                'utest': {}
             }
         ]
         self.assertEqual(len(self.json_diff_report), len(expected_output))
@@ -792,8 +872,8 @@
             ['BM_Two', '+0.1111', '-0.0111', '9', '10', '90', '89'],
             ['BM_Two', '-0.1250', '-0.1628', '8', '7', '86', '72'],
             ['BM_Two_pvalue',
-             '0.6985',
-             '0.6985',
+             '1.0000',
+             '0.6667',
              'U',
              'Test,',
              'Repetitions:',
@@ -810,7 +890,7 @@
             ['short', '-0.4325', '-0.1351', '8', '5', '77', '67'],
             ['short_pvalue',
              '0.7671',
-             '0.1489',
+             '0.2000',
              'U',
              'Test,',
              'Repetitions:',
@@ -823,7 +903,8 @@
              '9+',
              'repetitions',
              'recommended.'],
-            ['medium', '-0.3750', '-0.3375', '8', '5', '80', '53']
+            ['medium', '-0.3750', '-0.3375', '8', '5', '80', '53'],
+            ['OVERALL_GEOMEAN', '+1.6405', '-0.6985', '0', '0', '0', '0']
         ]
         output_lines_with_header = print_difference_report(
             self.json_diff_report,
@@ -865,7 +946,7 @@
             ],
             'time_unit': 'ns',
             'utest': {
-                'have_optimal_repetitions': False, 'cpu_pvalue': 0.6985353583033387, 'time_pvalue': 0.6985353583033387
+                'have_optimal_repetitions': False, 'cpu_pvalue': 0.6666666666666666, 'time_pvalue': 1.0
             }
         },
         {
@@ -886,7 +967,7 @@
             ],
             'time_unit': 'ns',
             'utest': {
-                'have_optimal_repetitions': False, 'cpu_pvalue': 0.14891467317876572, 'time_pvalue': 0.7670968684102772
+                'have_optimal_repetitions': False, 'cpu_pvalue': 0.2, 'time_pvalue': 0.7670968684102772
             }
         },
         {
@@ -898,11 +979,83 @@
                      'real_time': 8,
                      'cpu_time_other': 53,
                      'cpu': -0.3375
-                     }
+                }
             ],
             'utest': {},
             'time_unit': u'ns',
             'aggregate_name': ''
+            },
+            {
+                'name': 'OVERALL_GEOMEAN',
+                'measurements': [{'real_time': 8.48528137423858e-09, 'cpu_time': 8.441336246629233e-08,
+                                  'real_time_other': 2.2405267593145244e-08, 'cpu_time_other': 2.5453661413660466e-08,
+                                  'time': 1.6404861082353634, 'cpu': -0.6984640740519662}],
+                'time_unit': 's',
+                'run_type': 'aggregate',
+                'aggregate_name': 'geomean',
+                'utest': {}
+            }
+        ]
+        self.assertEqual(len(self.json_diff_report), len(expected_output))
+        for out, expected in zip(
+                self.json_diff_report, expected_output):
+            self.assertEqual(out['name'], expected['name'])
+            self.assertEqual(out['time_unit'], expected['time_unit'])
+            assert_utest(self, out, expected)
+            assert_measurements(self, out, expected)
+
+
+class TestReportDifferenceForPercentageAggregates(
+        unittest.TestCase):
+    @classmethod
+    def setUpClass(cls):
+        def load_results():
+            import json
+            testInputs = os.path.join(
+                os.path.dirname(
+                    os.path.realpath(__file__)),
+                'Inputs')
+            testOutput1 = os.path.join(testInputs, 'test4_run0.json')
+            testOutput2 = os.path.join(testInputs, 'test4_run1.json')
+            with open(testOutput1, 'r') as f:
+                json1 = json.load(f)
+            with open(testOutput2, 'r') as f:
+                json2 = json.load(f)
+            return json1, json2
+
+        json1, json2 = load_results()
+        cls.json_diff_report = get_difference_report(
+            json1, json2, utest=True)
+
+    def test_json_diff_report_pretty_printing(self):
+        expect_lines = [
+            ['whocares', '-0.5000', '+0.5000', '0', '0', '0', '0']
+        ]
+        output_lines_with_header = print_difference_report(
+            self.json_diff_report,
+            utest=True, utest_alpha=0.05, use_color=False)
+        output_lines = output_lines_with_header[2:]
+        print("\n")
+        print("\n".join(output_lines_with_header))
+        self.assertEqual(len(output_lines), len(expect_lines))
+        for i in range(0, len(output_lines)):
+            parts = [x for x in output_lines[i].split(' ') if x]
+            self.assertEqual(expect_lines[i], parts)
+
+    def test_json_diff_report(self):
+        expected_output = [
+            {
+                'name': u'whocares',
+                'measurements': [
+                    {'time': -0.5,
+                     'cpu': 0.5,
+                     'real_time': 0.01,
+                     'real_time_other': 0.005,
+                     'cpu_time': 0.10,
+                     'cpu_time_other': 0.15}
+                ],
+                'time_unit': 'ns',
+                'utest': {}
             }
         ]
         self.assertEqual(len(self.json_diff_report), len(expected_output))
diff --git a/MicroBenchmarks/libs/benchmark/googletest/googlemock/scripts/generator/cpp/__init__.py b/default.profraw
old mode 100755
new mode 100644
rename from MicroBenchmarks/libs/benchmark/googletest/googlemock/scripts/generator/cpp/__init__.py
rename to default.profraw
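The net effect of the tooling side of this update is that a two-run comparison now ends with the OVERALL_GEOMEAN row exercised by the tests above. Driving the report layer directly shows the same thing; this is a sketch that assumes the gbench package from benchmark's tools/ directory is importable, and the JSON file names are placeholders:

    import json

    from gbench import report

    with open('baseline.json') as f:
        json1 = json.load(f)
    with open('contender.json') as f:
        json2 = json.load(f)

    diff = report.get_difference_report(json1, json2, utest=True)
    for line in report.print_difference_report(diff, use_color=False):
        print(line)  # the final benchmark row is OVERALL_GEOMEAN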