From f9e826db524ed77061bc39b35ec492d6ce71f34b Mon Sep 17 00:00:00 2001 From: CrazyRong Date: Sat, 20 Apr 2024 16:23:27 +0800 Subject: [PATCH 1/4] Upgrade benchmark to version 1.8.3 as required by the version train MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: CrazyRong --- AUTHORS | 10 +- BUILD.bazel | 39 +- BUILD.gn | 2 +- CMakeLists.txt | 72 ++- CONTRIBUTORS | 12 +- MODULE.bazel | 24 + README.OpenSource | 6 +- README.md | 13 +- WORKSPACE | 44 +- WORKSPACE.bzlmod | 2 + bazel/benchmark_deps.bzl | 65 +++ bindings/python/build_defs.bzl | 4 +- bindings/python/google_benchmark/BUILD | 8 +- bindings/python/google_benchmark/__init__.py | 8 +- bindings/python/google_benchmark/benchmark.cc | 147 +++--- bindings/python/google_benchmark/example.py | 2 +- bindings/python/nanobind.BUILD | 17 + bindings/python/pybind11.BUILD | 20 - bindings/python/requirements.txt | 2 - bundle.json | 3 +- cmake/CXXFeatureCheck.cmake | 29 +- cmake/GoogleTest.cmake | 16 +- cmake/benchmark.pc.in | 4 +- cmake/pthread_affinity.cpp | 16 + docs/AssemblyTests.md | 2 + docs/_config.yml | 4 +- docs/assets/images/icon.png | Bin 0 -> 11106 bytes docs/assets/images/icon.xcf | Bin 0 -> 25934 bytes docs/assets/images/icon_black.png | Bin 0 -> 11559 bytes docs/assets/images/icon_black.xcf | Bin 0 -> 36322 bytes docs/dependencies.md | 22 +- docs/index.md | 4 +- docs/perf_counters.md | 13 +- docs/python_bindings.md | 34 ++ docs/reducing_variance.md | 100 ++++ docs/releasing.md | 20 +- docs/tools.md | 140 +++++ docs/user_guide.md | 128 +++-- include/benchmark/benchmark.h | 490 +++++++++++++----- include/benchmark/export.h | 47 ++ pyproject.toml | 50 ++ setup.py | 166 +++--- src/CMakeLists.txt | 21 +- src/benchmark.cc | 273 +++++++--- src/benchmark_api_internal.cc | 20 +- src/benchmark_api_internal.h | 3 + src/benchmark_main.cc | 1 + src/benchmark_name.cc | 5 +- src/benchmark_register.cc | 52 +- src/benchmark_register.h | 5 +- src/benchmark_runner.cc | 192 ++++++- src/benchmark_runner.h | 33 +- src/check.cc | 11 + src/check.h | 24 +- src/colorprint.cc | 42 +- src/commandlineflags.cc | 19 +- src/commandlineflags.h | 39 +- src/complexity.h | 2 +- src/console_reporter.cc | 20 +- src/csv_reporter.cc | 9 +- src/cycleclock.h | 13 +- src/internal_macros.h | 17 +- src/json_reporter.cc | 29 +- src/log.h | 26 +- src/perf_counters.cc | 240 +++++++-- src/perf_counters.h | 108 ++-- src/re.h | 2 +- src/reporter.cc | 14 +- src/sleep.cc | 66 --- src/sleep.h | 15 - src/statistics.cc | 27 +- src/statistics.h | 12 +- src/string_util.cc | 65 +-- src/string_util.h | 11 +- src/sysinfo.cc | 400 +++++++++----- src/thread_manager.h | 4 +- src/timers.cc | 21 +- test/AssemblyTests.cmake | 21 + test/BUILD | 88 +++- test/CMakeLists.txt | 100 ++-- test/args_product_test.cc | 4 +- test/basic_test.cc | 9 +- test/benchmark_gtest.cc | 14 +- test/benchmark_min_time_flag_iters_test.cc | 66 +++ test/benchmark_min_time_flag_time_test.cc | 90 ++++ test/benchmark_name_gtest.cc | 8 + test/benchmark_random_interleaving_gtest.cc | 5 +- test/benchmark_setup_teardown_test.cc | 18 +- test/benchmark_test.cc | 39 +- test/clobber_memory_assembly_test.cc | 1 + test/complexity_test.cc | 12 +- test/diagnostics_test.cc | 15 +- test/donotoptimize_assembly_test.cc | 40 ++ test/donotoptimize_test.cc | 28 +- test/filter_test.cc | 31 +- test/fixture_test.cc | 6 +- test/link_main_test.cc | 3 +- test/map_test.cc | 10 +-
test/memory_manager_test.cc | 11 +- test/min_time_parse_gtest.cc | 30 ++ test/multiple_ranges_test.cc | 4 +- test/options_test.cc | 6 +- test/output_test.h | 10 +- test/output_test_helper.cc | 52 +- test/perf_counters_gtest.cc | 262 ++++++++-- test/perf_counters_test.cc | 71 ++- test/register_benchmark_test.cc | 26 +- test/reporter_output_test.cc | 13 +- test/skip_with_error_test.cc | 17 +- test/spec_arg_test.cc | 16 +- test/spec_arg_verbosity_test.cc | 43 ++ test/string_util_gtest.cc | 55 +- test/time_unit_gtest.cc | 37 ++ test/user_counters_tabular_test.cc | 3 +- test/user_counters_test.cc | 28 +- test/user_counters_thousands_test.cc | 32 +- tools/BUILD.bazel | 4 +- tools/compare.py | 23 +- tools/gbench/Inputs/test1_run1.json | 8 + tools/gbench/Inputs/test1_run2.json | 8 + tools/gbench/report.py | 95 +++- tools/gbench/util.py | 36 +- tools/libpfm.BUILD.bazel | 22 + tools/requirements.txt | 3 +- tools/strip_asm.py | 2 +- 125 files changed, 3804 insertions(+), 1347 deletions(-) create mode 100644 MODULE.bazel create mode 100644 WORKSPACE.bzlmod create mode 100644 bazel/benchmark_deps.bzl create mode 100644 bindings/python/nanobind.BUILD delete mode 100644 bindings/python/pybind11.BUILD delete mode 100644 bindings/python/requirements.txt create mode 100644 cmake/pthread_affinity.cpp create mode 100644 docs/assets/images/icon.png create mode 100644 docs/assets/images/icon.xcf create mode 100644 docs/assets/images/icon_black.png create mode 100644 docs/assets/images/icon_black.xcf create mode 100644 docs/python_bindings.md create mode 100644 docs/reducing_variance.md create mode 100644 include/benchmark/export.h create mode 100644 pyproject.toml create mode 100644 src/check.cc delete mode 100644 src/sleep.cc delete mode 100644 src/sleep.h create mode 100644 test/benchmark_min_time_flag_iters_test.cc create mode 100644 test/benchmark_min_time_flag_time_test.cc create mode 100644 test/min_time_parse_gtest.cc create mode 100644 test/spec_arg_verbosity_test.cc create mode 100644 test/time_unit_gtest.cc create mode 100644 tools/libpfm.BUILD.bazel diff --git a/AUTHORS b/AUTHORS index 2b8072e..d08c1fd 100644 --- a/AUTHORS +++ b/AUTHORS @@ -13,6 +13,7 @@ Alex Steele Andriy Berestovskyy Arne Beer Carto +Cezary Skrzyński Christian Wassermann Christopher Seymour Colin Braley @@ -27,10 +28,12 @@ Eric Backus Eric Fiselier Eugene Zhuk Evgeny Safronov +Fabien Pichot Federico Ficarelli Felix Homann Gergő Szitár Google Inc. +Henrique Bucher International Business Machines Corporation Ismael Jimenez Martinez Jern-Kuan Leong @@ -41,8 +44,11 @@ Jussi Knuuttila Kaito Udagawa Kishan Kumar Lei Xu +Marcel Jacobse Matt Clarkson Maxim Vafin +Mike Apodaca +Min-Yih Hsu MongoDB Inc. Nick Hutchinson Norman Heino @@ -50,8 +56,11 @@ Oleksandr Sochka Ori Livneh Paul Redmond Radoslav Yovchev +Raghu Raja +Rainer Orth Roman Lebedev Sayan Bhattacharjee +Shapr3D Shuo Chen Staffan Tjernstrom Steinar H. 
Gunderson @@ -60,4 +69,3 @@ Tobias Schmidt Yixuan Qiu Yusuke Suzuki Zbigniew Skowron -Min-Yih Hsu diff --git a/BUILD.bazel b/BUILD.bazel index 904c691..60d31d2 100644 --- a/BUILD.bazel +++ b/BUILD.bazel @@ -18,6 +18,20 @@ config_setting( visibility = [":__subpackages__"], ) +config_setting( + name = "macos", + constraint_values = ["@platforms//os:macos"], + visibility = ["//visibility:public"], +) + +config_setting( + name = "perfcounters", + define_values = { + "pfm": "1", + }, + visibility = [":__subpackages__"], +) + cc_library( name = "benchmark", srcs = glob( @@ -27,19 +41,40 @@ cc_library( ], exclude = ["src/benchmark_main.cc"], ), - hdrs = ["include/benchmark/benchmark.h"], + hdrs = [ + "include/benchmark/benchmark.h", + "include/benchmark/export.h", + ], linkopts = select({ ":windows": ["-DEFAULTLIB:shlwapi.lib"], "//conditions:default": ["-pthread"], }), + copts = select({ + ":windows": [], + "//conditions:default": ["-Werror=old-style-cast"], + }), strip_include_prefix = "include", visibility = ["//visibility:public"], + # Only static linking is allowed; no .so will be produced. + # Using `defines` (i.e. not `local_defines`) means that no + # dependent rules need to bother about defining the macro. + linkstatic = True, + defines = [ + "BENCHMARK_STATIC_DEFINE", + ] + select({ + ":perfcounters": ["HAVE_LIBPFM"], + "//conditions:default": [], + }), + deps = select({ + ":perfcounters": ["@libpfm//:libpfm"], + "//conditions:default": [], + }), ) cc_library( name = "benchmark_main", srcs = ["src/benchmark_main.cc"], - hdrs = ["include/benchmark/benchmark.h"], + hdrs = ["include/benchmark/benchmark.h", "include/benchmark/export.h"], strip_include_prefix = "include", visibility = ["//visibility:public"], deps = [":benchmark"], diff --git a/BUILD.gn b/BUILD.gn index f9d4b77..76309b6 100644 --- a/BUILD.gn +++ b/BUILD.gn @@ -32,6 +32,7 @@ ohos_static_library("benchmark") { "src/benchmark_name.cc", "src/benchmark_register.cc", "src/benchmark_runner.cc", + "src/check.cc", "src/colorprint.cc", "src/commandlineflags.cc", "src/complexity.cc", @@ -41,7 +42,6 @@ ohos_static_library("benchmark") { "src/json_reporter.cc", "src/perf_counters.cc", "src/reporter.cc", - "src/sleep.cc", "src/statistics.cc", "src/string_util.cc", "src/sysinfo.cc", diff --git a/CMakeLists.txt b/CMakeLists.txt index b8852e4..ffd7dee 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,19 +1,7 @@ -cmake_minimum_required (VERSION 3.5.1) +# Require CMake 3.10. If available, use the policies up to CMake 3.22. +cmake_minimum_required (VERSION 3.10...3.22) -foreach(p - CMP0048 # OK to clear PROJECT_VERSION on project() - CMP0054 # CMake 3.1 - CMP0056 # export EXE_LINKER_FLAGS to try_run - CMP0057 # Support no if() IN_LIST operator - CMP0063 # Honor visibility properties for all targets - CMP0077 # Allow option() overrides in importing projects - ) - if(POLICY ${p}) - cmake_policy(SET ${p} NEW) - endif() -endforeach() - -project (benchmark VERSION 1.6.1 LANGUAGES CXX) +project (benchmark VERSION 1.8.3 LANGUAGES CXX) option(BENCHMARK_ENABLE_TESTING "Enable testing of the benchmark library." ON) option(BENCHMARK_ENABLE_EXCEPTIONS "Enable the use of exceptions in the benchmark library." ON) @@ -26,6 +14,9 @@ if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "PGI") # PGC++ maybe reporting false positives. 
set(BENCHMARK_ENABLE_WERROR OFF) endif() +if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "NVHPC") + set(BENCHMARK_ENABLE_WERROR OFF) +endif() if(BENCHMARK_FORCE_WERROR) set(BENCHMARK_ENABLE_WERROR ON) endif(BENCHMARK_FORCE_WERROR) @@ -50,7 +41,10 @@ option(BENCHMARK_USE_BUNDLED_GTEST "Use bundled GoogleTest. If disabled, the fin option(BENCHMARK_ENABLE_LIBPFM "Enable performance counters provided by libpfm" OFF) -set(CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS ON) +# Export only public symbols +set(CMAKE_CXX_VISIBILITY_PRESET hidden) +set(CMAKE_VISIBILITY_INLINES_HIDDEN ON) + if(MSVC) # As of CMake 3.18, CMAKE_SYSTEM_PROCESSOR is not set properly for MSVC and # cross-compilation (e.g. Host=x86_64, target=aarch64) requires using the @@ -116,17 +110,17 @@ else() set(VERSION "${GIT_VERSION}") endif() # Tell the user what versions we are using -message(STATUS "Version: ${VERSION}") +message(STATUS "Google Benchmark version: ${VERSION}") # The version of the libraries set(GENERIC_LIB_VERSION ${VERSION}) string(SUBSTRING ${VERSION} 0 1 GENERIC_LIB_SOVERSION) # Import our CMake modules -include(CheckCXXCompilerFlag) include(AddCXXCompilerFlag) -include(CXXFeatureCheck) +include(CheckCXXCompilerFlag) include(CheckLibraryExists) +include(CXXFeatureCheck) check_library_exists(rt shm_open "" HAVE_LIB_RT) @@ -134,6 +128,16 @@ if (BENCHMARK_BUILD_32_BITS) add_required_cxx_compiler_flag(-m32) endif() +if (MSVC) + set(BENCHMARK_CXX_STANDARD 14) +else() + set(BENCHMARK_CXX_STANDARD 11) +endif() + +set(CMAKE_CXX_STANDARD ${BENCHMARK_CXX_STANDARD}) +set(CMAKE_CXX_STANDARD_REQUIRED YES) +set(CMAKE_CXX_EXTENSIONS OFF) + if (MSVC) # Turn compiler warnings up to 11 string(REGEX REPLACE "[-/]W[1-4]" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}") @@ -166,21 +170,14 @@ if (MSVC) set(CMAKE_EXE_LINKER_FLAGS_MINSIZEREL "${CMAKE_EXE_LINKER_FLAGS_MINSIZEREL} /LTCG") endif() else() - # Try and enable C++11. Don't use C++14 because it doesn't work in some - # configurations. - add_cxx_compiler_flag(-std=c++11) - if (NOT HAVE_CXX_FLAG_STD_CXX11) - add_cxx_compiler_flag(-std=c++0x) - endif() - # Turn compiler warnings up to 11 add_cxx_compiler_flag(-Wall) add_cxx_compiler_flag(-Wextra) add_cxx_compiler_flag(-Wshadow) + add_cxx_compiler_flag(-Wfloat-equal) + add_cxx_compiler_flag(-Wold-style-cast) if(BENCHMARK_ENABLE_WERROR) - add_cxx_compiler_flag(-Werror RELEASE) - add_cxx_compiler_flag(-Werror RELWITHDEBINFO) - add_cxx_compiler_flag(-Werror MINSIZEREL) + add_cxx_compiler_flag(-Werror) endif() if (NOT BENCHMARK_ENABLE_TESTING) # Disable warning when compiling tests as gtest does not use 'override'. @@ -193,24 +190,23 @@ else() # Disable warnings regarding deprecated parts of the library while building # and testing those parts of the library. add_cxx_compiler_flag(-Wno-deprecated-declarations) - if (CMAKE_CXX_COMPILER_ID STREQUAL "Intel") + if (CMAKE_CXX_COMPILER_ID STREQUAL "Intel" OR CMAKE_CXX_COMPILER_ID STREQUAL "IntelLLVM") # Intel silently ignores '-Wno-deprecated-declarations', # warning no. 1786 must be explicitly disabled. # See #631 for rationale. add_cxx_compiler_flag(-wd1786) + add_cxx_compiler_flag(-fno-finite-math-only) endif() # Disable deprecation warnings for release builds (when -Werror is enabled). 
if(BENCHMARK_ENABLE_WERROR) - add_cxx_compiler_flag(-Wno-deprecated RELEASE) - add_cxx_compiler_flag(-Wno-deprecated RELWITHDEBINFO) - add_cxx_compiler_flag(-Wno-deprecated MINSIZEREL) + add_cxx_compiler_flag(-Wno-deprecated) endif() if (NOT BENCHMARK_ENABLE_EXCEPTIONS) add_cxx_compiler_flag(-fno-exceptions) endif() if (HAVE_CXX_FLAG_FSTRICT_ALIASING) - if (NOT CMAKE_CXX_COMPILER_ID STREQUAL "Intel") #ICC17u2: Many false positives for Wstrict-aliasing + if (NOT CMAKE_CXX_COMPILER_ID STREQUAL "Intel" AND NOT CMAKE_CXX_COMPILER_ID STREQUAL "IntelLLVM") #ICC17u2: Many false positives for Wstrict-aliasing add_cxx_compiler_flag(-Wstrict-aliasing) endif() endif() @@ -219,12 +215,12 @@ else() add_cxx_compiler_flag(-wd654) add_cxx_compiler_flag(-Wthread-safety) if (HAVE_CXX_FLAG_WTHREAD_SAFETY) - cxx_feature_check(THREAD_SAFETY_ATTRIBUTES) + cxx_feature_check(THREAD_SAFETY_ATTRIBUTES "-DINCLUDE_DIRECTORIES=${PROJECT_SOURCE_DIR}/include") endif() # On most UNIX like platforms g++ and clang++ define _GNU_SOURCE as a # predefined macro, which turns on all of the wonderful libc extensions. - # However g++ doesn't do this in Cygwin so we have to define it ourselfs + # However g++ doesn't do this in Cygwin so we have to define it ourselves # since we depend on GNU/POSIX/BSD extensions. if (CYGWIN) add_definitions(-D_GNU_SOURCE=1) @@ -275,7 +271,8 @@ if (BENCHMARK_USE_LIBCXX) if ("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang") add_cxx_compiler_flag(-stdlib=libc++) elseif ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU" OR - "${CMAKE_CXX_COMPILER_ID}" STREQUAL "Intel") + "${CMAKE_CXX_COMPILER_ID}" STREQUAL "Intel" OR + "${CMAKE_CXX_COMPILER_ID}" STREQUAL "IntelLLVM") add_cxx_compiler_flag(-nostdinc++) message(WARNING "libc++ header path must be manually specified using CMAKE_CXX_FLAGS") # Adding -nodefaultlibs directly to CMAKE__LINKER_FLAGS will break @@ -312,6 +309,7 @@ cxx_feature_check(STEADY_CLOCK) # Ensure we have pthreads set(THREADS_PREFER_PTHREAD_FLAG ON) find_package(Threads REQUIRED) +cxx_feature_check(PTHREAD_AFFINITY) if (BENCHMARK_ENABLE_LIBPFM) find_package(PFM) diff --git a/CONTRIBUTORS b/CONTRIBUTORS index 651fbea..95bcad0 100644 --- a/CONTRIBUTORS +++ b/CONTRIBUTORS @@ -27,7 +27,9 @@ Albert Pretorius Alex Steele Andriy Berestovskyy Arne Beer +Bátor Tallér Billy Robert O'Neal III +Cezary Skrzyński Chris Kennelly Christian Wassermann Christopher Seymour @@ -44,12 +46,14 @@ Eric Backus Eric Fiselier Eugene Zhuk Evgeny Safronov +Fabien Pichot Fanbo Meng Federico Ficarelli Felix Homann Geoffrey Martin-Noble Gergő Szitár Hannes Hauswedell +Henrique Bucher Ismael Jimenez Martinez Jern-Kuan Leong JianXiong Zhou @@ -57,12 +61,15 @@ Joao Paulo Magalhaes John Millikin Jordan Williams Jussi Knuuttila -Kai Wolf Kaito Udagawa +Kai Wolf Kishan Kumar Lei Xu +Marcel Jacobse Matt Clarkson Maxim Vafin +Mike Apodaca +Min-Yih Hsu Nick Hutchinson Norman Heino Oleksandr Sochka @@ -71,6 +78,8 @@ Pascal Leroy Paul Redmond Pierre Phaneuf Radoslav Yovchev +Raghu Raja +Rainer Orth Raul Marin Ray Glover Robert Guo @@ -84,4 +93,3 @@ Tom Madams Yixuan Qiu Yusuke Suzuki Zbigniew Skowron -Min-Yih Hsu diff --git a/MODULE.bazel b/MODULE.bazel new file mode 100644 index 0000000..37a5f5d --- /dev/null +++ b/MODULE.bazel @@ -0,0 +1,24 @@ +module(name = "google_benchmark", version="1.8.3") + +bazel_dep(name = "bazel_skylib", version = "1.4.1") +bazel_dep(name = "platforms", version = "0.0.6") +bazel_dep(name = "rules_foreign_cc", version = "0.9.0") +bazel_dep(name = "rules_cc", version = "0.0.6") +bazel_dep(name = "rules_python", 
version = "0.24.0", dev_dependency = True) +bazel_dep(name = "googletest", version = "1.12.1", repo_name = "com_google_googletest", dev_dependency = True) +bazel_dep(name = "libpfm", version = "4.11.0") + +# Register a toolchain for Python 3.9 to be able to build numpy. Python +# versions >=3.10 are problematic. +# A second reason for this is to be able to build Python hermetically instead +# of relying on the changing default version from rules_python. + +python = use_extension("@rules_python//python/extensions:python.bzl", "python", dev_dependency = True) +python.toolchain(python_version = "3.9") + +pip = use_extension("@rules_python//python/extensions:pip.bzl", "pip", dev_dependency = True) +pip.parse( + hub_name="tools_pip_deps", + python_version = "3.9", + requirements_lock="//tools:requirements.txt") +use_repo(pip, "tools_pip_deps") diff --git a/README.OpenSource b/README.OpenSource index 5b8bc54..b868188 100644 --- a/README.OpenSource +++ b/README.OpenSource @@ -3,9 +3,9 @@ "Name": "benchmark", "License": "Apache License V2.0", "License File": "LICENSE", - "Version Number": "1.6.1", - "Owner": "mipengwei@huawei.com", - "Upstream URL": "https://github.com/google/benchmark/releases/tag/v1.6.1", + "Version Number": "1.8.3", + "Owner": "zzr189911@163.com", + "Upstream URL": "https://github.com/google/benchmark/releases/tag/v1.8.3", "Description": "A microbenchmark support library" } ] diff --git a/README.md b/README.md index 7b81d96..a5e5d39 100644 --- a/README.md +++ b/README.md @@ -4,10 +4,9 @@ [![bazel](https://github.com/google/benchmark/actions/workflows/bazel.yml/badge.svg)](https://github.com/google/benchmark/actions/workflows/bazel.yml) [![pylint](https://github.com/google/benchmark/workflows/pylint/badge.svg)](https://github.com/google/benchmark/actions?query=workflow%3Apylint) [![test-bindings](https://github.com/google/benchmark/workflows/test-bindings/badge.svg)](https://github.com/google/benchmark/actions?query=workflow%3Atest-bindings) - -[![Build Status](https://travis-ci.org/google/benchmark.svg?branch=master)](https://travis-ci.org/google/benchmark) [![Coverage Status](https://coveralls.io/repos/google/benchmark/badge.svg)](https://coveralls.io/r/google/benchmark) +[![Discord](https://discordapp.com/api/guilds/1125694995928719494/widget.png?style=shield)](https://discord.gg/cz7UX7wKC2) A library to benchmark code snippets, similar to unit tests. Example: @@ -33,7 +32,7 @@ To get started, see [Requirements](#requirements) and [Installation](#installation). See [Usage](#usage) for a full example and the [User Guide](docs/user_guide.md) for a more comprehensive feature overview. -It may also help to read the [Google Test documentation](https://github.com/google/googletest/blob/master/docs/primer.md) +It may also help to read the [Google Test documentation](https://github.com/google/googletest/blob/main/docs/primer.md) as some of the structural aspects of the APIs are similar. ## Resources @@ -47,6 +46,8 @@ IRC channels: [Assembly Testing Documentation](docs/AssemblyTests.md) +[Building and installing Python bindings](docs/python_bindings.md) + ## Requirements The library can be used with C++03. However, it requires C++11 to build, @@ -137,6 +138,12 @@ cache variables, if autodetection fails. If you are using clang, you may need to set `LLVMAR_EXECUTABLE`, `LLVMNM_EXECUTABLE` and `LLVMRANLIB_EXECUTABLE` cmake cache variables. 
+To enable sanitizer checks (eg., `asan` and `tsan`), add: +``` + -DCMAKE_C_FLAGS="-g -O2 -fno-omit-frame-pointer -fsanitize=address -fsanitize=thread -fno-sanitize-recover=all" + -DCMAKE_CXX_FLAGS="-g -O2 -fno-omit-frame-pointer -fsanitize=address -fsanitize=thread -fno-sanitize-recover=all " +``` + ### Stable and Experimental Library Versions The main branch contains the latest stable version of the benchmarking library; diff --git a/WORKSPACE b/WORKSPACE index 949eb98..833590f 100644 --- a/WORKSPACE +++ b/WORKSPACE @@ -1,44 +1,22 @@ workspace(name = "com_github_google_benchmark") -load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") -load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository") +load("//:bazel/benchmark_deps.bzl", "benchmark_deps") -http_archive( - name = "com_google_absl", - sha256 = "f41868f7a938605c92936230081175d1eae87f6ea2c248f41077c8f88316f111", - strip_prefix = "abseil-cpp-20200225.2", - urls = ["https://github.com/abseil/abseil-cpp/archive/20200225.2.tar.gz"], -) +benchmark_deps() -git_repository( - name = "com_google_googletest", - remote = "https://github.com/google/googletest.git", - tag = "release-1.11.0", -) +load("@rules_foreign_cc//foreign_cc:repositories.bzl", "rules_foreign_cc_dependencies") -http_archive( - name = "pybind11", - build_file = "@//bindings/python:pybind11.BUILD", - sha256 = "1eed57bc6863190e35637290f97a20c81cfe4d9090ac0a24f3bbf08f265eb71d", - strip_prefix = "pybind11-2.4.3", - urls = ["https://github.com/pybind/pybind11/archive/v2.4.3.tar.gz"], +rules_foreign_cc_dependencies() + +load("@rules_python//python:pip.bzl", pip3_install="pip_install") + +pip3_install( + name = "tools_pip_deps", + requirements = "//tools:requirements.txt", ) new_local_repository( name = "python_headers", build_file = "@//bindings/python:python_headers.BUILD", - path = "/usr/include/python3.6", # May be overwritten by setup.py. -) - -http_archive( - name = "rules_python", - url = "https://github.com/bazelbuild/rules_python/releases/download/0.1.0/rules_python-0.1.0.tar.gz", - sha256 = "b6d46438523a3ec0f3cead544190ee13223a52f6a6765a29eae7b7cc24cc83a0", -) - -load("@rules_python//python:pip.bzl", pip3_install="pip_install") - -pip3_install( - name = "py_deps", - requirements = "//:requirements.txt", + path = "", # May be overwritten by setup.py. ) diff --git a/WORKSPACE.bzlmod b/WORKSPACE.bzlmod new file mode 100644 index 0000000..9526376 --- /dev/null +++ b/WORKSPACE.bzlmod @@ -0,0 +1,2 @@ +# This file marks the root of the Bazel workspace. +# See MODULE.bazel for dependencies and setup. 
diff --git a/bazel/benchmark_deps.bzl b/bazel/benchmark_deps.bzl new file mode 100644 index 0000000..667065f --- /dev/null +++ b/bazel/benchmark_deps.bzl @@ -0,0 +1,65 @@ +load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") +load("@bazel_tools//tools/build_defs/repo:git.bzl", "new_git_repository") + +def benchmark_deps(): + """Loads dependencies required to build Google Benchmark.""" + + if "bazel_skylib" not in native.existing_rules(): + http_archive( + name = "bazel_skylib", + sha256 = "f7be3474d42aae265405a592bb7da8e171919d74c16f082a5457840f06054728", + urls = [ + "https://mirror.bazel.build/github.com/bazelbuild/bazel-skylib/releases/download/1.2.1/bazel-skylib-1.2.1.tar.gz", + "https://github.com/bazelbuild/bazel-skylib/releases/download/1.2.1/bazel-skylib-1.2.1.tar.gz", + ], + ) + + if "rules_foreign_cc" not in native.existing_rules(): + http_archive( + name = "rules_foreign_cc", + sha256 = "bcd0c5f46a49b85b384906daae41d277b3dc0ff27c7c752cc51e43048a58ec83", + strip_prefix = "rules_foreign_cc-0.7.1", + url = "https://github.com/bazelbuild/rules_foreign_cc/archive/0.7.1.tar.gz", + ) + + if "rules_python" not in native.existing_rules(): + http_archive( + name = "rules_python", + url = "https://github.com/bazelbuild/rules_python/releases/download/0.1.0/rules_python-0.1.0.tar.gz", + sha256 = "b6d46438523a3ec0f3cead544190ee13223a52f6a6765a29eae7b7cc24cc83a0", + ) + + if "com_google_absl" not in native.existing_rules(): + http_archive( + name = "com_google_absl", + sha256 = "f41868f7a938605c92936230081175d1eae87f6ea2c248f41077c8f88316f111", + strip_prefix = "abseil-cpp-20200225.2", + urls = ["https://github.com/abseil/abseil-cpp/archive/20200225.2.tar.gz"], + ) + + if "com_google_googletest" not in native.existing_rules(): + new_git_repository( + name = "com_google_googletest", + remote = "https://github.com/google/googletest.git", + tag = "release-1.11.0", + ) + + if "nanobind" not in native.existing_rules(): + new_git_repository( + name = "nanobind", + remote = "https://github.com/wjakob/nanobind.git", + tag = "v1.4.0", + build_file = "@//bindings/python:nanobind.BUILD", + recursive_init_submodules = True, + ) + + if "libpfm" not in native.existing_rules(): + # Downloaded from v4.9.0 tag at https://sourceforge.net/p/perfmon2/libpfm4/ref/master/tags/ + http_archive( + name = "libpfm", + build_file = str(Label("//tools:libpfm.BUILD.bazel")), + sha256 = "5da5f8872bde14b3634c9688d980f68bda28b510268723cc12973eedbab9fecc", + type = "tar.gz", + strip_prefix = "libpfm-4.11.0", + urls = ["https://sourceforge.net/projects/perfmon2/files/libpfm4/libpfm-4.11.0.tar.gz/download"], + ) diff --git a/bindings/python/build_defs.bzl b/bindings/python/build_defs.bzl index 45907aa..009820a 100644 --- a/bindings/python/build_defs.bzl +++ b/bindings/python/build_defs.bzl @@ -8,8 +8,8 @@ def py_extension(name, srcs, hdrs = [], copts = [], features = [], deps = []): shared_lib_name = name + shared_lib_suffix native.cc_binary( name = shared_lib_name, - linkshared = 1, - linkstatic = 1, + linkshared = True, + linkstatic = True, srcs = srcs + hdrs, copts = copts, features = features, diff --git a/bindings/python/google_benchmark/BUILD b/bindings/python/google_benchmark/BUILD index 3c1561f..89ec76e 100644 --- a/bindings/python/google_benchmark/BUILD +++ b/bindings/python/google_benchmark/BUILD @@ -6,7 +6,6 @@ py_library( visibility = ["//visibility:public"], deps = [ ":_benchmark", - # pip; absl:app ], ) @@ -17,10 +16,13 @@ py_extension( "-fexceptions", "-fno-strict-aliasing", ], - features = 
["-use_header_modules"], + features = [ + "-use_header_modules", + "-parse_headers", + ], deps = [ "//:benchmark", - "@pybind11", + "@nanobind", "@python_headers", ], ) diff --git a/bindings/python/google_benchmark/__init__.py b/bindings/python/google_benchmark/__init__.py index ec651c1..642d78a 100644 --- a/bindings/python/google_benchmark/__init__.py +++ b/bindings/python/google_benchmark/__init__.py @@ -26,6 +26,7 @@ Example usage: if __name__ == '__main__': benchmark.main() """ +import atexit from absl import app from google_benchmark import _benchmark @@ -44,6 +45,7 @@ from google_benchmark._benchmark import ( oNLogN, oAuto, oLambda, + State, ) @@ -64,9 +66,10 @@ __all__ = [ "oNLogN", "oAuto", "oLambda", + "State", ] -__version__ = "1.6.1" +__version__ = "1.8.3" class __OptionMaker: @@ -101,7 +104,7 @@ class __OptionMaker: options = self.make(func_or_options) options.builder_calls.append((builder_name, args, kwargs)) # The decorator returns Options so it is not technically a decorator - # and needs a final call to @regiser + # and needs a final call to @register return options return __decorator @@ -156,3 +159,4 @@ def main(argv=None): # Methods for use with custom main function. initialize = _benchmark.Initialize run_benchmarks = _benchmark.RunSpecifiedBenchmarks +atexit.register(_benchmark.ClearRegisteredBenchmarks) diff --git a/bindings/python/google_benchmark/benchmark.cc b/bindings/python/google_benchmark/benchmark.cc index 02b6ed7..f444769 100644 --- a/bindings/python/google_benchmark/benchmark.cc +++ b/bindings/python/google_benchmark/benchmark.cc @@ -1,20 +1,17 @@ // Benchmark for Python. -#include -#include -#include - -#include "pybind11/operators.h" -#include "pybind11/pybind11.h" -#include "pybind11/stl.h" -#include "pybind11/stl_bind.h" - #include "benchmark/benchmark.h" -PYBIND11_MAKE_OPAQUE(benchmark::UserCounters); +#include "nanobind/nanobind.h" +#include "nanobind/operators.h" +#include "nanobind/stl/bind_map.h" +#include "nanobind/stl/string.h" +#include "nanobind/stl/vector.h" + +NB_MAKE_OPAQUE(benchmark::UserCounters); namespace { -namespace py = ::pybind11; +namespace nb = nanobind; std::vector Initialize(const std::vector& argv) { // The `argv` pointers here become invalid when this function returns, but @@ -37,15 +34,16 @@ std::vector Initialize(const std::vector& argv) { return remaining_argv; } -benchmark::internal::Benchmark* RegisterBenchmark(const char* name, - py::function f) { +benchmark::internal::Benchmark* RegisterBenchmark(const std::string& name, + nb::callable f) { return benchmark::RegisterBenchmark( name, [f](benchmark::State& state) { f(&state); }); } -PYBIND11_MODULE(_benchmark, m) { +NB_MODULE(_benchmark, m) { + using benchmark::TimeUnit; - py::enum_(m, "TimeUnit") + nb::enum_(m, "TimeUnit") .value("kNanosecond", TimeUnit::kNanosecond) .value("kMicrosecond", TimeUnit::kMicrosecond) .value("kMillisecond", TimeUnit::kMillisecond) @@ -53,72 +51,74 @@ PYBIND11_MODULE(_benchmark, m) { .export_values(); using benchmark::BigO; - py::enum_(m, "BigO") + nb::enum_(m, "BigO") .value("oNone", BigO::oNone) .value("o1", BigO::o1) .value("oN", BigO::oN) .value("oNSquared", BigO::oNSquared) .value("oNCubed", BigO::oNCubed) .value("oLogN", BigO::oLogN) - .value("oNLogN", BigO::oLogN) + .value("oNLogN", BigO::oNLogN) .value("oAuto", BigO::oAuto) .value("oLambda", BigO::oLambda) .export_values(); using benchmark::internal::Benchmark; - py::class_(m, "Benchmark") - // For methods returning a pointer tor the current object, reference - // return policy is used to 
ask pybind not to take ownership oof the + nb::class_(m, "Benchmark") + // For methods returning a pointer to the current object, reference + // return policy is used to ask nanobind not to take ownership of the // returned object and avoid calling delete on it. // https://pybind11.readthedocs.io/en/stable/advanced/functions.html#return-value-policies // // For methods taking a const std::vector<...>&, a copy is created // because a it is bound to a Python list. // https://pybind11.readthedocs.io/en/stable/advanced/cast/stl.html - .def("unit", &Benchmark::Unit, py::return_value_policy::reference) - .def("arg", &Benchmark::Arg, py::return_value_policy::reference) - .def("args", &Benchmark::Args, py::return_value_policy::reference) - .def("range", &Benchmark::Range, py::return_value_policy::reference, - py::arg("start"), py::arg("limit")) + .def("unit", &Benchmark::Unit, nb::rv_policy::reference) + .def("arg", &Benchmark::Arg, nb::rv_policy::reference) + .def("args", &Benchmark::Args, nb::rv_policy::reference) + .def("range", &Benchmark::Range, nb::rv_policy::reference, + nb::arg("start"), nb::arg("limit")) .def("dense_range", &Benchmark::DenseRange, - py::return_value_policy::reference, py::arg("start"), - py::arg("limit"), py::arg("step") = 1) - .def("ranges", &Benchmark::Ranges, py::return_value_policy::reference) + nb::rv_policy::reference, nb::arg("start"), + nb::arg("limit"), nb::arg("step") = 1) + .def("ranges", &Benchmark::Ranges, nb::rv_policy::reference) .def("args_product", &Benchmark::ArgsProduct, - py::return_value_policy::reference) - .def("arg_name", &Benchmark::ArgName, py::return_value_policy::reference) + nb::rv_policy::reference) + .def("arg_name", &Benchmark::ArgName, nb::rv_policy::reference) .def("arg_names", &Benchmark::ArgNames, - py::return_value_policy::reference) + nb::rv_policy::reference) .def("range_pair", &Benchmark::RangePair, - py::return_value_policy::reference, py::arg("lo1"), py::arg("hi1"), - py::arg("lo2"), py::arg("hi2")) + nb::rv_policy::reference, nb::arg("lo1"), nb::arg("hi1"), + nb::arg("lo2"), nb::arg("hi2")) .def("range_multiplier", &Benchmark::RangeMultiplier, - py::return_value_policy::reference) - .def("min_time", &Benchmark::MinTime, py::return_value_policy::reference) + nb::rv_policy::reference) + .def("min_time", &Benchmark::MinTime, nb::rv_policy::reference) + .def("min_warmup_time", &Benchmark::MinWarmUpTime, + nb::rv_policy::reference) .def("iterations", &Benchmark::Iterations, - py::return_value_policy::reference) + nb::rv_policy::reference) .def("repetitions", &Benchmark::Repetitions, - py::return_value_policy::reference) + nb::rv_policy::reference) .def("report_aggregates_only", &Benchmark::ReportAggregatesOnly, - py::return_value_policy::reference, py::arg("value") = true) + nb::rv_policy::reference, nb::arg("value") = true) .def("display_aggregates_only", &Benchmark::DisplayAggregatesOnly, - py::return_value_policy::reference, py::arg("value") = true) + nb::rv_policy::reference, nb::arg("value") = true) .def("measure_process_cpu_time", &Benchmark::MeasureProcessCPUTime, - py::return_value_policy::reference) + nb::rv_policy::reference) .def("use_real_time", &Benchmark::UseRealTime, - py::return_value_policy::reference) + nb::rv_policy::reference) .def("use_manual_time", &Benchmark::UseManualTime, - py::return_value_policy::reference) + nb::rv_policy::reference) .def( "complexity", (Benchmark * (Benchmark::*)(benchmark::BigO)) & Benchmark::Complexity, - py::return_value_policy::reference, - py::arg("complexity") = benchmark::oAuto); + 
nb::rv_policy::reference, + nb::arg("complexity") = benchmark::oAuto); using benchmark::Counter; - py::class_ py_counter(m, "Counter"); + nb::class_ py_counter(m, "Counter"); - py::enum_(py_counter, "Flags") + nb::enum_(py_counter, "Flags") .value("kDefaults", Counter::Flags::kDefaults) .value("kIsRate", Counter::Flags::kIsRate) .value("kAvgThreads", Counter::Flags::kAvgThreads) @@ -130,52 +130,55 @@ PYBIND11_MODULE(_benchmark, m) { .value("kAvgIterationsRate", Counter::Flags::kAvgIterationsRate) .value("kInvert", Counter::Flags::kInvert) .export_values() - .def(py::self | py::self); + .def(nb::self | nb::self); - py::enum_(py_counter, "OneK") + nb::enum_(py_counter, "OneK") .value("kIs1000", Counter::OneK::kIs1000) .value("kIs1024", Counter::OneK::kIs1024) .export_values(); py_counter - .def(py::init(), - py::arg("value") = 0., py::arg("flags") = Counter::kDefaults, - py::arg("k") = Counter::kIs1000) - .def(py::init([](double value) { return Counter(value); })) - .def_readwrite("value", &Counter::value) - .def_readwrite("flags", &Counter::flags) - .def_readwrite("oneK", &Counter::oneK); - py::implicitly_convertible(); - py::implicitly_convertible(); + .def(nb::init(), + nb::arg("value") = 0., nb::arg("flags") = Counter::kDefaults, + nb::arg("k") = Counter::kIs1000) + .def("__init__", ([](Counter *c, double value) { new (c) Counter(value); })) + .def_rw("value", &Counter::value) + .def_rw("flags", &Counter::flags) + .def_rw("oneK", &Counter::oneK) + .def(nb::init_implicit()); - py::bind_map(m, "UserCounters"); + nb::implicitly_convertible(); + + nb::bind_map(m, "UserCounters"); using benchmark::State; - py::class_(m, "State") + nb::class_(m, "State") .def("__bool__", &State::KeepRunning) - .def_property_readonly("keep_running", &State::KeepRunning) + .def_prop_ro("keep_running", &State::KeepRunning) .def("pause_timing", &State::PauseTiming) .def("resume_timing", &State::ResumeTiming) .def("skip_with_error", &State::SkipWithError) - .def_property_readonly("error_occurred", &State::error_occurred) + .def_prop_ro("error_occurred", &State::error_occurred) .def("set_iteration_time", &State::SetIterationTime) - .def_property("bytes_processed", &State::bytes_processed, + .def_prop_rw("bytes_processed", &State::bytes_processed, &State::SetBytesProcessed) - .def_property("complexity_n", &State::complexity_length_n, + .def_prop_rw("complexity_n", &State::complexity_length_n, &State::SetComplexityN) - .def_property("items_processed", &State::items_processed, - &State::SetItemsProcessed) - .def("set_label", (void(State::*)(const char*)) & State::SetLabel) - .def("range", &State::range, py::arg("pos") = 0) - .def_property_readonly("iterations", &State::iterations) - .def_readwrite("counters", &State::counters) - .def_property_readonly("thread_index", &State::thread_index) - .def_property_readonly("threads", &State::threads); + .def_prop_rw("items_processed", &State::items_processed, + &State::SetItemsProcessed) + .def("set_label", &State::SetLabel) + .def("range", &State::range, nb::arg("pos") = 0) + .def_prop_ro("iterations", &State::iterations) + .def_prop_ro("name", &State::name) + .def_rw("counters", &State::counters) + .def_prop_ro("thread_index", &State::thread_index) + .def_prop_ro("threads", &State::threads); m.def("Initialize", Initialize); m.def("RegisterBenchmark", RegisterBenchmark, - py::return_value_policy::reference); + nb::rv_policy::reference); m.def("RunSpecifiedBenchmarks", []() { benchmark::RunSpecifiedBenchmarks(); }); + m.def("ClearRegisteredBenchmarks", 
benchmark::ClearRegisteredBenchmarks); }; } // namespace diff --git a/bindings/python/google_benchmark/example.py b/bindings/python/google_benchmark/example.py index 487acc9..d95a043 100644 --- a/bindings/python/google_benchmark/example.py +++ b/bindings/python/google_benchmark/example.py @@ -72,7 +72,7 @@ def manual_timing(state): @benchmark.register def custom_counters(state): - """Collect cutom metric using benchmark.Counter.""" + """Collect custom metric using benchmark.Counter.""" num_foo = 0.0 while state: # Benchmark some code here diff --git a/bindings/python/nanobind.BUILD b/bindings/python/nanobind.BUILD new file mode 100644 index 0000000..cd9faf9 --- /dev/null +++ b/bindings/python/nanobind.BUILD @@ -0,0 +1,17 @@ +cc_library( + name = "nanobind", + srcs = glob([ + "src/*.cpp" + ]), + copts = ["-fexceptions"], + includes = ["include", "ext/robin_map/include"], + textual_hdrs = glob( + [ + "include/**/*.h", + "src/*.h", + "ext/robin_map/include/tsl/*.h", + ], + ), + deps = ["@python_headers"], + visibility = ["//visibility:public"], +) diff --git a/bindings/python/pybind11.BUILD b/bindings/python/pybind11.BUILD deleted file mode 100644 index bc83350..0000000 --- a/bindings/python/pybind11.BUILD +++ /dev/null @@ -1,20 +0,0 @@ -cc_library( - name = "pybind11", - hdrs = glob( - include = [ - "include/pybind11/*.h", - "include/pybind11/detail/*.h", - ], - exclude = [ - "include/pybind11/common.h", - "include/pybind11/eigen.h", - ], - ), - copts = [ - "-fexceptions", - "-Wno-undefined-inline", - "-Wno-pragma-once-outside-header", - ], - includes = ["include"], - visibility = ["//visibility:public"], -) diff --git a/bindings/python/requirements.txt b/bindings/python/requirements.txt deleted file mode 100644 index f5bbe7e..0000000 --- a/bindings/python/requirements.txt +++ /dev/null @@ -1,2 +0,0 @@ -absl-py>=0.7.1 - diff --git a/bundle.json b/bundle.json index b17909a..2992320 100644 --- a/bundle.json +++ b/bundle.json @@ -31,7 +31,8 @@ "name":"//third_party/benchmark:benchmark", "header":{ "header_files":[ - "benchmark.h" + "benchmark.h", + "export.h" ], "header_base":"//third_party/benchmark/include" } diff --git a/cmake/CXXFeatureCheck.cmake b/cmake/CXXFeatureCheck.cmake index 62e6741..e514826 100644 --- a/cmake/CXXFeatureCheck.cmake +++ b/cmake/CXXFeatureCheck.cmake @@ -17,6 +17,8 @@ if(__cxx_feature_check) endif() set(__cxx_feature_check INCLUDED) +option(CXXFEATURECHECK_DEBUG OFF) + function(cxx_feature_check FILE) string(TOLOWER ${FILE} FILE) string(TOUPPER ${FILE} VAR) @@ -27,18 +29,22 @@ function(cxx_feature_check FILE) return() endif() + set(FEATURE_CHECK_CMAKE_FLAGS ${BENCHMARK_CXX_LINKER_FLAGS}) if (ARGC GREATER 1) message(STATUS "Enabling additional flags: ${ARGV1}") - list(APPEND BENCHMARK_CXX_LINKER_FLAGS ${ARGV1}) + list(APPEND FEATURE_CHECK_CMAKE_FLAGS ${ARGV1}) endif() if (NOT DEFINED COMPILE_${FEATURE}) - message(STATUS "Performing Test ${FEATURE}") if(CMAKE_CROSSCOMPILING) + message(STATUS "Cross-compiling to test ${FEATURE}") try_compile(COMPILE_${FEATURE} ${CMAKE_BINARY_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/cmake/${FILE}.cpp - CMAKE_FLAGS ${BENCHMARK_CXX_LINKER_FLAGS} - LINK_LIBRARIES ${BENCHMARK_CXX_LIBRARIES}) + CXX_STANDARD 11 + CXX_STANDARD_REQUIRED ON + CMAKE_FLAGS ${FEATURE_CHECK_CMAKE_FLAGS} + LINK_LIBRARIES ${BENCHMARK_CXX_LIBRARIES} + OUTPUT_VARIABLE COMPILE_OUTPUT_VAR) if(COMPILE_${FEATURE}) message(WARNING "If you see build failures due to cross compilation, try setting HAVE_${VAR} to 0") @@ -47,11 +53,14 @@ function(cxx_feature_check FILE) set(RUN_${FEATURE} 
1 CACHE INTERNAL "") endif() else() - message(STATUS "Performing Test ${FEATURE}") + message(STATUS "Compiling and running to test ${FEATURE}") try_run(RUN_${FEATURE} COMPILE_${FEATURE} ${CMAKE_BINARY_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/cmake/${FILE}.cpp - CMAKE_FLAGS ${BENCHMARK_CXX_LINKER_FLAGS} - LINK_LIBRARIES ${BENCHMARK_CXX_LIBRARIES}) + CXX_STANDARD 11 + CXX_STANDARD_REQUIRED ON + CMAKE_FLAGS ${FEATURE_CHECK_CMAKE_FLAGS} + LINK_LIBRARIES ${BENCHMARK_CXX_LIBRARIES} + COMPILE_OUTPUT_VARIABLE COMPILE_OUTPUT_VAR) endif() endif() @@ -61,7 +70,11 @@ function(cxx_feature_check FILE) add_definitions(-DHAVE_${VAR}) else() if(NOT COMPILE_${FEATURE}) - message(STATUS "Performing Test ${FEATURE} -- failed to compile") + if(CXXFEATURECHECK_DEBUG) + message(STATUS "Performing Test ${FEATURE} -- failed to compile: ${COMPILE_OUTPUT_VAR}") + else() + message(STATUS "Performing Test ${FEATURE} -- failed to compile") + endif() else() message(STATUS "Performing Test ${FEATURE} -- compiled but failed to run") endif() diff --git a/cmake/GoogleTest.cmake b/cmake/GoogleTest.cmake index 66cb910..e66e9d1 100644 --- a/cmake/GoogleTest.cmake +++ b/cmake/GoogleTest.cmake @@ -29,15 +29,25 @@ set(gtest_force_shared_crt ON CACHE BOOL "" FORCE) include(${GOOGLETEST_PREFIX}/googletest-paths.cmake) -# googletest doesn't seem to want to stay build warning clean so let's not hurt ourselves. -add_compile_options(-w) - # Add googletest directly to our build. This defines # the gtest and gtest_main targets. add_subdirectory(${GOOGLETEST_SOURCE_DIR} ${GOOGLETEST_BINARY_DIR} EXCLUDE_FROM_ALL) +# googletest doesn't seem to want to stay build warning clean so let's not hurt ourselves. +if (MSVC) + target_compile_options(gtest PRIVATE "/wd4244" "/wd4722") + target_compile_options(gtest_main PRIVATE "/wd4244" "/wd4722") + target_compile_options(gmock PRIVATE "/wd4244" "/wd4722") + target_compile_options(gmock_main PRIVATE "/wd4244" "/wd4722") +else() + target_compile_options(gtest PRIVATE "-w") + target_compile_options(gtest_main PRIVATE "-w") + target_compile_options(gmock PRIVATE "-w") + target_compile_options(gmock_main PRIVATE "-w") +endif() + if(NOT DEFINED GTEST_COMPILE_COMMANDS) set(GTEST_COMPILE_COMMANDS ON) endif() diff --git a/cmake/benchmark.pc.in b/cmake/benchmark.pc.in index 34beb01..9dae881 100644 --- a/cmake/benchmark.pc.in +++ b/cmake/benchmark.pc.in @@ -1,7 +1,7 @@ prefix=@CMAKE_INSTALL_PREFIX@ exec_prefix=${prefix} -libdir=${prefix}/@CMAKE_INSTALL_LIBDIR@ -includedir=${prefix}/@CMAKE_INSTALL_INCLUDEDIR@ +libdir=@CMAKE_INSTALL_FULL_LIBDIR@ +includedir=@CMAKE_INSTALL_FULL_INCLUDEDIR@ Name: @PROJECT_NAME@ Description: Google microbenchmark framework diff --git a/cmake/pthread_affinity.cpp b/cmake/pthread_affinity.cpp new file mode 100644 index 0000000..7b143bc --- /dev/null +++ b/cmake/pthread_affinity.cpp @@ -0,0 +1,16 @@ +#include +int main() { + cpu_set_t set; + CPU_ZERO(&set); + for (int i = 0; i < CPU_SETSIZE; ++i) { + CPU_SET(i, &set); + CPU_CLR(i, &set); + } + pthread_t self = pthread_self(); + int ret; + ret = pthread_getaffinity_np(self, sizeof(set), &set); + if (ret != 0) return ret; + ret = pthread_setaffinity_np(self, sizeof(set), &set); + if (ret != 0) return ret; + return 0; +} diff --git a/docs/AssemblyTests.md b/docs/AssemblyTests.md index 1fbdc26..89df7ca 100644 --- a/docs/AssemblyTests.md +++ b/docs/AssemblyTests.md @@ -111,6 +111,7 @@ between compilers or compiler versions. A common example of this is matching stack frame addresses. 
In this case regular expressions can be used to match the differing bits of output. For example: + ```c++ int ExternInt; struct Point { int x, y, z; }; @@ -127,6 +128,7 @@ extern "C" void test_store_point() { // CHECK: ret } ``` + ## Current Requirements and Limitations diff --git a/docs/_config.yml b/docs/_config.yml index 2f7efbe..32f9f2e 100644 --- a/docs/_config.yml +++ b/docs/_config.yml @@ -1 +1,3 @@ -theme: jekyll-theme-minimal \ No newline at end of file +theme: jekyll-theme-minimal +logo: /assets/images/icon_black.png +show_downloads: true diff --git a/docs/assets/images/icon.png b/docs/assets/images/icon.png new file mode 100644 index 0000000000000000000000000000000000000000..b98260486e421e2f1caad425148160910778639e GIT binary patch literal 11106 zcmch7^;?u*^z8sD-6bWcNJvXJNJvVDfPg3Vm%GMl#V2SdL5?APuqU|)%>bze@piAV`qn@^zedj|Rj$9#; zb+9g-3g^XZCVa*4h@3?O$G^|3))$ebB|#+&SbhPGO+u_q3U9BJaX8Gxg|@q!y(635 z1Jt>A2iL>VlI>H9WQ_)1=yj&+UE_X3}LbRiXXifMhT z=em6SNGoJm@meg`3o%_`2dD8qo$=4i{?E7zD%f4pR#`~&zmYJ8sjJIjn@@&~6vvp| zmOI-exFnw7ic-F`ww9HOpMg_qBQ_iy&e!m4F(@ZERHraF-2;}GWdzLi_D0F}JQpwCwX)*k&7=F; z7&{F5rb-~J*6({_(9)v+y548J^LJay+uhK6de8oBHA&wq7JeC_~%8z8>C%jH% zn*HGiwxhxm7X*Uv8S0HzC|O_yA7Z*Hs>)&h#v;eZClmaj9fv?LAQWY#wLPY{<~+6a z%rDz_LzDgZ`Nvr8$cT?$E3@3WpPOxJot@Yio1FhlsXTAet-6YboZpJ3YQS#RkyWmP zpI<{+edG zVZ-*smBjt~O0!-YWyp9#d*Y*c?~2B)`We|Ykyn@dzqhwJKex05cPzbhUGP0x8Qnv4 zqTMJdE!`jN>k~;!OXH}nsgd*ZYh__)Cu3Gi=X>EF9v&{c(CQ0Ah*VPq$T^A|i(AdOJ1|w@sGUK|ybw z0+0dg_gPt^n%#FOtgNh%?SUA`jf%GVIUcu-aaGsNNsTulA>rF|&7}WeQWMeX>74F6 z^Bnc|bN3H-7IvJw>2KZQY_uuGu&N<{SpK3OBA_=PBM`crm@U_REG%U4T>r@$ehsljC+sZiwmq8^HU8Du@5CJE8p19%F0Qll z+c)m&X<1v_(sz-O1qz75PYnxxDZ@YW4eG4#NuF);hF=RPR#sMyxxBo5m&xfo+EiQ1 z7gZQaUU?@kkV!R}@fQ;L5{t6h>%e-qgHSSz=XuvhZ$x>$@0r`$*4AKAEw1}WK)c(4Kln#mhnzkd{|a;4(_ZKT@K{#mW1{QS5O+P%tiKAKbg~ zC(qg08E$CDZCZj*M|XG1SL;8CUu}$&quxhz?ir20)4d^_XPvs~kQM$HHpg*S+bIsM zimedpsBkYVEhSr7S<$-?PJ67Oug`*miyJ(;=9+Y9J6&6(prk|s(YegRLf}S3N5_%C z`P6#m7cKz-1uAa;Cw8;tA}F%`E>8Q9EL>b_eisK5cSJ=EUszg3RJ2_hq>8-itKUjX zOk}x%Pc55{uB&5gEHQ1*R_NvB)d!<`-RC_Y-4b$aubub`+fFLzFwf2m+bkm^W9Ob^ zHD3B;HWX3HA}MKddU7&l#(;x^6CJ~(8eLRW6o2bubWF@ju42rKf2)Ib!(1JqgauLv z-Mvmq69qLjHDyAv7Vo3@65X=sxVX4hx6K?<=EprxYAkvopOlZZJUoQ1qgTF$W$3$Q zu8o(a7`6I*vcvs)u)Mn3w>DbPFaMtUN%aCcI{FJmMQk~A?h@G4C-$aACT*>$d{!hT z_s=r+_g#g(_FwiN@G#!f({rEIlmeH6y>b9SLPf=Tw6idT(8R~a#(u!S&`xq;SYa%l6c;CVg)W~W z>Rx{D#`}VFFQs%b&uHl!3IT_C+qX26u-*h=Vc~uL_Pku0A+iU&yi_cFwUDKwl-RKT zq?8opHjLu3CpBF^gJkse6IImIEFFIgN8eL^(c*QW;!F5C5)%!_Znkl_7ULnLzWKw4 z=oqgbY85=@w-e*DpVdznb^qXj*sr%7x;0904L&y~;#M#=PRAo86x{MA+9%4)dEWh@ zH#U}R^q#+h0+t|y>Kz^)4FY(oX!Dg%Ke_yl-e+=I`WhTA z9dsj!Q#U&&=PkLE&DKnVpiVaC_3ORAx+7yYnKI-O?ec|PR#nVy{7`E1yHJT+tY|&G zk8MwUa&l4)H_Q*usI!?Mk(9C^34ha}Tq>+NQk(jFIB-Ew+WH|CC}N=8AEyjm0kcoF_ub$Mka$s#Skr$+&aQ&>J; zW|(p(aKU>=oMbZO&Ye3d#3of$Ro^Jpb$)fd=aoa+&woz(R#ydi^_|SA-hIayiPuQQ z>(g5X(Y*I}se`c;*W=%yAbzAEpyakBLeLpjjFUsD`EPH}%*u|k2B4z@SN7`i;;rza zm)F6%`ODXPMUN)TL(~SUP47D5LooC5@uUbkd|yxE{EnBvhl_&~CcTK0lOgUfl8YUT z_7LkfAe+@oc6nJ@qR@77h;o7bTNqpoOGgry$IoSOzZL70ggt8SNeR4l>(=b^cK$N` zs;@uX=~2)k&DCxm7az|Q5*$nw|HdZ>O>l4A(1D5~MNRyGtO|RA-|QP!N3_Gy_8cR< zz4hi~)i;*R5j_)MbNvwxqsEW9GI0lMqurIve_r}FrvCV$E<>_Se|3zB9cs;MUDbN> z?>A|MBOSf(e$?%^u#InP zW4tUR^`}azP*e={q=}JH>TT@!s3_^zS>yRCNoES?dp-QNLt&5feh;QRGLz;_6Lr_U z)dBI(9P-R$v_MTSU0d91pP?wTh}ZIa55(h2RVqY3dxzhBHa1@!W^IUmc2?HgS(%>> zHYV7(2-GA_7_0EkJh$p5Vwp$dISdt;{#f*UyxsR)L{u~-F{H&Y{(3Vj2Zt)Hgiqs0 z<1H}{ZYTUR^%i-2+yv=3of6&MWfFBQV`F2b7|WHuI7Y%)jh5x}e?yXLJ{_mpT)lGu z;3SwzE5CoUWr%C*>Yk45Ro23Ou&E{w45)--#HNUOR*wipMnv5G{_N@5;g(i1cg~|o 
zre~%%-QC^ItL7`8$(YfN^Sl00&+80yr#59AGl`sOdd0R&uu%m3LMMp@R?4Vh+6Y)? z&a=?qU}AP_oRpq^sEOB6SW!_?$&#=Wid`%2r%nRQDxVW)^O?P)BW7z;oJh&bM#E;e ztynvU&Tw)*k=zqn}HFnuXs@HlNYncs#fl1?udnBe6n#p9O&GNz!~6%luez{M;b;q0 zMLfK{Cr+bBM)H*3KG0SKRw(50*R*V?3zL9W!6{&aJQM?yfMx1&g>ehtNg7n7Y&<13 zCg1}p;dv`MD|c83@^N~?!om!$y1L%em@6saXoTE1{OnQ1)HYgUsdn$IUz3^)GvPh~ zkf@R6Pye9G5kD&&Rt%lYd|Oq((kw0h^F;!q=pxre=*hJZQJNx;oaj z0D!#i@;!h*>C?ih<>ePvAwsQ5pFfL^Yg1qkKUQCi}>^Ky;YT4 zR5+61&5z|>Lv>meNteRH!XMdTiUp&Ju324fz<4(n{>-;DN1qWG8DtCH-2R>6KkiFo zz%EWiM6~F(|3{l>bmhrt{ld($2m(HaRo1Y^JuUYIEgq4C`+X zl-=~SwOyNcTF+b;9-EqG!cGCrDfvD%>OU&{^^kHK zk7RVnLSvZKMzY8-f02=qF_y3p0DEe>5V#)nFw^q3I^9rPzp6+Q(lix{(r~^i&0NDT zN1<8mTa<8t=>gIaf^2DZRsOdY?Yk`GjfyAR`0Cs&*L3d_JjBv4GGZ4O6DuSl!#vPd zd78p!Wj4KnV7W>5I{EM3o^y=m`emWjvuC%ZA3AY|gfEU-PvPO*2yRzF<$$w`3y1&S zjqncsG)ez-q|}#8qC~!Oon>Ygf(gJTd8bTMbu1O?3E+Y6^2sot1C8Hk#RmqFu;$TB ziKoYLZ34NqrRo&m8xe%_o|@lzt$?G{0A?nr+hG4R9AmR6H&?b;N?A`eL&7&V32H`A zYja?3F5}$U;!-GmKA~+Fi4B`o)y?@q4-XF_d>tZU;@d#;U|{4?lmxFo+y66cw*Iq-EX<}*tKh@-Orz@>3BIb6`c6TH--s`b z|Jlq!Sja}tz(6cS5@k|SQvUpq)oA`(_;)h;E~fi}Zv%%j8Cj_0$&mkL&Y6IH+l5>9 z5;fFMR=7uJ60aqDRU4abDbdqrH(-fzt^4y8RZokK1oRExB28zIfBmnH{AD}n*^NKz zAO0Eca6W4RHZ)xIyn8e_IC$XB8=`~tv1XGUJ2G*0r6GZ9d&v=|W06qw{D0nH#G!$Pj8#5^if1?-YnB6DB|uVG?F02x(hK!{R=8alM- z9N7UQomvUXDlKK_lj5vS_X4 zJr%D7ck;s)D4ugBSA@8@F1`H(8xs`?<@vBRdtZZb$;(H&p{6u76g4-CCTt}NPr9rw zhC6t4b$9opVcck(b)pyZP=E90&1^zS$_5F>G*p_xGXan3NTewid0>Ir1rslopnYnp zkW+U5jZ0wIa$a6d$51uwOc=CFy=9#OZUep7kJFKXV*c@X@DaW5i;Y%5$mVUk$yQ%) zPBtDF^`Y+WP-YcAb;t+9273;7AVR0U_#jD!B(FY4+u`35n;efgEIJO)4)45>a^hA2 z{%MC923tr4h~9d-&ZfNSASV}`2qo|FkNePM?_>g{-Od|O;SRwMT?h%^v1n+ z?_T}pGM%VfPe1@-$yFrImh1lg;_)`8m6}Q@u=ZQibz0R6$_bq0#Kgq)Y(&3@GA4X! z;K8&ZI=>Lz&o6`csNY!3Q%>j$3&S#>N#S`O@@J~1AiwRTmpP*psNCD)hxb8UyxW(=ox$tWo!fG3C9&D1|>mEv=lH#)K_+@w*bEl2+P^*my&VZN`w{}JS7 zhx1@Ee_#FD&JLG={p{qQzBWBw>wk7hk`zgg(?mYOj2>HgAFhun4`fI#j9fQ6*<1Oq zXmBTgtwc;*Jh3w|F;ON&FHp;f2Uu5HAO^Xb;D28Dz4BQH-^mt?H?EPi7)Z=Ie;SsU zcvHEn>q`)(=Oe7c!;{|`q|1U-*Tlrd;|P;O3FuC>Qsm_2t@ma?E=_DjLNeZ$9ET9p z=ait}G9GABsys5kr!zVZ(!Kw>m!@X4?L>Lno*F+;_b0VB6W_Ni5`t=*O832SDLGxW zF0e;|6h?gfNaVR9|A`!wE1(~Ff{(OQqlT0EUc9TWZhW2yw}gbT#Np)gz6Mc;d9h)j zFAeJi>tjwe-jqP5oNdO6<3@fx2BCNR_HAiY4t`TaRf_GIzXu2AjCn#gl{=+O;{?Nk ziOkNAcSG3IkNVgu!hri2HR$_aT^uzH(UwDyHv^h$dYuD(ApuTI3NkmJiwGDlgciz? z$mQ16){fntP&2H3`Rpz4SHJ?AYK5md<;^yAWw&KQw2QdFa(Yrb)b2I~OzMc%w6tN~ zOIfipAt51Bz=kF^Hns;ClFW^-p|`vWm4X6vXG*?nQi!^3a+gXaz8Y@5j!VHJd>nJIjZBQz8X31hxQwlb`d^(bgph}6{NDY-|xWV2@;NH9YSo=rAh;J5lLFRbyi!j1+SH^D#P;Uc|L9OPZdT_}QO! z0q5l&YPu_+to(D4pFe-5KI#DM!+)Z=d_=)v@IryM$V(xR7&|}v&foW^ll@aQmgYb? 
zvM>znT0;&;l)0>$xF!smi$q$__mr*p#v+^Sr)w2^o#f=?G-z!Hq8C1H&wi?bINRGJ znJ^^R=7J(v?R$23hnIJ>vK}7`i%8UcyL4Cco&iT%*4R#+-ORWhi2abR4WQ27M%5(C z*yBk^N#kq_WI_qvWpXD>PU_F3Q5AWG{*tHWd&vM)-1^l3eBqdD2G}tYuAEwEY+f!KaC(#XO=hH7K^{TfZnwAC4?G-d!umuy#&`Ku_MD*k2pvxAQNv6yVnY zvgvvJ_t{gV(pb(sDU(X3PR-7VoQN0d0Tq5rgfE1QMvqBh_*^ZIgT}W&BLgc9MG# zGt1@ocdB~%xb%dAQfE}3>AUEDi%>tD5bKMMhO0~C(?%3*PAde+fIlw8Itf?iVAvyD}p3)0^Q z27daOT%4b~(uKhaE%YV#Hzw3-o}-+{xUGhyHg|Na+i$?x+t-OB0tT9VuR3aImXKb; zsNwf02>X(gOK{)T{o+N;`^aQYvy1yP7gX%+%d36CE*7l0p&!)8n~%ycl8&LFp^s>5 zi(i78?zsbwnmhOvd!4FAp6R1X(_wQ{$v%3BqJn~Y{LQ7MrQ~~EK)K65a|8EZOP*AY zXRq3i;Q=Jj9U&wLm8p4tpOaIUTIQWlB^leil(uh|pyhYkIa0%lescp!B_DBea`HLg ztNGq0o6~j5HNQL$rz|-xTxl{(0AN^^Kcd2kRJOB#w)-7b4r3A6gt`5fmM>ng(8L07 zKzUm1!<5C6G-seYHE2yaiWAJ|Yb+xz6r)ktM4lZWw z+uuOj#C3|&|e{DUciYF5=R>tgrGBgOW@RxOKwKq!$m0YPZ0t&6vj)aeEe z#V;BH2Bei29!yq~dgu4|v>c)KCT*sO@u7tbac^DggEX9iQC65_u~^bUa*LICrEWLe z$EM}VfJ-pzbcPj;9o$2%;f6aO)2a#H3tRO6~G>T)zstjYyct8%X7~`QdS|MMx`15<)}w^YDnPH>*K9nMhz83 z#q8dDryCVEt!-?2<=TED9V4@qbYondYrC@HL|0Ge&ny;R*biq1vyuq$^XoL!zjAjM z9Al^hzinfC1DES_=1Hsd38SG7zx{0DSA@6bCxZ=gpaVa}vQTa$+riHcH>diZH+ueCCA%>>^E^^KU#!ZY!M@DU3{>{k#1^>hz1rC4;E(*7E(b)< zr>$&q-{CazTp@pYI2hKSqp6gXm4;m32?`Qql)4B!P9psAWXP}(b}3iw=wmBigco6)a@BRdx0<$*Q<%O3~fRMv77UQSb&xjbv242^7x**3WmrumOkqUp>ngJ zprf5X}dz`($IHi8KQlHh|F1O||S{-uskn&d?2L{zJq z<>%$C%^Mr|*UTuOAPf-1;pMqFa7D5kiw3}zS>7LR)(lPGk*+&EtRtsfC@F`E)DOr_ zP3;agT6JmZy9DZh4Jv-?*rn?ahh8OWYaHCBE@{w`mJURAypIwbp2kvwGVx|J_v29TQ**047ihzL0Itx>6RJv)>sZ1Ja94r{cz7N zHYVnI@8MLfRSc^cyO0osy>>UUGeR}$Gd%j(wY9Z)+i`|7t06B1|n+=-KJFblB-lpfJEO$Mfit1syo= z-o0z?>|DJzV+P`SMCJ5g1+ z4sJ#`m^P15-CLittp+6E7^&ptanAAO*-oeU(d^yrrVUQ$g}H}$@3b6E_`5;NOhQ89 z>%@c}-^(BE54!2d$?xfv8(Ae^U;}dkhi+LYlRRg_q$$3e-aqw~ql(YNB}X9@a<26B z^lD&yPcSm;0MV1mrE+s~d!Q8-^_R))C$qQ%6EAO!5Ze9&`&l6^CO=u(YmxV;ZD;E3 zMnI>>?pC?{er$&E!w_YdJQ++Cj**tC0>#%0xSb@xL_$ImhAkBbbRgM{`{iWiZH(7< zPt4aIGSm6bp4BiifBQ;s!6&8_^PO3K!g4vvVMF*^;j0?t=%>NwWCRsO=$<10*x?J5GnzX2A>4NpvcDzYcB67@#1|7pDi{ zCXP%HTsZG7D+;@B8%=?X=s$`CND%Y*%MgrKNzKbSt~YbUgwh_C8NL=tXhn^U`p}E+ zL$z=osL|*IXJljy3=I`iP8&g+O3OA~^P11*M;MRvSFh8<^7~f%GQK)@6{IJ%3 zZu-Y$QDNbqcyNnSp~VlV&qzQk#IX18pWVbZTux5!f9`Y)thX%%52N$TSD33g7!7h> zO&59-sFL*e=!g>h^{-2gSoHMt5KiBq^c5-^J^k##bFn#9BMr_$c4np&c!*)}*x#n6 z(mDZSoN3qr@113JAW7!KYosYNYW-^?d7N`M07SCw=bExXha>A}>`76l6o?*%iSB&X zn)N%|Le4m}k++@iMH~Z6e1m>B8|Tv|Mo61Z3_>vms0c<)TPP?`_t{fiIZ1*w4P{7j zp{_x>Wlq1-+;PARgn8a%LzFQxGAd+>B=cF_sR0{z&O|{)B^C(8N7ZD$q7fNft$dZJ zM;~aHE-}>&;ov~ICK3%*_@kwdfY0v%F)JHuJv}@7*LpoO82GTfq)|dR*LfVM({ekF ziKat_LJYJi zrtRm0^6;e)b*Of{9axR;;yD>1xTo5M!6oka-~tky3@fu9yyY*Xsse(h?FdhJ5at-G zu%mXzJpRxrAj6Wvg*bngEihX8gco|8kMn|-vt&Y5T+Rm=5bsgu+U{hDxV-?)fpmJl zYD#i3N1`->TEuxdY;&xHGN<&qDv0FDFWj7*-(&3_1|s;}{w_)bed-OfK?A-M4>U4_ z;I)NcWQej#00B6O%%ZQ>zhT)v1R?Yh2qdr1BC12YHWN;DEi(q}e>kJj8D@*l!^5)& z!IV$6353J$t=sb(Xn#5BxW)uTN>WTD4Oo{4P2I8GYHkBADX$-tv6A43oTn= zaAQzsK4$PAx~(x=5UnoBlP-oss|gSOdxz%)t%K36g0{9cEF2sXP^a0z z=a5;Uw@9|(01){MD=X^*cJ={*?i!m3PLz$&1?ENRGEl{8R2EZEgtJ|FL)UMP98lg82NlaCPYpdG#hTa;U-|B$9`xCvgX|O^pp^Fu9(XC=D)*3_Rl);A`2R zKO3EcS&D??1xPr;LHrQwzY{1Key57FzDILNF8`B=MrhUjSLfakA1i(|d)p1R%^BRKR71|Gml6GBMDhPF%GK0G6SAnKC zvc@_1&`b$|ZPk`Va={@U?jS?}HTS=S#YAat7li3YFsjE6yL(XImI=l?XO#p6FBYQk zMoGmFr9cEC?elII4W|}E*CL*Ogmn^}m+un?J2;G_qjOT#4oHy^?+ z{^wp}{EmxK39Llkhnrb&f8H?3{+=GxOnAde_-!ZE0r)$If5Ge;m7shliX#vSiIb&W7BK{y72JLtH^ygyz8=0Npr%%I*@(FuXDAo7RN<|*h-xJx)vafFiRKepa7 zr&n(6LY5ErBHHng)=!&UmqthsKSQ4GNdKroeWVs15n-pn(D)nC&h7RYQ$t((MZ~X6 zAq4kNU`BHx1a#IZzfKNk99Rkd`vS?UbKXugM546O)^_twJ};V-jjn(A8ib=*)is1y z<(`(x{w_HxbN*n+_8(z))UqHOj=vh8A#Nw-FP#8Y>=Zsnbw; 
z6kr8+6HiFAG&G3+yTSP?6cnIzu<8Y%PXS*MsPXZssVe`iwJnL?h9VdZ9!DGa{bn(65yt?Mic{poz{=zwGIo$ocfjR<`J)HmN444;XvOq*rJ`67iXQo zW;@|Xf*inm#g#vhgvmUH zSJ$btbTN6#v>Re?olqQmOJ-Jhzmp zlPpNTY4Z4Sv6M*^x$Z1n1+%!FQ{3I?mZQM=Q zYs02Ym|l``!^C2(@9+6hb5Q$Epg83VXOYsske2JYT6uWDa5Um08}Y3h@v)8gfJS_q zMts{we0zAkp7*Z>j9y#MpXlKMBSCrL4~%cbd!N7eo;aj`{#V8ZZa^YZ6E$Hqx1 z9?bRN02?(u^&9x6UG4IwJ#B3EjE&7dwXtQY2lG8R)PtTjwEoJ@2U?xaSpLk;cY4ak zu=hQ<(Sti}jM{2r!e$%OF10b+bDf^rzx?%#@Svw1z5ZzD`yaM($QT<(EVuEpaUQ(U zgLivyo{g8gZTyYrey^&r^JAPgj`g&2tf!r0J;#eY{aX}j*DFe}as0zJPF&`}N)LYG z!5tnvYU3o&vz!#{$vbQ`3}>JrjmU5YxWwhED$CCkSM7^eROgBNt&ndcam>YrZeZx_%`4OGxS->?7s}sc^?shQIVO znn|wFQnHNz$uinXh7l-fMzEw9p^{{DlrBbRNif32VMtko#2HZ%W5h_b5i5~~Ln4fL zakgG3N8LMDFPV4O^;Zte&U84!{40;eI1;k+hK{-M-p5~AcPK*Y554#3eZ3JO z;)pQBkt9JzcWGhdOB2J{w4XTJ7~)R6`Qt)yMVG8L`-(g1!M)t}uyPl?W6se+F-vxk zjM+po@(YsDUy|H>n8T%Wsbprpt4H?1EnWzBVRfO%!suV>nHh_H7>m7(79v%-Mr+A2 z+DLb!y>v5zB;5#+RHK6=Ga{21kzE*(3EH>i4n|}gBQk~&8U6h^;LJI}Z{|8t?MRx?5Rb;Ls7 z9whN&nE6Y-(Zr(OtQ&e6Qm$z;$upW;)RRG|q!Xi352F>sQ5>4;z-(LSaKcXmKmM-SNXWEMVHC5c5XMWjE*882q9jUC0@6AL%b4Og4z5HtJp zPBd%;+^zMUMed0cGwSUykqVgZ+iCgsqFCmoTDaP*Sdl;QMos}=rL~9;U{g8K7* ztOtuht#B^~i;luP2orW3<^hfz0&fKMawEXkIT-08Gb@nSJfX_h`@Y`))sB55Q92u4 zq@y7RyGjQmSwf8z2{BSxsz`B~1R5FA-smRnj7({3VLD}L98HyAtET=UiC=c*97o^>;shqFBMa(Cuvou{*zv&EsQJ%hKh=JXz(`8|+U zJN<&?c*=#>-7~-ZopoEc?(#o)eEZffK74)g+&jngjgFOu>~VA6`X)^3>$kkJ#I^6$nSI)cD|_Z@cN(fj#mtlz;=X9{p&W5e*9 z8d6yG-+Qen@gJBe;XK_$q-rS7rVag`gIpCyZ)9#?1TlSJ6bgc4%Jn7_cdZio3KLd@mWG&=!(#@vz)H_Q!pbO;i!G?H4^!@LtEa(@iBbqHoj zjL2ot+z+@2Tn26fS3*PJUn&%N7J2}>mb-KfCpnry9lk zD_7Ec3lL{ZX7t5OEv4Twom|7T@)ntt1~T(u7Vn3dPU%*#2;2l-1#SSZ^pxKKs$yKm z4aEwGF% zYG|>wUnx`lx3^_>QPYd9{pNYJ-=3b;k!rx=P#xGN?;6UxvQqR83PtZwQS`3J8X0EQ zNNY4Q!l*%p*F(PHF)o6TZxV%urV{XZ=z1uVd@cAf^b~XhGyqcL3#$fM3%vyW9@1fP z5!3^UyHkWUgzSMnh4lAoTzLmt3B3xv2)zvb4XS`t&aQ#hLEE5XPGgoxI9xiUR~-w@ zf}Vj^LE9l015w4U3RP7+_3XPT>kM8BJq&#a)l*gomO`|~bVp%t_1*lh^~qbxj4CuX zdFSNDsB`t1lx)brP^_V95&dCO8~(paKXz{K@iSZJ%{RUa<-K?5MU^^Od$+acHRzfu zlE*UiWnPBggMGnjOj;YkUgU2C@1{cVAFjOj-wjtroyh@1!LqnHUX61u*}0!*E#GhErvT zxitwR;SqXcO%yHvfR0{@GPNKDOEeb3V$>(sR4jyPC{HyPp>iKUOWKpu5n~xj(#Duh zXcy>6Z&zah%s~%&79-!cXgH=b63HLUBN-EgaXU&iA#&9x(TR3;i=}_joYqIOa2ZWU zuB4M|In@&^m|noro9yC5`GTYSdE)PX4I{t}b$aI;lIzb_F%V@#A&)*lr%xc2xK2hEcSg$9)rQ zX-|4}m4PZ>rC8genwM%}s*TA(ZRKfic|0vw!>Afa)mR!M&Sw2^@)Dm=lV7rEqbXuZ zCM1iM4e&@6B#@P7(mXz#`F91js-r`gP8VX*+NpLfrx~I%Efe|JI%f8jn8^klVyb@v zUs1~THagiN4?2I`8tHIlcg#>N32o?u5P76ImZmPqmlAgqVjK$cq~Vg|%+L8^_C%dC z9jdJ@o`fT4dV#p(+?OIdt_h+YxtQNdp9>UMgX~M_D(-ReNT4+AMZH^`D6W2WayKG- z6H>5})~6g|!A}0~O|YinOA6c0fiV=khIGTHBp-5M82LpAxoZ)?uTb8b{G4WZQO`%X zJi>t_3htu4N4p_^ev1@j;Ve^IARD7_;#}^gipkB7LN0Eghk6{R$`J~NARt%JQ;u(S zsX;+5g7S}{wEuIeuB9LWfq4r8@EvMakY5}?k6jJ3gqnXvM7k+Bkxz$~QFA^82k6uN zUD~q7K*Zil!Irj&*(j7F7CWE?2jsq)cvP!Jay@z zA354v&c5yvVB`p!rWnb|l>xXd@MG7^}U1ea>^h1sH z=ag=O@`AZgBDr9G91kBX%`jay1)Cc&@WIjovt@G&BjJO&TsRagt?&WL!4@cttub7- z1OpJVt-v-`SqRM!qKqlKm?)~U_LPTG)(#5ATiFH*#d8^8VIX#^_T+=GY6XJ9h}{oW>rt*6r%Fax#)Y$ zGtM4_S0JXw*K>h6t&nI(yNk?Z^rcs4iTnl`7rr(VDcV+Kc{h||q}hCK_gJ8=B3K`+ zL2O-Ki*oryFpl-T43BB7PU1InAN^J8u4cjFu4KXV=R`|Vi<70yZ1L1!F% zl;;zN7TN7<76V_PSVpXmCb)#m3Kjzwpjs|u4RC^`gwv!8UqL$^iw93ep|1`&#Hmkk zjg;-EvM-|3cK(0|1>Q=guOr)Yd> z2lPt~-cyH%*EMo3l!NL(`99?#puB;F8dRtOh3ZJrbE@OS>mkX(HIRz5*P*W|SCQ5inhw1H?V`*9 zs;lL3Xp1L5jW8wO{r*=!U#~mS4ypBesqHt%4z2Ziu=P#ydbPicr@bv+Kg7Qu&(#(? 
zjO}!rz4vhD{dw=(`6Kb{EX4Hfc@EmoyL&$P6m$q)ZOx0IWAKx}kHAduQRoQ#46qs; z1+E1Lfm%m@r+J+xHh@#G3mqsWGY6(273NJ!$B@|{Mdo=jyy)hxA~G+N(QDN>o39dI z+y2FVJ2jG}%sW05(bX*5r@jT=I1NQ_tcEursA6 zh?VPmvK5*InZ5Xq685S>VUl}*<$ci>jAj)m5GRu&Q6QMa2Vo0a#TtJpO4NJs<5}j< zp>_eMJ{*S|X)*?~w>j_umc8LFuAHaFzk6s)b`7HQIqJF3qd0aCX49zSWOtTwf$C5B zt&`s_=Vb42(yj3Kqrxmg@91xmQH$Q)wBlKQTtAprUS$&6SNaXvqdDBqdG*m^UO-KA zZz|Tvw8Pxz0u-EmSxvEQR-?jfB6qkqCe1bIC0Qpzq@iXYN($P}1R@h_2S3&o-H0~2 z=qh3n%4PfOrxNZ+4|J*g_vqpMIZ0v;tK>G14M-Mu)}jWww`LGuA;jLh zOP}KKG;t4KR^N_uPT;Ml<29O7rv|f$y9MT;oU-D_6k=@tuPo3SJPZn!kln*g)HmkH7}n z1}kJ3Hqf@@J7WWFOTHsE&~{)44~Am-Y)@Y8p@Co^*3du#N!wxh3gSaDZHD)gN>wrA zEfrJMOgr%PbhxTi*&(c5EvIH@E37pcld=ZEYWIXe9g@i>{qn`oQA%cRT$*mHNGNQYXIY$`$v4C(myxu7dF|9Lf~; z7xM-Oh^tTOyT_BnwX6L4c%De^4U5;+5hHbQ)g#wnrmai;&8-XG*o+OCO#( z1W)vl@QnWFj~qF+_^OeY49rg|aQd+v@jX}HbM?KiznJ$`r9~*ScN1o7MSm!i8QqV( zN)xfr8w6_Oj|ZE2KS=m4swDbh;(M|xHG`_WYD{Vt{XQij6PecdO%sf+QjG5D$V6@? zQ#Xd-*+lM*ukrk@lxX5%kR9LPy3z=Y`@xs8F#-$xNhh zIigBG*xpR-1>kI&GL&h1Iw!oqO~Fb$W9|`zm!uNRVm72Pjn?+U30KO;Z}Kpz>AnP_ zRNf`CJzC67i(4WdIOpIP6zCQVHTXZF7cOar-4%^(Cn}cQ(?rZ`RfTmAz$|bG_lv79dJp@ylOsY8Ogs%}gZRdYI+WFe}kezZ`*lTtvHToCww5^F?6Yqn+XI z=QF|jMe*F!oj9sbER09A}=NX#&cYl4&xLJw_<9;rHMGgbmY$kpa(p21)XNOe}# zT|>ZDmQEW2O1Wyip=hxI);b~pH)sgIZi5jygtN6pTMY%@M-7( zyndIaPShjt>S%cvdFn99sK0YJcm=oyRIlo59PH{Q^E6CC1B~uRl%gf&FxmP$-E`TU;}6vpWl-+;zYxYyajUrrw1P_gWd)!W@{M^HX4k*x^|?p#+W?$jqwVt0)$$L>08 zU#hRc|MnhMA70~6BUx7#e}9__d#eiXHbXh5KDx9wBFg1uv3)h(I0SFRfqurRWczCD zIC%ZwQwjIEE_PcqVPN*K+Mu2z^*yp|EKmLZu34vd$g)1pbe`dLWF7bQ5XMb;0VC*O zG29BZ;n&ZF+h0PmZYar`VI=SMC8-)s@)ZZ%`yWcuJU?+CS};Hh7E#N!>&eTjIYC3E zGhmhcqR-AmV1K1817oZS%Kk1}!~9fjQKd%}CH)#sz0!PBP}a`+p4#4C3RR`7x%DJ1 z4_m-0!`Z|XSK_qWb9;)ba@eC^Ho+pcdhWvnHgRk5;8oi4J-hmtiq?hA1;j8xX?gT`bOfZ)9=qo(NawQuPYI*yUfNVmyfUk-QQ$ zbmgD+ttNZ7A0o!9+H|;2IaKfs4nxUP_h&GlGRDvadT@>05*3N_s~~d zC?WGvIU;6mL(#aJPa~?wz2Qc`=-y5@^{Y!`uI8(O*H2`K6;i|GD`p{upzhA0(%>=Z zbIQ~$uio_VF>i}<>$Q74_A7k%Azx-WhLcdqWzSKqt$s@HY+Q{0AjWU6f}+kC%?(%w>?lib2wKv=j_h8v$#ek*ZRIoQa8bC?3Q!}uz)`6JaDf0;f(UP z$**M!h}7>chLd?Ijm@KfK_{iDeJw@8yK{_p&lpug&KA_{-33Nvg{m5M*qW5tftVVA z>HHBD<4l#~$MUT1&KSMcc>j6Ie)v@OH74_1S)G}y6WE{mqUM!%ufKmpw$OLrl+HLi2-hozdAx|bU7gQeeYd-o*KVW}~= z-bhM~p!7yf>Xt{kV9~{jvt>VOnz41f$7<2Z;F*5{&Vu^Go8T_!AJChST7YzY{~4qK z?#ZBjuk{Lsn(C7j+(c#vcryjzY7?q}WXuX*w$IiyAsyfPCw^R6HRjl s$!*`C-I~r4=V@XC_@6g_r&#~=e?nLdedzfw5%j-unCRg%!S2fcABXx#lmGw# literal 0 HcmV?d00001 diff --git a/docs/assets/images/icon_black.png b/docs/assets/images/icon_black.png new file mode 100644 index 0000000000000000000000000000000000000000..656ae797cf4652de02de79fb5c24b37c94860609 GIT binary patch literal 11559 zcmcI~Wmr{P)b&9S5NSoaTN>$JSLhXYh$dM+IllA7W{O z{{-1cNvSDFNl`gM9V~6$SwJ96F}^XP^6f9lI*l|sY1pteab3IB(sz~#BdaiE3UF;d zX_70m-v%;ZDTIgTEb2NQn3ZoWE*BMkETlsB3#hFZV6K-By;s6uGZhiogEe?ZHMj>1 zF}>*D3PVaYtIhEQXg=~^aoF)Ao{M3#DATzC<#>CCU+kx}znakVny z!;(!jL-|9j_zF?ieU~{|dY>N(9@Z)l()q2{FCUSv2ev;L#7x^96LCt`W_2kW`CVjf z+o92c_jZ)osJpfZI=@_$U{~a-de>(B%1Gz0P(L1BSqQy6?zgO9{N4f@#llx%pPuf* zL~o=bjcAf2OYn&VpQ(L7oj&|{|9k&^@y%*b+S4z{a~##TTo@-i5Blrv{V8YNj!_Q8 zk+r;Ljcg#lGQUsG>aL=1%%ZZF%%)bL>F+^4ss_L7japs2~c`5^p?acIP}jp2=ji z9(p-b(or`q$p=ISNks;QP(kZksj{R|<#e9@Cf8xWh?H6T-8LJV`fN`_j#D8_gP4GHqBv_|4lpB`edF8sS}*7CS3lJ*XwnpMa84zV+&i`pJZY_wJvq~>gY>cctk`@ut;(i4GoR8Nh5!@A6O8PnP4o^ z)v+HM7PHm%qR)x-IlRtxm@_?=A9*geBH=%T3`Qp;&|CE-Q%gxnDWIr?Y;0Jk#KzJb zA05d*g?v5l-uU*JBG$yj#O0zmWWLE8u4U*clOg6Sl&Sl$wOX@Cn;H$9OdeL^d~tc% z^Zh$rN=(de>jwd%WP(s>v6CSwZUKShNPUTp&d#1(rKDIoh_;5IVa8W(i?EZkv%FD0 zYDp<63q{4SqM{o zm^JdcbL67H%?dnyT(*mAMv!Lde0+V^z7W4yty%C>iiKpglr~=}Mw0M`VNZ5NrDcHk 
z#Ychoj*XA!=6(NukUdQe*55)`mqg*gOL)^!FN?Civ-Ka3m58HXgJQG_5bJud5WRl4 zSHq!RFO^fcWhN#jEY-G=^hX;#k3E7`O@>PhgzHQ@F=X)|e(%B5R)8>aNu(fi|Ks$z zsGxvK0dnEb%*^Z(*@5$-m{12t)7aEAF zp-fiG8axSJ=e!=a-+VnlJHXG$sn*fe_5XsChL-lz?Cvu>JUkj28p)u~pTmcS)To1S zk&uvRI5`ih!|%_)MiC@r*T+SCz~=dPBNY_juC1+QH#X|O(b5WkOvvsbopQD_tzm9y z85R_TGQ%W~{fg81uX3I3_;a}oy_YWoHYQ6mHx^o25FdGYdECRp!)%TGSG`-W6%;~m z;6`^e7>^&zn3#OKKpZP7DSe6`@FsaeM#idHtV^HwDqT_d;m*(IX0d4-mIAkpA&FNR zB0B6k71DdtzhD(j4)*r;U3n^Lt6=t5L-1(L!H{MpAK+`T;m_EXMNy39a&vPl-1p}r zJ*b5o7heiG%!{NXC0T7_Gcz*E4W#j_p6$*~;wdaow%qYPdQcWOD1NUGhWTBc9@)~; z(s(=|ytXx7Xklh%_Vr2Wa15>7YPL+cxv6P(@skewnmO4($PN_?ixMg->I`(fqTCGD z>pt%ZD}S_P8x|hEc04G~(cr%S9*%<8+;+CM^7sL7o9^$fMr|Nuv^^Nhg+c4Jo?hDFh!RIncenhk{^}?iD#o8X6LY z@5R#R5f}^>OD>tjZeZ@^B}5)5`S=-Y*$(Cog{W8e6UJ9+l#ra=sq*&NYfc1GJbe6Y z=uz_b@84IKgLsq%J}zP)pb1?Vts9htuANnTciDZLEOiav8L(>LzH6JT@J%-bY z&dbZo&A`y5jwL1_pv|sV{YBP+3rwzUvXJY~HT(_`%s*HH=7TYbO@9Q7fjIEII4~h; z=#<0LB(-7JuZcWj182EkB`^3Ob38Nw?#0dkBo|0*1`S`0Mm1$6?>Rtg0>2oiR?DM?9l9UWp<+}eWdY{}rDpyUZA@3S2R z(*Rt_ke!`(8c%}G2nZn5LA4-SReWKst9&JK;UH`~p;`uM&^F9R&jheAlyTM*R4&A#3mB*xm>MBbc>seb1T3n^|Z+`d;XTMQzT zA92L272A(bPZKhIjvhteOr9RJW5+-kO=2!BnJVtMWXy)S&nTSVK}8iGUHvtw&u1Af-OWz_2#9uCmJT z?25->x>zNNwCc()oEaetV3&;`$!)~GznW~x!)~w3@W1n1i?F@9IOKW0v2}5{!Yv}A zf7zu0_R*MyfGPrAwou2x6zFJZNPP_4nI!J+bq;%^d#PHHlQ`@ z2bxLC$+6hK+;2Lkh(PNtFofQf!zgn6+V9?Q^oV+$Sd(DKKWAf000}_R{fphZcVBW= zP3%BmstK)%c^pvnAoccViW_aJMDwLQEz37(aAUUT?CI&@AxdW8=2q7+*dNJ#>`H#- zdVhP)V#oUJ+qXWOfFJX|=LWg_Qfd4SdCl#9B&4J(fq{YXyz!qN6OO1E*LQSCTe#jO zwcOCVQk=ylCgv5zSTCd^=fV5>-e~Uo`uLc;y7IbG{*f?XGxS^|AVFs}J6QbPyLAU3 zxZt`h3W3!+D?^449we5#ZfjbcinFsvUg2EJD=5645U8A<49NWhc4Y5NfZGdHZKL&p z^b|~#s66P=`bw1eD`>#!?w`6iStN8GTmm{j4#|8i`+6mVHId zvaR7Kd9Xvn&#yI&rSbYT-fr8H&yLsP^z^i%pVdT>&T`#)hTZAHkAi~es;a6n0sY}D ziI4ilx1=@mUWJ9KnPL^SwNqTXIM#`AjH-$p931WYk^1aOfYpw!VQ&VLzE^85cTo3p&?k4Shw5wLx9c*x~bTf`JNRbu}=Ctj~R8h#EYg&&2_FK!EzM8-{4C1~Ui%H66h?z3>9G zrp}lS?f6HnQfNpBE?|)FPp^Zc#P5=4K0O;X+od7sS)#nZ_&l_7tQSqjDh?&$dzVb1 zpscB>sopST3RHu`=>ie|`>${q?4pX-vvO6rS~uaX3_Y2MM_G4oukH8(;8u=6@AL!) 
zK5*h{zBy8I+WRADOI89@hH?iC7XASl?G8b{itieHuTHoc=c38vvGZ)Lt%+s_-2wLKzqm8E) z7)+wkzbK>p+&)&FVIWKee+5~N9!n5p;@qP~c_O+84UC_|!Z6sydVY&Me{SsK(@1aW z6U!-&;--kbKA1^3K0aRIygtBgi`pY0NFX(tA?mH`CJ4bt4b7qlNz4Gi(b3K2aW&=i z$5mt9weZl;^6^euFlwr9DGsIByX!RmXtI)x)t)#yG;mRCc8`S2HA>G?*w)sTJT@IB z{s)UCpqZQ&9bS(EjMn~h;`Chw~%<+QIAn_x=+swQR zwS$~C-(A_Yv$(%`^E9F15h9b<^*j0a_^gHzu+;i##iXU#UGRdfl7I7U9;^sv=^rq= z`A{P%pjefal`D7lY}k{O*tN?7?5s!n-=6QyF?$!(fN*JNIWIG5FZiS7bg(EfovTEs z_pbbsbvd2-70gDwvd(EG5N68R_vwnF984dynD^Rd+gw>O3&(3;8wR^j$Ix(q z_crF+8u_zlvN}>tfF%;or&t>;zRVGAsd07L8dI>{FEn=4K|?`N8&GoqBsJKb&V9j` zKS9qKx7L#J+7oE# z={w^}%fDI70yP}VF$VAH;c8qV5_T(sY3?DU8sPWBLf|PH_Uhp)CC&y020w?_w%i&N zu+>yMF$V#o_cC8IZ1KD8ObVrVQu7BIF5vd;3^-EYZ(#ytv?|oTz~B@ z29V>dV7M(I8>CGr$U>1Vd)Mdt{Ee&*wM(e7*lMLkMSTaoR4y;Kc3Hf z4<2xT1uC!y>95-1$eUUIwis-3A-bHL9Q)&LMm|1G4C;~Qm8xJTcJI28-jM={7}?P3&~jV-mB%Jl!27HycES7Za=ml6W?_D- z<#^QI1W>H{W*|KjBgKV<_2}s6x&chU=t3(hxZC}uIyJwy5=0Qc=sNgzNO^aFCO60X z(@0=(IF%r&0&{VGuWU7Vu}YlK`60+u>op zjV7t<8Qud27=G*N?laASK&mm?r8Tk42`}ZRZHt^unbqIS0W0Qa$E!%M=2g8uyT$t7 z-Cec?j}@$j$;a*R9u0=4ZGrQi>*Ne=1~8!I%=hP;5*UKJySp>(e;&6Y<7^z&@H@== zbZ#F41z%xp^aLa=)t>y;6E)5VzPJQMzlc{^AgC0D`?9jAE;|X0Zi@P1V`E(|uQxY0 zwVEhE5-dAHaT*jopDOWdJe|-hDnzjw-Ft-<1wwwNB-;w)D{y5ld2UTj&7WGQ6#}>| z5Rvsa!DqgAZzD+gUwH-+3At?0bMzMP4a-sTHY!(cVA28J_K*G;+@d0{<0vK)z7TIiq-;_=^hI6JTmw<7{ z!zN*UZLS?$T~Yka30P9GWX4$OLayA6ZFW)itSi7bOz`vb%X9Gn{~nF1irYgyS65dz z>NWI~dba@w;b)ToGq1bM1qa4Y!q?L3byQ<9g6(T>Pfs076WPr@Xs1zkBdr57zA5MD%VIb zadwA;Y$`cimBV#@SypY_0H>&6+X+b9$_pVfVK?Q=SQ>FGF!u;IOk*o1jQH5iC>#^} zd-k#&hkruh&!4fDeOCl*H$f-R`@8-7lI?PRo87#>Q!V~2N4YXVJ(=~k@Khp-nMT@H zp*L&_zEy7}BB1}JKaD3+c8o~2q<7l3nE|KUlW7$lA)Gc*7H_>ht&f=?+a61w2+7>7 z;?vXFfPPipZOzWk25Q-&aNrW?jocF0Nxt~4LXusU>4UXX)4pV`q#bXQcwmCjuaBn6 z`2|p8lL=()83&F?WjzhSmYHaXW^Hn@o`3K|-`sEE~Ia2Jpr z^->v}QbG{5^n$Ial~%)w3>`Mx4>$8lO4wb2>`j!%R29FiELqPx&mf)A$1zA3aB8dM z{n=qb%CLDkwX>FFqzU~4xD_3$wiY5G{#-w~KQ+3~)iV?DxhUJ2Z)%{&m(ob;c+X%E z^?DZi#@&+b*w^z96wX$kp7~S3e+EUkwf>F$6Ji1aaLo|kPkzxa+MFtS8uI7%f)PK( z0q^alXxC>0qB<9T3EUm2X|7V;s<2gvo%i)_O#*KS$W*FHs7@fM`25<}_CDD#=@#pr znS50rT)kFcQW-fApDf~8@qXP6WPB#Qkm26zbO`Y65fbuEv1V#?be5hJ-*MVC0zy+eKSV;)~MwX^1NC9S3!+wCw}(?!&V*uwY?-=z~sx*YoDrf&L7FG z1^ngvgeR%xT5p0Zc?&*BDC$r)Q&UqH;e5arew-7U2A0MzKK+^a7+0wI$=MgIE*l4@%a$NFClxr~xl5+fK^!u#*r= zZ~$=8=k`N@x7ARum%pDAS|cA#WYhZ&l1B;01aMD_XFUi(+81J3YFBaQ~xPNeDsGZQ&^BNHCBnzeK9=d5z>@6KDaQi&?rJdv(7 zs{HplhcbR`|I9V|w$a4(>M@ae*9@v?-p}IVegN)rzhoV~{I38(W}mIJ$*J@!oR=f# z{Bow!IO1_W=kDt8a)v=lCGvW&VQ@^gsHv>7^G|Hzb9VN`-I*$ldNnhuhx2vf3~=&Y88bw$cYf7VmL|T7 zM^$oecRJf&D0P{mF&RQ-%2&-CXqnIGn}I8g0{TU)Sq|b zRGk-mhK}V(kx*D=h`HO~VK5Gg7nnqxi$BvXWn@0llNm&@J1>SZ>OYt}ujLmgTD%9gnib%9=RZ3*#s?=v;VPTs#K~NE~%hkAGve~=a z>)fCHMSdi}+@(Vk&;yzNTlkCt+Z{lr@v<&nzZ;LstL;*~l0&iElhNSBAJq?kzzV=> zaN4SWD;x6C53sQu%&_lu`|8kin)vEmCPzG*V2R)vJL{QOC)kU=#gqs*c!B^>IY zlhf02SAtiCqZ12UtbFgL7P%~J#+r#H!vX##bwCEQcLVc8K+0e}=8YNtlA*WEObZ;Q z3FiA~07A&f$QGWSf;~tX>t4qXgV2;0xns|YX3FGMR9t-o|Ni~^d~RBWs5XSom~^JO zDX$~w(G#+TgD>-YZ@kBrwfl8arS^wP$QSD zdMYVzFBXvV6|k~}4d@F~8y6Pqt>rM;AP`)gPiN{yBAtK(-VSHw;u?19 zSvsPBjk3*%^cRes2#P{^e9DYX#&ZqsrMqW4d>T?x?NdN|BY-z!7dyIYlF_jFjPw0{q4 zi?DuITvVidwlxkPyZXDfM(@5e^}7J6mcpJzT0<($;GG^2SBkv~vteV2AiOz*-FB?K z3|`ni)M<4BWlfc|gsaio!!A+_grX%asbN zPC4wIbFreLqPdOD4<-%{LPS-&)c>|nz8;IK*K`;oxuZzvx>Bav;0fIuD-A$kES`aK7exui-An`XFSzy#t53k$Pe z2{1$iRc|oMWSG(ImR8Y6vn5hQ1yN}{36S|-knv=(0_LBw>6-uOVh@U5T3Uj3h7s_1 zJWkvXSI>VP)jS2LVRevhIIA9MjcfQlp(;gPBl6U~DabjX()s2P{5)#}m zUc_}kraFO3&RX+K!#M}wG4yX=3iS5sw4%J6Yv9r38pvSK=?V)ds7N46krR{oKyE@8 zntka|Ti+vAJ#<(0)a!)Dv;+MTRHC{E#jgAK$kkn4tNOaTqY-5&`({2{#gn}`y~M=C 
zSRzP6Z-0N)m>;eeLpV?ya2SQRGKeN!zteFoKxl#WX6C;4hxz^jCf>U_nh&3v(uO58 z;7UePi0bkoO5Uy#yc1rNI=~!m1UXYiLgGWt;3&|5CtKr6C)<;%pidwa(OE%C$^!US z2NwUIETtwPP#^Mp`*yq)*p9C?H5qz)dpm$kVFA%A4FFMj6M6}TjBM;&l{$-`=2@GQk9WGj0DY1*ij}>5uNs*G))|Y~yl6na^toCv_ zjj=aqps0P{PK!<*)QA|wr5>owI8dU90p>NE*aT=o;97VvB&Dc`K~Ci~o<93#=Rgaj zI1#G>G6VM#K#dX+sq|46FNlcT+1)%TI=>La;(&^AyYM8Ctv$dSBB~!U14VSyVlwM* zE3hc|qbCeQTwWd**Vk}DLl21_@^jGmP-q@&3Fg2;LINoV9o&zb+Z-4~n@pmdfC4at zhlRzvWdE}4SND1kDnCbYs$$d3f&CAPszAXUmVmDJ@ef+nd z4n3=?+F@aUPz5{6d#_H8LflVZ=ksMROKGxKGs5@<;)zBA;w7)76o+Ut$-@p4^S$Z@ z702B4RYD^llh9A7#1asppFpUpC@2h{I!m>ET^s73iOvVz6p74evbX^Q%~xA2f}qeT zq?;xKEZtRLCaY{?R=JZTV&J0~2Y{V{=uHtaB=t#6{rZ&^ROpp=QHq74g5{J0B!GOO z2V6o^$n^0%f%w%%&QV9`lRS4Hcp}>`A04hh2xcs(+JhDuli6bSbbRoT-1PJ^ns49g zSK~6|a?2V6qN=!zkY*ow&09eI)*oRFnlB|5(IvfwhxD3=C$LN(3 zyFnMts3+-D(2fLM?g)h<*wXIMJaCm}n{rH0keGZ{N`rjYQ5F8_GZqf;ErqCbP$RNX zSI6flFU3b71~YTd#5F$X1DUP90dvYw2zUSjSI+-9N4=Y^E`~Y4E;n+bdco}kvo$KN z(PO}KQ`gniLGNxZfhPZ*s2vXLM#sTPQczGhs+u&6TkyLsej-dgYeWkMlmV2qr?Qeq z(4zP0?7lW3$V2(cDKw)aBj$d7%`3uWpuh#|nwz=^!uy4)5%|M+6%nNxEP+hXQ+c%axz?KRf{K4Y*ufTr_-q zXP-ktL-B@(hsR6~?~Pb|FV~VH1u6ag{mX&icLLrZ)K>x9F&6YNbYES0y*4u9xI6d1 z*V3ngXq8sSpqI0Xlwd}D+%<6kuo|?-P~RcQ%_zBpM9!+ zgIjUVKqwvxq;h1tv}JE<{Jkd{5`@hK@HCj8O_!E3WC(+Q-XkaH9BzyP{!j*zgxug; z5Eu|P^Ycq)5NfRu(Deen8DRv+Sr0)6+8Um>+lxR*fMs`o|7i$;hXO8SNSUMQBv!7d z1WwBUvCfmD@a&+)4+6a|e<~~qL-_z*msF^ZFo%jSs81nA^~b|M9!I4LNn@`Mf}LwG*tim>&?#X5 zJ0>t%NdFxaW<6WY7K#LBJgKYYcD5yk4AP3J>D#fo zzRn1g)PG;{SPo^B$3#3N3=ljTu75^mY+74gjV6Nx;B`cmTMyHK0i&t>zD)nJiX(!w z_EG?^Avctop8it@1mbo8dW#f5q_&Ie_=41}fC({X1Xb_~VATiH1%r&!b93VZ?l3jJ z+!+pX$^<>L%g)s6yxiQFr;t`=AkG;a>NoQ&8Bvnp^q(<_X%>Lx8l{54{u2k1&%(?c zlj(Q$W2(u!24OT6fOayFRS|ueZ7`S|5aEdQ?sdM$CL2k{-1UW+7J%g@CDQ+C%jp0A b=CYUK(ob{wS0;((5noV{c_m#UX&m@JoO(ZO literal 0 HcmV?d00001 diff --git a/docs/assets/images/icon_black.xcf b/docs/assets/images/icon_black.xcf new file mode 100644 index 0000000000000000000000000000000000000000..430e7bafe579b3d563b40f4bbf2faee7ead16848 GIT binary patch literal 36322 zcmeHw34B!5_5YnEne53VlRzLOAqfyjLIQ!vj#@0JXsw!`Hi{Nx5f@Og#-%!L)VNh_ zt%_AEO1IRi1za%IDzw%mR{zx&l}15DD9@*7c~qUW{mM%hxR>m_;)*65Y6KOC_qa){CQ*8@E1b! 
zqj}Ma;(Z=hh{hxG=HS$IbI+YVXI9;u3+B$mF2m0stX^>8CFjg4Uoiiixibf6RL`Ds z#{6^UUr=7HsP2L}Gp3g}=>M?6_;|)=d_1gN|M~I73(lK<#@u;x%7-;z>yl4x#W(7{ zHE-^P<#XqqaptTA{Kov*vlq;|uw0K%=sVp{AIxh``14Fo*g*G^**)9)2afkxa_-zS zE|_0FV-|k&-02sb%?q4+#{79h&zq@V;h6C+6^&ykX#LrrcqJgl8IAjSuJm{x$W$cs zOA`7S3H|bfer7^HE1{pA(9cDmzvuo1(BW_M`~%{BAXCM@;19Smp`SPpu-pCfp5gXz z8}6jydGY7H!@-vv9>>SVaf^cq_kRa{f4#HcKheRdoNxFG%yDpfZ5)q_fmF~ zb8zzn2cNdq!81;b;wEisQTE_`x{- zwS&L)j)O0LF^=Dg8$7399OgPU-LNnpas6?ZkZDU7CboYZN z<$29WiSAuCEhoh^O008EZklP3*tW%unP#ZO-kg(}ZE7X9y4ja&220HTa%8@#7TeJn zG6Th~o={|}#J+N9Z&M+$&GUT4rd(qCZp`g#%EUfb)6evi*sfMzsVNcrSXsI0BX;+^ zisVHkj6}?MWwc3=CX*(OCR2u)Y#D0uq}Jri5EGJWQzQdTZ>hrIRTydOEB!I5Wfi>_1e(Sw><5E_`X6cull)-fBBa zQcSG+#*IxlD4II8J=!Q4CRTCF_FcmnMH6t705h#b^@{ z>1;9?GQwoZaFZ+bCMb2LKx#~(49421#@ZN&wNb@K*58j`n8>9gTUhKoq=j_JG*zr5FBhjo=}7D+qbK0aZ3C9qB{JNU6x)0sygXM*RUMFk30iR%ro`X0f0c=qEV1Z5m0Ahn9Nvv<`E#(+*wL@!;yx^|FtN;AW9aT4Rbg!78@;f!kL!<< zUfkVVhP5tJA55_>q6*F+l}7tVP(epz6WA77oE&sm4JhSsP}%UA8C8( zTcxD*{kzJ*y6un;%1QnQe9)iFpmbe@6dT*OtCAcd_w_32Wo*i?2LQ>P1HnDyl0i~v z?4;-*uoPRisagt*O@DbXklZ!|?8Z*EL%_#u;fFOqvaA-YNWM`gxyBZJL?rhQ1(%Y; z>LnW&`6`jjZU6(bIUf?qwZkOS*btanrjZ+mgT>k6T|{z2qof;~y_HDLA0cTtK^Ku6 zHP$ZOxZbch7aR_b zNUqoj4k;Cc4!=xS4mvWxIW2SakVm z6H1{Kcc(Q@zvb6|EtY-VYaa|BU+D>%NYQaO{cdlSL^l1ZbzEAOiTD~@SI26htar2y zNj1^HF%N7V0mXb_z0b(jF%n&KRi&8#895A6KC$!5^PBy|JE;L|vlLHQ+Cz9|S$G~J zAl*pC7)U-7AP!+n%tXuz+q|U&YJn|%wlC&J%y*3~hQ45r-qIUiwgX=41r5Sx{=5j2 z#CjG|udv_jF2Dq_M{LW-{IGQ&Ladmbjvf8Gf)d;GonjnPwl*KejwN-~#`gMS0c4wB z)#Ll2*F)$otH&9O))Y!Cc6TrQM%nrziR}!h;a96ZfHbl82=wYc=`FTB1hM1Ledv93 zQl*g@Fp;zeq3BbzZBhu+i}dfTUz8@U=r zmgw0KI=oN!6MH?Sj?%5@E%lcgIiZw-6FN)KSr!0I%6`*RN`(CgRYgc~3buTR;K)Wea zfMqQYkOasv`GEHAv>;ZzTuNwn6styZAEAxatKRYup|wq_*E~v6A;VSQSxRUlrK%6z zL}<6BtD-bb;i^p2zf-nj5f9EV$%meN=*gA-WmxGTSCdLWq56V6^#N^?NxeahdVv%b zfdmzT{)9kx3P53kAS_Ue+Uu}Yt9$t=kK)n2@U3EyvtlD#2I!f1HlCFRR$2)wA#7B{ z8-BH-A4p~C{#LF=*>W%>8fYtE@~(s!LbEN~08^}H+(FQS*=W`stbz!c7Y)6RC{mz? 
zUsri{m1$SWc9m|b;ZBnZn#I_7Zsgt@pJX#E>#AeDuLw!#jlLiu}z=M&mKF&a)~wZd~1mJFjl`Z{Gpxv3#}EB~%f1v6}d zv9g7Rrtxa1rLopAHc1=JO=Hw_OY^N`Y#J3eHXRZdk7;m{3Ywgp<_giSXie>GtJb3uhyjZLqHd=hduplQQwAO8f(aIP(Nw|W~HTjg%R zvi{A%?glL9R}OVIAnx4P=x#uGdS;Zw@Iw%=x>U^CF_w~7fRdN=Q8-lL7=?!^oUHIz zg(oOHPT^4sCn+4Sa0Gmiix5sU8nk*Yej^7y$L*8xv-{v^+%OFC+J|s5{;?FY+8^Lx zd^{U++Cy+I-jf1(@Vn5IZ5YDXJm}ARTOf!XQx7-G#4I>+plWwj!x`Y&21@;W5nKe% zfuui_0p@xq7~&!?1m@XbU(s_g6t&RucY?uL&&RL~kQ^v6Sm zyWojz_*=4f!$NWwoHVc{@XAXOc9onrIuDZbCOL0F%aZiwUrTS&;Fh3kVyHG6BtN@f z4|j%pOuZgMA)NM7+5xEZlRHv-r;~v0_p$r+CF)Fnf1@+sapb4KZST749Vb2Ye_8|T zL8^1&&Uc!vX~A~n2Fe67o7Y2I1nm;EPskQtWltzsC9a%K7CKz$d?}M~%0v^*#e{1z z+jGaxTk^UUaky!%>2`Kxx;(JLbZ6-ypxL{Qh2c8;aTv9?Pl0*czZkCc_v`734_mn6 zUoC;n{PcaWsXvhooBH8f!OrjUgMANKmqmhKPk(s^c-6_nVDw&=CU*0~L zWLFam_gDWo*F-!K*;Iv)f$8%7JQLX{vIgIZ9=cDi0_$7j6}u`>X(B_mikycXzr*kS z%!8XMUWbsl)39ebLTj&ql{w=DpU70~Sds#ZIXIi|5qvTRJ8nsVZuC_!z3=0bYV5cI zm%qIp?C@J~Qb0G7a{^%A{lF?ujNw?Df?X}R+1GO6_#e3o2WXD zy@NeX@I3txT)5y3XwffYM^&cCMR5P8KAkSofgSmoINd3Exb3Al_U8x*@W!xf&sgyH zHXM9=zLAeH)SCyw)G`x}lWVbKEn+LWieQ?V62YDY*z*EHEMCljF{k#EY(zR?&*OMf zzwpBR)92kBk*{ITk0Gqx0REi)DkQG)*zx_K$T_ehrL4p!HP~?#o^$kTUXz?m$;p(Q zOi9Z0??{=#FoQS2?pzBu09hbo7#NGdxC8`FR^)2L0u9$1^3>J@RhHIrs(P@qZ!uxd zAS7B$fVQU=_jZ_kpzp>y4UB#s==)xrGFD;G^MSv5f&GvYe84I2Aozh(!EaN51MnWC zDlS1kALjKwX!)?IBlK)b09-u-{)05&OjrWafwOd90WLp?eFfn3(CpfBu|I@;IcOn- zT4bSxAg^R9&V%hW7u!KtX7hmaAzwfTyBIY4Ixynk4iNgcJy4{7fITCDYk_ZsLj63* z{5s%S*dBr-bM|1z0_=DgxC6Ka-`tytPqO!7&m`>W01g8$0Dcm92#(m{uQm~tAaI_# zN>`c)iJXGC2uHLiM(}L0yHP6gXvp1wKjz(BcLUm~O-pyT5cyi2Vj?PuAgp#u3c_>c zOoi7e{Gq~!6s}VECx!1S{7B&^3il|CDeP9bL*ZtH8x(dy^?n8+zc0dB68I^?eIKK) z8b+w^{qT#-=*A`70tPx|JK}j;;R&f*k6_;aqW|ON29c&LW3yJ&i-Z`DcROa<4lwO` zF&L+x2h%=rH;mDDfw7O?i1~CbqWvo0gbn#H^8VFyyu;&;Uxm->HAoDeaFxg%kS1nD z;JiN-5rvbt!%fv65r*~em5ICsJ4fGFF^=DZl#=~;KanF5EZ|=ZMSM?2xyoi3@kb!9 zosMt>X+ea-;aefN?a4s^!;p0dop})P4ZUB&Gdey4*-y#IlAJ8b$)Z#sNm>2}DN99M z*>)7_Jj4kyX3$Z<5AOGTMrS`m02m8Mr+XpPU&f`=x#+qfKM%_mIx7!uqrgbrn0dy_ zGgh8XOP-TXO4lLD-(lQf5N2;i6oN)}I@u|7_cP4Q$xx|;+FhYaH=`CAy~rp=D&7NB z>W~VCA~-<{nPA-V6?(&M>JA1=$})u)DI7y+1A7=zDXSH>Dg3&^DnjesK`VwlukeQo z=P8_`a1fzQvy8x$4GMp!aH+zZ6kehb(VIBt1ceh7)+(%2Sfa26uzf1??D}q z2@2;c{Dr~|8Vg^cqd86C9SYZKSbT+sA)Ko)`4ITBo&%A|Z5Rm_Chryq~UL@L!KV?^>LY@1ARR@8Q7vbMHG0Qku}9^L;vf5aRo(8qfM; zjc48L*Lc>iD!hl#ZcWvw)}s_IA+$SkG`Mw^!WXld>(`(WuD5A~>$V(?ZCyZU*QaDK zRj@_j%NfieJR+S5g)e&2m|b`jp?%pCVBX=^3GHkCRAwa3CA1$GsI+nmp^en|nZNj? 
zpDB%}`2g+aA}_NXf8u50g|;)C`ivy+Q{Z_14|<;aSF5zSYk(ML z4~R8Rb0ZIl-RMI%F_73viO7s6LuAC`y3+Tj7dq*NGTRZrTv5x36_Yk6cFgsMFs=+% zd9rcaYJyc^twaz73P|8Nm7rCD9_azyW*ABDTo{3+&DicDE+=s)vC&Oy+ z4a;p?@K@~EJ_7grmF_ICWP2ozw;O9 zpAw;A%+9csR^=Q9+{$H?UXc$8IaXRJ(b}_O^eEXgA}}uuLg$E2kd@FGK(FC_c%1IS zV=C!HXC^vH74pmc3C(49o=WZLREmB3t|1jVLu5lKgg~MF6 zYa9&O-tL(}kD6VQJ`koj`_cd=8KJTToon`?f{Ns4u6B%ut=mt$WoG_WdrEt9v_k-(&9JDW7FIixe#c;i3W4kw8FJz=e zaJ`U$7RGT14wqbP(+!gcoCnz<5BxL-QbG_o3*taNa0b{uv2>E(7qHhJKraMln*t6W z!o~VWuYf2#m;Yf$ZJj!lh2maqG3a7{egO@-lk5#48Qhmcc@@jWkJ zBDB_A#f@10^m&G>2SI2gW&Qt;17Vu z10%9tYJgV*7Xu@fUIM`PB35xHG8S(Fei!&M;8%g?0zVCW67Yk-M*!ai+z8wT+#k3V zI2U*pf*vCtbWVWBWh11*!+_rat_OY&xF7He;7s8A5em5n4w*&39{^tgjFJv=4)7}A z7T|}0rvTqmCDCgRE%W7@Xxfme9fM@^zO^eCw|#rT?3UvvH-GcY)~j!L;I|)yy?>JK zjknayd@|Z3(U0%`s^4d#g)@G!wLzk9-8wlX-9-H3uKvxgYKd-WKf5w$y8TV(J@CrD zQi*JN=KH7i?_>6+)cn`QcdmRB&b_^M;~yTn`J5>wgUr@IpP`3+wYmB9+23gX`lPYd zg`O}4;^cErKKJB$|I(~?a-$HqgAd$**o#K4<-s<&l_*Y31aSNte>>s8O?Uh>Ca3$` z4FP8`aTonpM}BTDX8H0LZ>|r2OI@OUoMBmAoUF#w#JKU zZ(`wHrx8MCWMhErJ`_N-<-76mhCnt(&TbhD_1W_z4mc|ZBWmC2g#nxcZE}4e2kCJL zsskS^-;U2`=0Yy8D|}Gq{j6Q|W=bxcRdOdTcp>=b(Rql^kohnT92vvWPcU4=#*Tx< zpk^D6j?@O>lCrx;BcPxF

0MSqK;0R0YdN>ThtL>$378lh`%+NCUVZks*@{ApY2A z0>~4%0-N`TAi3D39>f!$5AF8-f)J!1xyEPgnY1j#4ljg+6FJkEGh;aVx*}}Fy1#l6 zIQ@$#>4|WTz36T#Wz39?O=g)6X)T6ef>_C7)(%(|=mRGge!edxFuS3m#Gur`Ce(#Q zKESpS7~lfr18f_EQC|u(!In|zd>x&?4uvJ5?`BvB-WiCV_bK#V?1d+J5ju}#K;{^+ z51RH(KJ-HCkT$U6B$yMtkE8ciEdmp+LuY9YYzW6$sOQ_J!G=)#33{th*vak4>WQ;Dm`>BsT^eFas>74 zSzL)^5`@acHnsJEHL5jNV$aRUz>M-N%44|*-`zp@9&N@$`Iv&X==lOjdA9oP5bH}E zwxtlW-<}^Sf{W5-{g)|bvuYq~BJK|Gfz6a}4I2j+(o|yL`lUPU?p@MC0VJ!#K z&|e9gzgu{#3T6d*1Q;C4@?H#7smAZI03pi{*3jnCUW=<>P7FtE#O@5!m!e0D7 zYY0(JAcnp?n=pkQlt;A~VcIS&MtG!Fg}7Hs5~hBvB?*tw3KC1TFk#v@Elk*=btZnJ zv8AX%0*PSCw-r z%eggJM`*MeBhH*EtxZ_?jw{uTWOHNISagHUaiuzGW-QOa#(Evle*!)%vXOcXbm}tv z0V+d#&B1k&4+v}s9}xHeDg3=Dq$ak#kahR$mf58g@E;#QG7{@vluN?Fg78`_?3JTg zTgN^>mG(vZ>;zVmh;>f$lSaheo|nluh}hCOIr!fGy6lV zww(oiOf2o(4-m|MYq5#t&U+tBdsr{zI4;@=r^lOxCh9x-ktnqA+Y3yrZ|gfSd?O1m znm2uA490hq(TiqGZQqBII6)XS9z*P?wLYyVzE^AOSr|8l>4$*Bwcn!f2MXWd0+j|0 zNV-J>l6DqoDAGbgT(ZU@t@3Lu(s$ArgmktBAw`-s{3zO@fk)961|HdHi^d#9TNrZ` zjkajG5fD~*v<1908U}Yx4qIr5y88WFCHLaEO z=&_6oIqZR4KT!g3`feE2XN6F)P!{<;tF|*}iaegF;0%@8A10N0s6nVt-leSVLdD2#DBu`*D?*oVEJ17Fn`{RzV#hb#{^;Hza%Y zu$+p6O<$>1R0GSIK8`KkE{@b%s_84KZ(vJnsiv>gTB?ENg|vL7uVfZFKGTw_=_|FQ zYG65I4v(egsyqDiJocrf6V=Ruc`Z&^n@l6Dg;oR&tf0LIC)1Rt^rtkzDf?wLn3QbRgq~wLGV;q>=qbtTfKr|~ z9;)TL1vzO2aLaJgQt(;1Yi?mpN+`4kIHBLt<|J$|Q6u~Xc7Oxz0$;QZ{U_3_v9eKQ z7sE*xX=a&zEOb`v$5wyXP_e~2{@7B}LbQd|{jsI>$)hc_Fks6mP2QrV$s^5Lnmme- z8b;_eYf}_z<`Fty3VY;bpd8p=MY1K{Y^_sl6SVK)Fy8CFkl(gXYER!8mN{_lALo*qYAaU@5U)iz1C+ zGf~f|JNAr}ea7Ew)Hm;WC!BT(Pn^7+f7_=+8t=XX<-oF|pI2n`z~kKSC%#9;O{^nV zarIwv8)i+@j-86Be#yby#eb!Qdm3KD_&>XSx#&ONjJg`_1$OW^9WDM^SJQxg{8h92 zUuTyec z_}k;+IDVdQ-xuFr8pl72;|Cpl`H>F3;-omf#=+rR9eic{g|%0f#wq| zIDR6I<1Yd>j&CM22Ja)Kd`kV)0;myCL!h?cI9Hu_C9(biEWxdPAtm*L+~m9!$$cYI zIOA1jZFS-0bY0b8O2ff;XiW

e)9;h6VHi3Uk`jZLZ}Q2L=>K&Lm|Hm)u}HtDcP zX?7SS%LvlByk7>S?1)Gh)=(x`OP}W&^N?}x6Xnf|S(JW2%MU!W{2&u?Wfa66w&8g4ml#ihL;5I643_?U@#NZ%R=}6j0 z#t>+?xK_pC9s))}MoSLOIJDvLNLp`bx}n{MjWPhwNzvAAPOU|!-eQmcoyIlV*=TZe zUf|nPd6AYn8trJmlV(El53+5F0u&0EhOX*RkdXS3bFa?5Q+uK!<&2OsTIwS2B6P-) zDj=IP%6Yizu&1B#b~Hd5FNO3Y%^{o@GXih;gB8AudL2yiCJYz-M3(;P=y|&ZVhuVz zTlR6}9*kx)U)Wl!5)q4X@XZ%|0|wP+N=|Tr1ex~WG`=%~ukYeCt%M@LSGqc{b(JnB z9fBzl2d70OD%j4OKzfwy#P@-~5rVsO-wMJpbyaRx?RHgfSN(QXaA&Sbmpiu1a?dnj zgjTZhmo-67^6|QgxRRWSF0GvEju!4};;uICYUIvikS^z;xca%Pqq~pL{aIpX(mNOA zK6YJsSarOe#O6#JcltP(w|`*6z;sYc;)MI#jjvYuXWJK@+x`5&t8=#xyqm;3bsuCJ z7OL|eH|n&B? zuu9irv5_Q^+>vaNgw~4p%0aj*s80-4 z2&y$S+>2Ajx)>bp!}|3C`!ReBB2U2@^#iBkLQ;SOpm(W?OVH29&-FoLyfQLif{bU% z0Od;q&crP<&M8ax72pE-W*aC#ixD!>nv4AmQ_evPfhuRAh43IV73YDj<2d#qic2f zoxA+q1c=j-4I78&LLfn;bOPC zaU$;FlrG0~@TZB(fy?h-v;BG3;_$&l4)65c`*N{+-!6&lxg%*!yk{(k{c?jVOC(Z% zSLz^kB@tILaf!DpyEw9pEOus!I~N>gAgvB_aLEW6hw@iQUTONY5SY_SNrXJK0&9-x zhSCTp9GQ3!58H#s8N~YcD(;6Rmxb-VSaQrnEXI<{#&&NkIc6jlVaeqH7shc23oRGh zTz+}Ld02dTVA45QXhGmCEHZ{5W?+#KOJf?AR{?v?%!~|SI;H4BE5riJ!+8sF3qj=z z6kjMG{=KFY*2L;*edA&PC$Ka+;sGmYu~B4T)(IywhbN@^E7>rYwzIsQs3u3RO|;Xy zHhNxV2=~F#o5Wr;-VKX%@-9TN%Y_{L#oUep#=*@Uh0n(jwHmpYX>v+zT-*n@YCH>O z0ZO9}TtpTlK(wm?&x<9;5fjoe0QV-1$a67exC>4lr#WxqhU|v~wy!H*}&S&@P<3E~%0@$B<~18AHzoIMO)WfcqNyCUPJne#4$dBhT= zAHGBZegz`ZbcJixqnX?8{+k+)HMak!f_C-OMyD93Dp0NP5C zx~Gx%(f96(-Bp+^tj$1vN8z3%uo}NN7}qic*U2}GOEX_EPX5nPVi_VD>^QfO-O)j@ zp5J%EXe$->!kE*$pQ3~`Q3ivky!c?Dn*`kzxrsk5cGDr^5DAx=xa@-RBAY`b9GSx@ zEQ>f(5{gE|^5H5*D!U-bA|Zq+uerw2zo>h`J>Uq?6qTi;Ovlp^lBLzDH^&*v8BSS) zBoPuwYmO=oj?#rW)|#rDN^BaLW&vi%eR>Gb>(%KLypptIHI5KyM*_%h%N~($B|5!WR_z`d5gq#KVeNh zRDaFYUtJpYziH}aOef8~pB>Y|xQ + +## Disabling CPU Frequency Scaling + +If you see this error: + +``` +***WARNING*** CPU scaling is enabled, the benchmark real time measurements may be noisy and will incur extra overhead. +``` + +you might want to disable the CPU frequency scaling while running the +benchmark, as well as consider other ways to stabilize the performance of +your system while benchmarking. + +See [Reducing Variance](reducing_variance.md) for more information. + +Exactly how to do this depends on the Linux distribution, +desktop environment, and installed programs. Specific details are a moving +target, so we will not attempt to exhaustively document them here. + +One simple option is to use the `cpupower` program to change the +performance governor to "performance". This tool is maintained along with +the Linux kernel and provided by your distribution. + +It must be run as root, like this: + +```bash +sudo cpupower frequency-set --governor performance +``` + +After this you can verify that all CPUs are using the performance governor +by running this command: + +```bash +cpupower frequency-info -o proc +``` + +The benchmarks you subsequently run will have less variance. + + + +## Reducing Variance in Benchmarks + +The Linux CPU frequency governor [discussed +above](user_guide#disabling-cpu-frequency-scaling) is not the only source +of noise in benchmarks. Some, but not all, of the sources of variance +include: + +1. On multi-core machines not all CPUs/CPU cores/CPU threads run the same + speed, so running a benchmark one time and then again may give a + different result depending on which CPU it ran on. +2. CPU scaling features that run on the CPU, like Intel's Turbo Boost and + AMD Turbo Core and Precision Boost, can temporarily change the CPU + frequency even when the using the "performance" governor on Linux. +3. 
Context switching between CPUs, or scheduling competition on the CPU the + benchmark is running on. +4. Intel Hyperthreading or AMD SMT causing the same issue as above. +5. Cache effects caused by code running on other CPUs. +6. Non-uniform memory architectures (NUMA). + +These can cause variance in benchmark results within a single run +(`--benchmark_repetitions=N`) or across multiple runs of the benchmark +program. + +Reducing sources of variance is OS and architecture dependent, which is one +reason some companies maintain machines dedicated to performance testing. + +Some of the easier and effective ways of reducing variance on a typical +Linux workstation are: + +1. Use the performance governor as [discussed +above](user_guide#disabling-cpu-frequency-scaling). +1. Disable processor boosting by: + ```sh + echo 0 | sudo tee /sys/devices/system/cpu/cpufreq/boost + ``` + See the Linux kernel's + [boost.txt](https://www.kernel.org/doc/Documentation/cpu-freq/boost.txt) + for more information. +2. Set the benchmark program's task affinity to a fixed CPU. For example: + ```sh + taskset -c 0 ./mybenchmark + ``` +3. Disable Hyperthreading/SMT. This can be done in the BIOS or using the + `/sys` file system (see the LLVM project's [Benchmarking + tips](https://llvm.org/docs/Benchmarking.html)). +4. Close other programs that do non-trivial things based on timers, such as + your web browser, desktop environment, etc. +5. Reduce the working set of your benchmark to fit within the L1 cache, but + do be aware that this may lead you to optimize for an unrealistic + situation. + +Further resources on this topic: + +1. The LLVM project's [Benchmarking + tips](https://llvm.org/docs/Benchmarking.html). +1. The Arch Wiki [CPU frequency +scaling](https://wiki.archlinux.org/title/CPU_frequency_scaling) page. diff --git a/docs/releasing.md b/docs/releasing.md index 334f935..cdf4159 100644 --- a/docs/releasing.md +++ b/docs/releasing.md @@ -1,20 +1,24 @@ # How to release * Make sure you're on main and synced to HEAD -* Ensure the project builds and tests run (sanity check only, obviously) +* Ensure the project builds and tests run * `parallel -j0 exec ::: test/*_test` can help ensure everything at least passes * Prepare release notes * `git log $(git describe --abbrev=0 --tags)..HEAD` gives you the list of commits between the last annotated tag and HEAD * Pick the most interesting. -* Create one last commit that updates the version saved in `CMakeLists.txt` and the - `__version__` variable in `bindings/python/google_benchmark/__init__.py`to the release - version you're creating. (This version will be used if benchmark is installed from the - archive you'll be creating in the next step.) +* Create one last commit that updates the version saved in `CMakeLists.txt`, `MODULE.bazel` + and the `__version__` variable in `bindings/python/google_benchmark/__init__.py` to the + release version you're creating. (This version will be used if benchmark is installed + from the archive you'll be creating in the next step.) ``` -project (benchmark VERSION 1.6.0 LANGUAGES CXX) +project (benchmark VERSION 1.8.0 LANGUAGES CXX) +``` + +``` +module(name = "com_github_google_benchmark", version="1.8.0") ``` ```python @@ -22,7 +26,7 @@ project (benchmark VERSION 1.6.0 LANGUAGES CXX) # ... -__version__ = "1.6.0" # <-- change this to the release version you are creating +__version__ = "1.8.0" # <-- change this to the release version you are creating # ...
``` @@ -33,3 +37,5 @@ __version__ = "1.6.0" # <-- change this to the release version you are creating * `git pull --tags` * `git tag -a -f ` * `git push --force --tags origin` +* Confirm that the "Build and upload Python wheels" action runs to completion + * run it manually if it hasn't run diff --git a/docs/tools.md b/docs/tools.md index f2d0c49..411f41d 100644 --- a/docs/tools.md +++ b/docs/tools.md @@ -186,6 +186,146 @@ Benchmark Time CPU Time Old This is a mix of the previous two modes, two (potentially different) benchmark binaries are run, and a different filter is applied to each one. As you can note, the values in `Time` and `CPU` columns are calculated as `(new - old) / |old|`. +### Note: Interpreting the output + +Performance measurements are an art, and performance comparisons are doubly so. +Results are often noisy and don't necessarily have large absolute differences to +them, so just by visual inspection, it is not at all apparent if two +measurements are actually showing a performance change or not. It is even more +confusing with multiple benchmark repetitions. + +Thankfully, what we can do, is use statistical tests on the results to determine +whether the performance has statistically-significantly changed. `compare.py` +uses [Mann–Whitney U +test](https://en.wikipedia.org/wiki/Mann%E2%80%93Whitney_U_test), with a null +hypothesis being that there's no difference in performance. + +**The below output is a summary of a benchmark comparison with statistics +provided for a multi-threaded process.** +``` +Benchmark Time CPU Time Old Time New CPU Old CPU New +----------------------------------------------------------------------------------------------------------------------------- +benchmark/threads:1/process_time/real_time_pvalue 0.0000 0.0000 U Test, Repetitions: 27 vs 27 +benchmark/threads:1/process_time/real_time_mean -0.1442 -0.1442 90 77 90 77 +benchmark/threads:1/process_time/real_time_median -0.1444 -0.1444 90 77 90 77 +benchmark/threads:1/process_time/real_time_stddev +0.3974 +0.3933 0 0 0 0 +benchmark/threads:1/process_time/real_time_cv +0.6329 +0.6280 0 0 0 0 +OVERALL_GEOMEAN -0.1442 -0.1442 0 0 0 0 +``` +-------------------------------------------- +Here's a breakdown of each row: + +**benchmark/threads:1/process_time/real_time_pvalue**: This shows the _p-value_ for +the statistical test comparing the performance of the process running with one +thread. A value of 0.0000 suggests a statistically significant difference in +performance. The comparison was conducted using the U Test (Mann-Whitney +U Test) with 27 repetitions for each case. + +**benchmark/threads:1/process_time/real_time_mean**: This shows the relative +difference in mean execution time between two different cases. The negative +value (-0.1442) implies that the new process is faster by about 14.42%. The old +time was 90 units, while the new time is 77 units. + +**benchmark/threads:1/process_time/real_time_median**: Similarly, this shows the +relative difference in the median execution time. Again, the new process is +faster by 14.44%. + +**benchmark/threads:1/process_time/real_time_stddev**: This is the relative +difference in the standard deviation of the execution time, which is a measure +of how much variation or dispersion there is from the mean. A positive value +(+0.3974) implies there is more variance in the execution time in the new +process. + +**benchmark/threads:1/process_time/real_time_cv**: CV stands for Coefficient of +Variation. It is the ratio of the standard deviation to the mean. 
It provides a +standardized measure of dispersion. An increase (+0.6329) indicates more +relative variability in the new process. + +**OVERALL_GEOMEAN**: Geomean stands for geometric mean, a type of average that is +less influenced by outliers. The negative value indicates a general improvement +in the new process. However, given the values are all zero for the old and new +times, this seems to be a mistake or placeholder in the output. + +----------------------------------------- + + + +Let's first try to see what the different columns represent in the above +`compare.py` benchmarking output: + + 1. **Benchmark:** The name of the function being benchmarked, along with the + size of the input (after the slash). + + 2. **Time:** The average time per operation, across all iterations. + + 3. **CPU:** The average CPU time per operation, across all iterations. + + 4. **Iterations:** The number of iterations the benchmark was run to get a + stable estimate. + + 5. **Time Old and Time New:** These represent the average time it takes for a + function to run in two different scenarios or versions. For example, you + might be comparing how fast a function runs before and after you make some + changes to it. + + 6. **CPU Old and CPU New:** These show the average amount of CPU time that the + function uses in two different scenarios or versions. This is similar to + Time Old and Time New, but focuses on CPU usage instead of overall time. + +In the comparison section, the relative differences in both time and CPU time +are displayed for each input size. + + +A statistically-significant difference is determined by a **p-value**, which is +a measure of the probability that the observed difference could have occurred +just by random chance. A smaller p-value indicates stronger evidence against the +null hypothesis. + +**Therefore:** + 1. If the p-value is less than the chosen significance level (alpha), we + reject the null hypothesis and conclude the benchmarks are significantly + different. + 2. If the p-value is greater than or equal to alpha, we fail to reject the + null hypothesis and treat the two benchmarks as similar. + + + +The result of said the statistical test is additionally communicated through color coding: +```diff ++ Green: +``` + The benchmarks are _**statistically different**_. This could mean the + performance has either **significantly improved** or **significantly + deteriorated**. You should look at the actual performance numbers to see which + is the case. +```diff +- Red: +``` + The benchmarks are _**statistically similar**_. This means the performance + **hasn't significantly changed**. + +In statistical terms, **'green'** means we reject the null hypothesis that +there's no difference in performance, and **'red'** means we fail to reject the +null hypothesis. This might seem counter-intuitive if you're expecting 'green' +to mean 'improved performance' and 'red' to mean 'worsened performance'. +```bash + But remember, in this context: + + 'Success' means 'successfully finding a difference'. + 'Failure' means 'failing to find a difference'. 
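# Illustrative only (the binary names below are placeholders): to give the
# U test enough samples to work with, run each side with repetitions, e.g.
#
#   compare.py benchmarks ./bench_old ./bench_new --benchmark_repetitions=9
#
# With too few repetitions no meaningful p-value can be computed.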
+``` + + +Also, please note that **even if** we determine that there **is** a +statistically-significant difference between the two measurements, it does not +_necessarily_ mean that the actual benchmarks that were measured **are** +different, or vice versa, even if we determine that there is **no** +statistically-significant difference between the two measurements, it does not +necessarily mean that the actual benchmarks that were measured **are not** +different. + + + ### U test If there is a sufficient repetition count of the benchmarks, the tool can do diff --git a/docs/user_guide.md b/docs/user_guide.md index 34bea69..2ceb13e 100644 --- a/docs/user_guide.md +++ b/docs/user_guide.md @@ -50,14 +50,19 @@ [Custom Statistics](#custom-statistics) +[Memory Usage](#memory-usage) + [Using RegisterBenchmark](#using-register-benchmark) [Exiting with an Error](#exiting-with-an-error) -[A Faster KeepRunning Loop](#a-faster-keep-running-loop) +[A Faster `KeepRunning` Loop](#a-faster-keep-running-loop) + +## Benchmarking Tips [Disabling CPU Frequency Scaling](#disabling-cpu-frequency-scaling) +[Reducing Variance in Benchmarks](reducing_variance.md) @@ -180,6 +185,12 @@ BM_memcpy/32 12 ns 12 ns 54687500 BM_memcpy/32k 1834 ns 1837 ns 357143 ``` +## Disabling Benchmarks + +It is possible to temporarily disable benchmarks by renaming the benchmark +function to have the prefix "DISABLED_". This will cause the benchmark to +be skipped at runtime. + ## Result comparison @@ -232,6 +243,19 @@ iterations is at least one, not more than 1e9, until CPU time is greater than the minimum time, or the wallclock time is 5x minimum time. The minimum time is set per benchmark by calling `MinTime` on the registered benchmark object. +Furthermore warming up a benchmark might be necessary in order to get +stable results because of e.g caching effects of the code under benchmark. +Warming up means running the benchmark a given amount of time, before +results are actually taken into account. The amount of time for which +the warmup should be run can be set per benchmark by calling +`MinWarmUpTime` on the registered benchmark object or for all benchmarks +using the `--benchmark_min_warmup_time` command-line option. Note that +`MinWarmUpTime` will overwrite the value of `--benchmark_min_warmup_time` +for the single benchmark. How many iterations the warmup run of each +benchmark takes is determined the same way as described in the paragraph +above. Per default the warmup phase is set to 0 seconds and is therefore +disabled. + Average timings are then reported over the iterations run. If multiple repetitions are requested using the `--benchmark_repetitions` command-line option, or at registration time, the benchmark function will be run several @@ -247,10 +271,12 @@ information about the machine on which the benchmarks are run. Global setup/teardown specific to each benchmark can be done by passing a callback to Setup/Teardown: -The setup/teardown callbacks will be invoked once for each benchmark. -If the benchmark is multi-threaded (will run in k threads), they will be invoked exactly once before -each run with k threads. -If the benchmark uses different size groups of threads, the above will be true for each size group. +The setup/teardown callbacks will be invoked once for each benchmark. If the +benchmark is multi-threaded (will run in k threads), they will be invoked +exactly once before each run with k threads. + +If the benchmark uses different size groups of threads, the above will be true +for each size group. 
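As a hedged sketch of the callbacks just described (the function and benchmark names below are invented for illustration and are not part of this patch):

```c++
#include <benchmark/benchmark.h>

static void DoSetup(const benchmark::State& state) {
  // Invoked once before each run with k threads, e.g. to prepare shared data.
}

static void DoTeardown(const benchmark::State& state) {
  // Invoked once after the matching run, e.g. to release what DoSetup acquired.
}

static void BM_WithSetup(benchmark::State& state) {
  for (auto _ : state) {
    // ... code under measurement ...
  }
}

BENCHMARK(BM_WithSetup)->Threads(1)->Threads(4)->Setup(DoSetup)->Teardown(DoTeardown);
```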
Eg., @@ -293,7 +319,7 @@ static void BM_memcpy(benchmark::State& state) { delete[] src; delete[] dst; } -BENCHMARK(BM_memcpy)->Arg(8)->Arg(64)->Arg(512)->Arg(1<<10)->Arg(8<<10); +BENCHMARK(BM_memcpy)->Arg(8)->Arg(64)->Arg(512)->Arg(4<<10)->Arg(8<<10); ``` The preceding code is quite repetitive, and can be replaced with the following @@ -322,7 +348,8 @@ the performance of `std::vector` initialization for uniformly increasing sizes. static void BM_DenseRange(benchmark::State& state) { for(auto _ : state) { std::vector v(state.range(0), state.range(0)); - benchmark::DoNotOptimize(v.data()); + auto data = v.data(); + benchmark::DoNotOptimize(data); benchmark::ClobberMemory(); } } @@ -362,17 +389,17 @@ short-hand. The following macro will pick a few appropriate arguments in the product of the two specified ranges and will generate a benchmark for each such pair. -{% raw %} + ```c++ BENCHMARK(BM_SetInsert)->Ranges({{1<<10, 8<<10}, {128, 512}}); ``` -{% endraw %} + Some benchmarks may require specific argument values that cannot be expressed with `Ranges`. In this case, `ArgsProduct` offers the ability to generate a benchmark input for each combination in the product of the supplied vectors. -{% raw %} + ```c++ BENCHMARK(BM_SetInsert) ->ArgsProduct({{1<<10, 3<<10, 8<<10}, {20, 40, 60, 80}}) @@ -391,7 +418,7 @@ BENCHMARK(BM_SetInsert) ->Args({3<<10, 80}) ->Args({8<<10, 80}); ``` -{% endraw %} + For the most common scenarios, helper methods for creating a list of integers for a given sparse or dense range are provided. @@ -434,13 +461,22 @@ The `test_case_name` is appended to the name of the benchmark and should describe the values passed. ```c++ -template -void BM_takes_args(benchmark::State& state, ExtraArgs&&... extra_args) { - [...] +template +void BM_takes_args(benchmark::State& state, Args&&... args) { + auto args_tuple = std::make_tuple(std::move(args)...); + for (auto _ : state) { + std::cout << std::get<0>(args_tuple) << ": " << std::get<1>(args_tuple) + << '\n'; + [...] + } } // Registers a benchmark named "BM_takes_args/int_string_test" that passes -// the specified values to `extra_args`. +// the specified values to `args`. BENCHMARK_CAPTURE(BM_takes_args, int_string_test, 42, std::string("abc")); + +// Registers the same benchmark "BM_takes_args/int_test" that passes +// the specified values to `args`. +BENCHMARK_CAPTURE(BM_takes_args, int_test, 42, 43); ``` Note that elements of `...args` may refer to global variables. 
Users should @@ -459,7 +495,8 @@ static void BM_StringCompare(benchmark::State& state) { std::string s1(state.range(0), '-'); std::string s2(state.range(0), '-'); for (auto _ : state) { - benchmark::DoNotOptimize(s1.compare(s2)); + auto comparison_result = s1.compare(s2); + benchmark::DoNotOptimize(comparison_result); } state.SetComplexityN(state.range(0)); } @@ -668,7 +705,7 @@ is 1k a 1000 (default, `benchmark::Counter::OneK::kIs1000`), or 1024 When you're compiling in C++11 mode or later you can use `insert()` with `std::initializer_list`: -{% raw %} + ```c++ // With C++11, this can be done: state.counters.insert({{"Foo", numFoos}, {"Bar", numBars}, {"Baz", numBazs}}); @@ -677,7 +714,7 @@ When you're compiling in C++11 mode or later you can use `insert()` with state.counters["Bar"] = numBars; state.counters["Baz"] = numBazs; ``` -{% endraw %} + ### Counter Reporting @@ -773,6 +810,16 @@ static void BM_MultiThreaded(benchmark::State& state) { BENCHMARK(BM_MultiThreaded)->Threads(2); ``` +To run the benchmark across a range of thread counts, instead of `Threads`, use +`ThreadRange`. This takes two parameters (`min_threads` and `max_threads`) and +runs the benchmark once for values in the inclusive range. For example: + +```c++ +BENCHMARK(BM_MultiThreaded)->ThreadRange(1, 8); +``` + +will run `BM_MultiThreaded` with thread counts 1, 2, 4, and 8. + If the benchmarked code itself uses threads and you want to compare it to single-threaded code, you may want to use real-time ("wallclock") measurements for latency comparisons: @@ -814,7 +861,7 @@ BENCHMARK(BM_OpenMP)->Range(8, 8<<10); // Measure the user-visible time, the wall clock (literally, the time that // has passed on the clock on the wall), use it to decide for how long to -// run the benchmark loop. This will always be meaningful, an will match the +// run the benchmark loop. This will always be meaningful, and will match the // time spent by the main thread in single-threaded case, in general decreasing // with the number of internal threads doing the work. BENCHMARK(BM_OpenMP)->Range(8, 8<<10)->UseRealTime(); @@ -836,7 +883,7 @@ is measured. But sometimes, it is necessary to do some work inside of that loop, every iteration, but without counting that time to the benchmark time. That is possible, although it is not recommended, since it has high overhead. -{% raw %} + ```c++ static void BM_SetInsert_With_Timer_Control(benchmark::State& state) { std::set data; @@ -851,7 +898,7 @@ static void BM_SetInsert_With_Timer_Control(benchmark::State& state) { } BENCHMARK(BM_SetInsert_With_Timer_Control)->Ranges({{1<<10, 8<<10}, {128, 512}}); ``` -{% endraw %} + @@ -906,6 +953,10 @@ order to manually set the time unit, you can specify it manually: BENCHMARK(BM_test)->Unit(benchmark::kMillisecond); ``` +Additionally the default time unit can be set globally with the +`--benchmark_time_unit={ns|us|ms|s}` command line argument. The argument only +affects benchmarks where the time unit is not set explicitly. + ## Preventing Optimization @@ -958,7 +1009,8 @@ static void BM_vector_push_back(benchmark::State& state) { for (auto _ : state) { std::vector v; v.reserve(1); - benchmark::DoNotOptimize(v.data()); // Allow v.data() to be clobbered. + auto data = v.data(); // Allow v.data() to be clobbered. Pass as non-const + benchmark::DoNotOptimize(data); // lvalue to avoid undesired compiler optimizations v.push_back(42); benchmark::ClobberMemory(); // Force 42 to be written to memory. 
} @@ -1037,10 +1089,25 @@ void BM_spin_empty(benchmark::State& state) { BENCHMARK(BM_spin_empty) ->ComputeStatistics("ratio", [](const std::vector& v) -> double { return std::begin(v) / std::end(v); - }, benchmark::StatisticUnit::Percentage) + }, benchmark::StatisticUnit::kPercentage) ->Arg(512); ``` + + +## Memory Usage + +It's often useful to also track memory usage for benchmarks, alongside CPU +performance. For this reason, benchmark offers the `RegisterMemoryManager` +method that allows a custom `MemoryManager` to be injected. + +If set, the `MemoryManager::Start` and `MemoryManager::Stop` methods will be +called at the start and end of benchmark runs to allow user code to fill out +a report on the number of allocations, bytes used, etc. + +This data will then be reported alongside other performance data, currently +only when using JSON output. + ## Using RegisterBenchmark(name, fn, args...) @@ -1077,7 +1144,7 @@ int main(int argc, char** argv) { When errors caused by external influences, such as file I/O and network communication, occur within a benchmark the -`State::SkipWithError(const char* msg)` function can be used to skip that run +`State::SkipWithError(const std::string& msg)` function can be used to skip that run of benchmark and report the error. Note that only future iterations of the `KeepRunning()` are skipped. For the ranged-for version of the benchmark loop Users must explicitly exit the loop, otherwise all iterations will be performed. @@ -1188,13 +1255,12 @@ the benchmark loop should be preferred. If you see this error: ``` -***WARNING*** CPU scaling is enabled, the benchmark real time measurements may be noisy and will incur extra overhead. +***WARNING*** CPU scaling is enabled, the benchmark real time measurements may +be noisy and will incur extra overhead. ``` -you might want to disable the CPU frequency scaling while running the benchmark: +you might want to disable the CPU frequency scaling while running the +benchmark, as well as consider other ways to stabilize the performance of +your system while benchmarking. -```bash -sudo cpupower frequency-set --governor performance -./mybench -sudo cpupower frequency-set --governor powersave -``` +See [Reducing Variance](reducing_variance.md) for more information. diff --git a/include/benchmark/benchmark.h b/include/benchmark/benchmark.h index c8ced38..e3857e7 100644 --- a/include/benchmark/benchmark.h +++ b/include/benchmark/benchmark.h @@ -187,6 +187,8 @@ BENCHMARK(BM_test)->Unit(benchmark::kMillisecond); #include #include +#include "benchmark/export.h" + #if defined(BENCHMARK_HAS_CXX11) #include #include @@ -216,37 +218,45 @@ BENCHMARK(BM_test)->Unit(benchmark::kMillisecond); #define BENCHMARK_UNUSED #endif +// Used to annotate functions, methods and classes so they +// are not optimized by the compiler. 
Useful for tests +// where you expect loops to stay in place churning cycles +#if defined(__clang__) +#define BENCHMARK_DONT_OPTIMIZE __attribute__((optnone)) +#elif defined(__GNUC__) || defined(__GNUG__) +#define BENCHMARK_DONT_OPTIMIZE __attribute__((optimize(0))) +#else +// MSVC & Intel do not have a no-optimize attribute, only line pragmas +#define BENCHMARK_DONT_OPTIMIZE +#endif + #if defined(__GNUC__) || defined(__clang__) #define BENCHMARK_ALWAYS_INLINE __attribute__((always_inline)) -#define BENCHMARK_NOEXCEPT noexcept -#define BENCHMARK_NOEXCEPT_OP(x) noexcept(x) #elif defined(_MSC_VER) && !defined(__clang__) #define BENCHMARK_ALWAYS_INLINE __forceinline -#if _MSC_VER >= 1900 -#define BENCHMARK_NOEXCEPT noexcept -#define BENCHMARK_NOEXCEPT_OP(x) noexcept(x) -#else -#define BENCHMARK_NOEXCEPT -#define BENCHMARK_NOEXCEPT_OP(x) -#endif #define __func__ __FUNCTION__ #else #define BENCHMARK_ALWAYS_INLINE -#define BENCHMARK_NOEXCEPT -#define BENCHMARK_NOEXCEPT_OP(x) #endif #define BENCHMARK_INTERNAL_TOSTRING2(x) #x #define BENCHMARK_INTERNAL_TOSTRING(x) BENCHMARK_INTERNAL_TOSTRING2(x) // clang-format off -#if defined(__GNUC__) || defined(__clang__) +#if (defined(__GNUC__) && !defined(__NVCC__) && !defined(__NVCOMPILER)) || defined(__clang__) #define BENCHMARK_BUILTIN_EXPECT(x, y) __builtin_expect(x, y) #define BENCHMARK_DEPRECATED_MSG(msg) __attribute__((deprecated(msg))) #define BENCHMARK_DISABLE_DEPRECATED_WARNING \ _Pragma("GCC diagnostic push") \ _Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"") #define BENCHMARK_RESTORE_DEPRECATED_WARNING _Pragma("GCC diagnostic pop") +#elif defined(__NVCOMPILER) +#define BENCHMARK_BUILTIN_EXPECT(x, y) __builtin_expect(x, y) +#define BENCHMARK_DEPRECATED_MSG(msg) __attribute__((deprecated(msg))) +#define BENCHMARK_DISABLE_DEPRECATED_WARNING \ + _Pragma("diagnostic push") \ + _Pragma("diag_suppress deprecated_entity_with_custom_message") +#define BENCHMARK_RESTORE_DEPRECATED_WARNING _Pragma("diagnostic pop") #else #define BENCHMARK_BUILTIN_EXPECT(x, y) x #define BENCHMARK_DEPRECATED_MSG(msg) @@ -280,18 +290,44 @@ BENCHMARK(BM_test)->Unit(benchmark::kMillisecond); #define BENCHMARK_OVERRIDE #endif +#if defined(_MSC_VER) +#pragma warning(push) +// C4251: needs to have dll-interface to be used by clients of class +#pragma warning(disable : 4251) +#endif + namespace benchmark { class BenchmarkReporter; -void Initialize(int* argc, char** argv); -void Shutdown(); +// Default number of minimum benchmark running time in seconds. +const char kDefaultMinTimeStr[] = "0.5s"; + +BENCHMARK_EXPORT void PrintDefaultHelp(); + +BENCHMARK_EXPORT void Initialize(int* argc, char** argv, + void (*HelperPrinterf)() = PrintDefaultHelp); +BENCHMARK_EXPORT void Shutdown(); // Report to stdout all arguments in 'argv' as unrecognized except the first. // Returns true there is at least on unrecognized argument (i.e. 'argc' > 1). -bool ReportUnrecognizedArguments(int argc, char** argv); +BENCHMARK_EXPORT bool ReportUnrecognizedArguments(int argc, char** argv); // Returns the current value of --benchmark_filter. -std::string GetBenchmarkFilter(); +BENCHMARK_EXPORT std::string GetBenchmarkFilter(); + +// Sets a new value to --benchmark_filter. (This will override this flag's +// current value). +// Should be called after `benchmark::Initialize()`, as +// `benchmark::Initialize()` will override the flag's value. +BENCHMARK_EXPORT void SetBenchmarkFilter(std::string value); + +// Returns the current value of --v (command line value for verbosity). 
+BENCHMARK_EXPORT int32_t GetBenchmarkVerbosity(); + +// Creates a default display reporter. Used by the library when no display +// reporter is provided, but also made available for external use in case a +// custom reporter should respect the `--benchmark_format` flag as a fallback +BENCHMARK_EXPORT BenchmarkReporter* CreateDefaultDisplayReporter(); // Generate a list of benchmarks matching the specified --benchmark_filter flag // and if --benchmark_list_tests is specified return after printing the name @@ -309,18 +345,29 @@ std::string GetBenchmarkFilter(); // 'file_reporter' is ignored. // // RETURNS: The number of matching benchmarks. -size_t RunSpecifiedBenchmarks(); -size_t RunSpecifiedBenchmarks(std::string spec); +BENCHMARK_EXPORT size_t RunSpecifiedBenchmarks(); +BENCHMARK_EXPORT size_t RunSpecifiedBenchmarks(std::string spec); -size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter); -size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter, - std::string spec); +BENCHMARK_EXPORT size_t +RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter); +BENCHMARK_EXPORT size_t +RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter, std::string spec); -size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter, - BenchmarkReporter* file_reporter); -size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter, - BenchmarkReporter* file_reporter, - std::string spec); +BENCHMARK_EXPORT size_t RunSpecifiedBenchmarks( + BenchmarkReporter* display_reporter, BenchmarkReporter* file_reporter); +BENCHMARK_EXPORT size_t +RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter, + BenchmarkReporter* file_reporter, std::string spec); + +// TimeUnit is passed to a benchmark in order to specify the order of magnitude +// for the measured time. +enum TimeUnit { kNanosecond, kMicrosecond, kMillisecond, kSecond }; + +BENCHMARK_EXPORT TimeUnit GetDefaultTimeUnit(); + +// Sets the default time unit the benchmarks use +// Has to be called before the benchmark loop to take effect +BENCHMARK_EXPORT void SetDefaultTimeUnit(TimeUnit unit); // If a MemoryManager is registered (via RegisterMemoryManager()), // it can be used to collect and report allocation metrics for a run of the @@ -358,20 +405,16 @@ class MemoryManager { virtual void Start() = 0; // Implement this to stop recording and fill out the given Result structure. - BENCHMARK_DEPRECATED_MSG("Use Stop(Result&) instead") - virtual void Stop(Result* result) = 0; - - // FIXME(vyng): Make this pure virtual once we've migrated current users. - BENCHMARK_DISABLE_DEPRECATED_WARNING - virtual void Stop(Result& result) { Stop(&result); } - BENCHMARK_RESTORE_DEPRECATED_WARNING + virtual void Stop(Result& result) = 0; }; // Register a MemoryManager instance that will be used to collect and report // allocation measurements for benchmark runs. +BENCHMARK_EXPORT void RegisterMemoryManager(MemoryManager* memory_manager); // Add a key-value pair to output as part of the context stanza in the report. +BENCHMARK_EXPORT void AddCustomContext(const std::string& key, const std::string& value); namespace internal { @@ -379,14 +422,17 @@ class Benchmark; class BenchmarkImp; class BenchmarkFamilies; +BENCHMARK_EXPORT std::map*& GetGlobalContext(); + +BENCHMARK_EXPORT void UseCharPointer(char const volatile*); // Take ownership of the pointer and register the benchmark. Return the // registered benchmark. 
-Benchmark* RegisterBenchmarkInternal(Benchmark*); +BENCHMARK_EXPORT Benchmark* RegisterBenchmarkInternal(Benchmark*); // Ensure that the standard streams are properly initialized in every TU. -int InitializeStreams(); +BENCHMARK_EXPORT int InitializeStreams(); BENCHMARK_UNUSED static int stream_init_anchor = InitializeStreams(); } // namespace internal @@ -409,7 +455,11 @@ inline BENCHMARK_ALWAYS_INLINE void ClobberMemory() { // intended to add little to no overhead. // See: https://youtu.be/nXaxk27zwlk?t=2441 #ifndef BENCHMARK_HAS_NO_INLINE_ASSEMBLY +#if !defined(__GNUC__) || defined(__llvm__) || defined(__INTEL_COMPILER) template +BENCHMARK_DEPRECATED_MSG( + "The const-ref version of this method can permit " + "undesired compiler optimizations in benchmarks") inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp const& value) { asm volatile("" : : "r,m"(value) : "memory"); } @@ -423,6 +473,98 @@ inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp& value) { #endif } +#ifdef BENCHMARK_HAS_CXX11 +template +inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp&& value) { +#if defined(__clang__) + asm volatile("" : "+r,m"(value) : : "memory"); +#else + asm volatile("" : "+m,r"(value) : : "memory"); +#endif +} +#endif +#elif defined(BENCHMARK_HAS_CXX11) && (__GNUC__ >= 5) +// Workaround for a bug with full argument copy overhead with GCC. +// See: #1340 and https://gcc.gnu.org/bugzilla/show_bug.cgi?id=105519 +template +BENCHMARK_DEPRECATED_MSG( + "The const-ref version of this method can permit " + "undesired compiler optimizations in benchmarks") +inline BENCHMARK_ALWAYS_INLINE + typename std::enable_if::value && + (sizeof(Tp) <= sizeof(Tp*))>::type + DoNotOptimize(Tp const& value) { + asm volatile("" : : "r,m"(value) : "memory"); +} + +template +BENCHMARK_DEPRECATED_MSG( + "The const-ref version of this method can permit " + "undesired compiler optimizations in benchmarks") +inline BENCHMARK_ALWAYS_INLINE + typename std::enable_if::value || + (sizeof(Tp) > sizeof(Tp*))>::type + DoNotOptimize(Tp const& value) { + asm volatile("" : : "m"(value) : "memory"); +} + +template +inline BENCHMARK_ALWAYS_INLINE + typename std::enable_if::value && + (sizeof(Tp) <= sizeof(Tp*))>::type + DoNotOptimize(Tp& value) { + asm volatile("" : "+m,r"(value) : : "memory"); +} + +template +inline BENCHMARK_ALWAYS_INLINE + typename std::enable_if::value || + (sizeof(Tp) > sizeof(Tp*))>::type + DoNotOptimize(Tp& value) { + asm volatile("" : "+m"(value) : : "memory"); +} + +template +inline BENCHMARK_ALWAYS_INLINE + typename std::enable_if::value && + (sizeof(Tp) <= sizeof(Tp*))>::type + DoNotOptimize(Tp&& value) { + asm volatile("" : "+m,r"(value) : : "memory"); +} + +template +inline BENCHMARK_ALWAYS_INLINE + typename std::enable_if::value || + (sizeof(Tp) > sizeof(Tp*))>::type + DoNotOptimize(Tp&& value) { + asm volatile("" : "+m"(value) : : "memory"); +} + +#else +// Fallback for GCC < 5. Can add some overhead because the compiler is forced +// to use memory operations instead of operations with registers. +// TODO: Remove if GCC < 5 will be unsupported. 
+template +BENCHMARK_DEPRECATED_MSG( + "The const-ref version of this method can permit " + "undesired compiler optimizations in benchmarks") +inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp const& value) { + asm volatile("" : : "m"(value) : "memory"); +} + +template +inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp& value) { + asm volatile("" : "+m"(value) : : "memory"); +} + +#ifdef BENCHMARK_HAS_CXX11 +template +inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp&& value) { + asm volatile("" : "+m"(value) : : "memory"); +} +#endif +#endif + #ifndef BENCHMARK_HAS_CXX11 inline BENCHMARK_ALWAYS_INLINE void ClobberMemory() { asm volatile("" : : : "memory"); @@ -430,6 +572,9 @@ inline BENCHMARK_ALWAYS_INLINE void ClobberMemory() { #endif #elif defined(_MSC_VER) template +BENCHMARK_DEPRECATED_MSG( + "The const-ref version of this method can permit " + "undesired compiler optimizations in benchmarks") inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp const& value) { internal::UseCharPointer(&reinterpret_cast(value)); _ReadWriteBarrier(); @@ -440,6 +585,9 @@ inline BENCHMARK_ALWAYS_INLINE void ClobberMemory() { _ReadWriteBarrier(); } #endif #else template +BENCHMARK_DEPRECATED_MSG( + "The const-ref version of this method can permit " + "undesired compiler optimizations in benchmarks") inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp const& value) { internal::UseCharPointer(&reinterpret_cast(value)); } @@ -506,17 +654,13 @@ Counter::Flags inline operator|(const Counter::Flags& LHS, // This is the container for the user-defined counters. typedef std::map UserCounters; -// TimeUnit is passed to a benchmark in order to specify the order of magnitude -// for the measured time. -enum TimeUnit { kNanosecond, kMicrosecond, kMillisecond, kSecond }; - // BigO is passed to a benchmark in order to specify the asymptotic // computational // complexity for the benchmark. In case oAuto is selected, complexity will be // calculated automatically to the best fit. enum BigO { oNone, o1, oN, oNSquared, oNCubed, oLogN, oNLogN, oAuto, oLambda }; -typedef uint64_t IterationCount; +typedef int64_t IterationCount; enum StatisticUnit { kTime, kPercentage }; @@ -564,11 +708,21 @@ enum AggregationReportMode ARM_FileReportAggregatesOnly | ARM_DisplayReportAggregatesOnly }; +enum Skipped +#if defined(BENCHMARK_HAS_CXX11) + : unsigned +#endif +{ + NotSkipped = 0, + SkippedWithMessage, + SkippedWithError +}; + } // namespace internal // State is passed to a running Benchmark and contains state for the // benchmark to use. -class State { +class BENCHMARK_EXPORT State { public: struct StateIterator; friend struct StateIterator; @@ -600,8 +754,8 @@ class State { // } bool KeepRunningBatch(IterationCount n); - // REQUIRES: timer is running and 'SkipWithError(...)' has not been called - // by the current thread. + // REQUIRES: timer is running and 'SkipWithMessage(...)' or + // 'SkipWithError(...)' has not been called by the current thread. // Stop the benchmark timer. If not called, the timer will be // automatically stopped after the last iteration of the benchmark loop. // @@ -616,8 +770,8 @@ class State { // within each benchmark iteration, if possible. void PauseTiming(); - // REQUIRES: timer is not running and 'SkipWithError(...)' has not been called - // by the current thread. + // REQUIRES: timer is not running and 'SkipWithMessage(...)' or + // 'SkipWithError(...)' has not been called by the current thread. // Start the benchmark timer. The timer is NOT running on entrance to the // benchmark function. 
It begins running after control flow enters the // benchmark loop. @@ -627,8 +781,30 @@ class State { // within each benchmark iteration, if possible. void ResumeTiming(); - // REQUIRES: 'SkipWithError(...)' has not been called previously by the - // current thread. + // REQUIRES: 'SkipWithMessage(...)' or 'SkipWithError(...)' has not been + // called previously by the current thread. + // Report the benchmark as resulting in being skipped with the specified + // 'msg'. + // After this call the user may explicitly 'return' from the benchmark. + // + // If the ranged-for style of benchmark loop is used, the user must explicitly + // break from the loop, otherwise all future iterations will be run. + // If the 'KeepRunning()' loop is used the current thread will automatically + // exit the loop at the end of the current iteration. + // + // For threaded benchmarks only the current thread stops executing and future + // calls to `KeepRunning()` will block until all threads have completed + // the `KeepRunning()` loop. If multiple threads report being skipped only the + // first skip message is used. + // + // NOTE: Calling 'SkipWithMessage(...)' does not cause the benchmark to exit + // the current scope immediately. If the function is called from within + // the 'KeepRunning()' loop the current iteration will finish. It is the users + // responsibility to exit the scope as needed. + void SkipWithMessage(const std::string& msg); + + // REQUIRES: 'SkipWithMessage(...)' or 'SkipWithError(...)' has not been + // called previously by the current thread. // Report the benchmark as resulting in an error with the specified 'msg'. // After this call the user may explicitly 'return' from the benchmark. // @@ -646,10 +822,13 @@ class State { // the current scope immediately. If the function is called from within // the 'KeepRunning()' loop the current iteration will finish. It is the users // responsibility to exit the scope as needed. - void SkipWithError(const char* msg); + void SkipWithError(const std::string& msg); + + // Returns true if 'SkipWithMessage(...)' or 'SkipWithError(...)' was called. + bool skipped() const { return internal::NotSkipped != skipped_; } // Returns true if an error has been reported with 'SkipWithError(...)'. - bool error_occurred() const { return error_occurred_; } + bool error_occurred() const { return internal::SkippedWithError == skipped_; } // REQUIRES: called exactly once per iteration of the benchmarking loop. // Set the manually measured time for this benchmark iteration, which @@ -720,11 +899,7 @@ class State { // BM_Compress 50 50 14115038 compress:27.3% // // REQUIRES: a benchmark has exited its benchmarking loop. - void SetLabel(const char* label); - - void BENCHMARK_ALWAYS_INLINE SetLabel(const std::string& str) { - this->SetLabel(str.c_str()); - } + void SetLabel(const std::string& label); // Range arguments for this run. CHECKs if the argument has been set. BENCHMARK_ALWAYS_INLINE @@ -755,6 +930,9 @@ class State { return max_iterations - total_iterations_ + batch_leftover_; } + BENCHMARK_ALWAYS_INLINE + std::string name() const { return name_; } + private: // items we expect on the first cache line (ie 64 bytes of the struct) // When total_iterations_ is 0, KeepRunning() and friends will return false. 
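A hedged usage sketch of the skip API documented above (the benchmark and the file name are hypothetical, not taken from this patch):

```c++
#include <fstream>

#include <benchmark/benchmark.h>

static void BM_ParseConfig(benchmark::State& state) {
  std::ifstream cfg("config.json");  // hypothetical external dependency
  if (!cfg.is_open()) {
    // Report the run as failed; with the ranged-for loop the user must leave
    // explicitly, otherwise the remaining iterations would still execute.
    state.SkipWithError("could not open config.json");
    return;
  }
  for (auto _ : state) {
    // ... work being measured ...
  }
}
BENCHMARK(BM_ParseConfig);
```

`SkipWithMessage` is used the same way when the run should be reported as skipped rather than as an error.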
@@ -772,7 +950,7 @@ class State { private: bool started_; bool finished_; - bool error_occurred_; + internal::Skipped skipped_; // items we don't need on the first cache line std::vector range_; @@ -784,9 +962,9 @@ class State { UserCounters counters; private: - State(IterationCount max_iters, const std::vector& ranges, - int thread_i, int n_threads, internal::ThreadTimer* timer, - internal::ThreadManager* manager, + State(std::string name, IterationCount max_iters, + const std::vector& ranges, int thread_i, int n_threads, + internal::ThreadTimer* timer, internal::ThreadManager* manager, internal::PerfCountersMeasurement* perf_counters_measurement); void StartKeepRunning(); @@ -795,6 +973,7 @@ class State { bool KeepRunningInternal(IterationCount n, bool is_batch); void FinishKeepRunning(); + const std::string name_; const int thread_index_; const int threads_; @@ -826,7 +1005,7 @@ inline BENCHMARK_ALWAYS_INLINE bool State::KeepRunningInternal(IterationCount n, } if (!started_) { StartKeepRunning(); - if (!error_occurred_ && total_iterations_ >= n) { + if (!skipped() && total_iterations_ >= n) { total_iterations_ -= n; return true; } @@ -856,7 +1035,7 @@ struct State::StateIterator { BENCHMARK_ALWAYS_INLINE explicit StateIterator(State* st) - : cached_(st->error_occurred_ ? 0 : st->max_iterations), parent_(st) {} + : cached_(st->skipped() ? 0 : st->max_iterations), parent_(st) {} public: BENCHMARK_ALWAYS_INLINE @@ -899,7 +1078,7 @@ typedef void(Function)(State&); // be called on this object to change the properties of the benchmark. // Each method returns "this" so that multiple method calls can // chained into one expression. -class Benchmark { +class BENCHMARK_EXPORT Benchmark { public: virtual ~Benchmark(); @@ -1000,12 +1179,19 @@ class Benchmark { // REQUIRES: `t > 0` and `Iterations` has not been called on this benchmark. Benchmark* MinTime(double t); + // Set the minimum amount of time to run the benchmark before taking runtimes + // of this benchmark into account. This + // option overrides the `benchmark_min_warmup_time` flag. + // REQUIRES: `t >= 0` and `Iterations` has not been called on this benchmark. + Benchmark* MinWarmUpTime(double t); + // Specify the amount of iterations that should be run by this benchmark. + // This option overrides the `benchmark_min_time` flag. // REQUIRES: 'n > 0' and `MinTime` has not been called on this benchmark. // // NOTE: This function should only be used when *exact* iteration control is // needed and never to control or limit how long a benchmark runs, where - // `--benchmark_min_time=N` or `MinTime(...)` should be used instead. + // `--benchmark_min_time=s` or `MinTime(...)` should be used instead. Benchmark* Iterations(IterationCount n); // Specify the amount of times to repeat this benchmark. This option overrides @@ -1025,7 +1211,7 @@ class Benchmark { // By default, the CPU time is measured only for the main thread, which may // be unrepresentative if the benchmark uses threads internally. If called, // the total CPU time spent by all the threads will be measured instead. - // By default, the only the main thread CPU time will be measured. + // By default, only the main thread CPU time will be measured. 
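// A sketch, not part of the patch, of how the new MinWarmUpTime() setter
// documented above composes with MinTime(), Unit() and MeasureProcessCPUTime()
// (declared just below); it is the per-benchmark counterpart of the
// --benchmark_min_warmup_time and --benchmark_min_time flags. BM_QueryCache
// and its body are invented for illustration.
#include <benchmark/benchmark.h>

static void BM_QueryCache(benchmark::State& state) {
  double x = 1.0;
  for (auto _ : state) {
    x *= 1.000001;  // placeholder work
    benchmark::DoNotOptimize(x);
  }
}
BENCHMARK(BM_QueryCache)
    ->MinWarmUpTime(1.0)  // run for at least 1 s first; those results are discarded
    ->MinTime(2.0)        // then measure for at least 2 s
    ->Unit(benchmark::kMicrosecond)
    ->MeasureProcessCPUTime();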
Benchmark* MeasureProcessCPUTime(); // If a particular benchmark should use the Wall clock instead of the CPU time @@ -1090,12 +1276,16 @@ class Benchmark { virtual void Run(State& state) = 0; - protected: - explicit Benchmark(const char* name); - Benchmark(Benchmark const&); - void SetName(const char* name); + TimeUnit GetTimeUnit() const; + protected: + explicit Benchmark(const std::string& name); + void SetName(const std::string& name); + + public: + const char* GetName() const; int ArgsCnt() const; + const char* GetArgName(int arg) const; private: friend class BenchmarkFamilies; @@ -1105,9 +1295,13 @@ class Benchmark { AggregationReportMode aggregation_report_mode_; std::vector arg_names_; // Args for all benchmark runs std::vector > args_; // Args for all benchmark runs + TimeUnit time_unit_; + bool use_default_time_unit_; + int range_multiplier_; double min_time_; + double min_warmup_time_; IterationCount iterations_; int repetitions_; bool measure_process_cpu_time_; @@ -1122,7 +1316,17 @@ class Benchmark { callback_function setup_; callback_function teardown_; - Benchmark& operator=(Benchmark const&); + Benchmark(Benchmark const&) +#if defined(BENCHMARK_HAS_CXX11) + = delete +#endif + ; + + Benchmark& operator=(Benchmark const&) +#if defined(BENCHMARK_HAS_CXX11) + = delete +#endif + ; }; } // namespace internal @@ -1131,27 +1335,27 @@ class Benchmark { // the specified functor 'fn'. // // RETURNS: A pointer to the registered benchmark. -internal::Benchmark* RegisterBenchmark(const char* name, +internal::Benchmark* RegisterBenchmark(const std::string& name, internal::Function* fn); #if defined(BENCHMARK_HAS_CXX11) template -internal::Benchmark* RegisterBenchmark(const char* name, Lambda&& fn); +internal::Benchmark* RegisterBenchmark(const std::string& name, Lambda&& fn); #endif // Remove all registered benchmarks. All pointers to previously registered // benchmarks are invalidated. -void ClearRegisteredBenchmarks(); +BENCHMARK_EXPORT void ClearRegisteredBenchmarks(); namespace internal { // The class used to hold all Benchmarks created from static function. // (ie those created using the BENCHMARK(...) macros. -class FunctionBenchmark : public Benchmark { +class BENCHMARK_EXPORT FunctionBenchmark : public Benchmark { public: - FunctionBenchmark(const char* name, Function* func) + FunctionBenchmark(const std::string& name, Function* func) : Benchmark(name), func_(func) {} - virtual void Run(State& st) BENCHMARK_OVERRIDE; + void Run(State& st) BENCHMARK_OVERRIDE; private: Function* func_; @@ -1161,35 +1365,38 @@ class FunctionBenchmark : public Benchmark { template class LambdaBenchmark : public Benchmark { public: - virtual void Run(State& st) BENCHMARK_OVERRIDE { lambda_(st); } + void Run(State& st) BENCHMARK_OVERRIDE { lambda_(st); } private: template - LambdaBenchmark(const char* name, OLambda&& lam) + LambdaBenchmark(const std::string& name, OLambda&& lam) : Benchmark(name), lambda_(std::forward(lam)) {} LambdaBenchmark(LambdaBenchmark const&) = delete; template // NOLINTNEXTLINE(readability-redundant-declaration) - friend Benchmark* ::benchmark::RegisterBenchmark(const char*, Lam&&); + friend Benchmark* ::benchmark::RegisterBenchmark(const std::string&, Lam&&); Lambda lambda_; }; #endif - } // namespace internal -inline internal::Benchmark* RegisterBenchmark(const char* name, +inline internal::Benchmark* RegisterBenchmark(const std::string& name, internal::Function* fn) { + // FIXME: this should be a `std::make_unique<>()` but we don't have C++14. 
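// A sketch, not part of the patch, of the std::string overloads above:
// benchmark names can now be built at runtime without worrying about the
// lifetime of a char pointer. The sizes and the lambda body are illustrative.
#include <benchmark/benchmark.h>

#include <string>
#include <vector>

int main(int argc, char** argv) {
  for (int size : {8, 64, 512}) {
    benchmark::RegisterBenchmark(
        "BM_Fill/" + std::to_string(size), [size](benchmark::State& st) {
          std::vector<int> v;
          for (auto _ : st) {
            v.assign(size, 42);
            benchmark::DoNotOptimize(v.data());
          }
        });
  }
  benchmark::Initialize(&argc, argv);
  benchmark::RunSpecifiedBenchmarks();
  benchmark::Shutdown();
  return 0;
}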
+ // codechecker_intentional [cplusplus.NewDeleteLeaks] return internal::RegisterBenchmarkInternal( ::new internal::FunctionBenchmark(name, fn)); } #ifdef BENCHMARK_HAS_CXX11 template -internal::Benchmark* RegisterBenchmark(const char* name, Lambda&& fn) { +internal::Benchmark* RegisterBenchmark(const std::string& name, Lambda&& fn) { using BenchType = internal::LambdaBenchmark::type>; + // FIXME: this should be a `std::make_unique<>()` but we don't have C++14. + // codechecker_intentional [cplusplus.NewDeleteLeaks] return internal::RegisterBenchmarkInternal( ::new BenchType(name, std::forward(fn))); } @@ -1198,7 +1405,7 @@ internal::Benchmark* RegisterBenchmark(const char* name, Lambda&& fn) { #if defined(BENCHMARK_HAS_CXX11) && \ (!defined(BENCHMARK_GCC_VERSION) || BENCHMARK_GCC_VERSION >= 409) template -internal::Benchmark* RegisterBenchmark(const char* name, Lambda&& fn, +internal::Benchmark* RegisterBenchmark(const std::string& name, Lambda&& fn, Args&&... args) { return benchmark::RegisterBenchmark( name, [=](benchmark::State& st) { fn(st, args...); }); @@ -1212,7 +1419,7 @@ class Fixture : public internal::Benchmark { public: Fixture() : internal::Benchmark("") {} - virtual void Run(State& st) BENCHMARK_OVERRIDE { + void Run(State& st) BENCHMARK_OVERRIDE { this->SetUp(st); this->BenchmarkCase(st); this->TearDown(st); @@ -1228,7 +1435,6 @@ class Fixture : public internal::Benchmark { protected: virtual void BenchmarkCase(State&) = 0; }; - } // namespace benchmark // ------------------------------------------------------ @@ -1268,7 +1474,7 @@ class Fixture : public internal::Benchmark { BENCHMARK_PRIVATE_DECLARE(_benchmark_) = \ (::benchmark::internal::RegisterBenchmarkInternal( \ new ::benchmark::internal::FunctionBenchmark(#__VA_ARGS__, \ - &__VA_ARGS__))) + __VA_ARGS__))) #else #define BENCHMARK(n) \ BENCHMARK_PRIVATE_DECLARE(n) = \ @@ -1335,37 +1541,37 @@ class Fixture : public internal::Benchmark { #define BENCHMARK_TEMPLATE(n, a) BENCHMARK_TEMPLATE1(n, a) #endif -#define BENCHMARK_PRIVATE_DECLARE_F(BaseClass, Method) \ - class BaseClass##_##Method##_Benchmark : public BaseClass { \ - public: \ - BaseClass##_##Method##_Benchmark() { \ - this->SetName(#BaseClass "/" #Method); \ - } \ - \ - protected: \ - virtual void BenchmarkCase(::benchmark::State&) BENCHMARK_OVERRIDE; \ +#define BENCHMARK_PRIVATE_DECLARE_F(BaseClass, Method) \ + class BaseClass##_##Method##_Benchmark : public BaseClass { \ + public: \ + BaseClass##_##Method##_Benchmark() { \ + this->SetName(#BaseClass "/" #Method); \ + } \ + \ + protected: \ + void BenchmarkCase(::benchmark::State&) BENCHMARK_OVERRIDE; \ }; -#define BENCHMARK_TEMPLATE1_PRIVATE_DECLARE_F(BaseClass, Method, a) \ - class BaseClass##_##Method##_Benchmark : public BaseClass { \ - public: \ - BaseClass##_##Method##_Benchmark() { \ - this->SetName(#BaseClass "<" #a ">/" #Method); \ - } \ - \ - protected: \ - virtual void BenchmarkCase(::benchmark::State&) BENCHMARK_OVERRIDE; \ +#define BENCHMARK_TEMPLATE1_PRIVATE_DECLARE_F(BaseClass, Method, a) \ + class BaseClass##_##Method##_Benchmark : public BaseClass { \ + public: \ + BaseClass##_##Method##_Benchmark() { \ + this->SetName(#BaseClass "<" #a ">/" #Method); \ + } \ + \ + protected: \ + void BenchmarkCase(::benchmark::State&) BENCHMARK_OVERRIDE; \ }; -#define BENCHMARK_TEMPLATE2_PRIVATE_DECLARE_F(BaseClass, Method, a, b) \ - class BaseClass##_##Method##_Benchmark : public BaseClass { \ - public: \ - BaseClass##_##Method##_Benchmark() { \ - this->SetName(#BaseClass "<" #a "," #b ">/" #Method); \ - } \ 
- \ - protected: \ - virtual void BenchmarkCase(::benchmark::State&) BENCHMARK_OVERRIDE; \ +#define BENCHMARK_TEMPLATE2_PRIVATE_DECLARE_F(BaseClass, Method, a, b) \ + class BaseClass##_##Method##_Benchmark : public BaseClass { \ + public: \ + BaseClass##_##Method##_Benchmark() { \ + this->SetName(#BaseClass "<" #a "," #b ">/" #Method); \ + } \ + \ + protected: \ + void BenchmarkCase(::benchmark::State&) BENCHMARK_OVERRIDE; \ }; #ifdef BENCHMARK_HAS_CXX11 @@ -1377,7 +1583,7 @@ class Fixture : public internal::Benchmark { } \ \ protected: \ - virtual void BenchmarkCase(::benchmark::State&) BENCHMARK_OVERRIDE; \ + void BenchmarkCase(::benchmark::State&) BENCHMARK_OVERRIDE; \ }; #else #define BENCHMARK_TEMPLATE_PRIVATE_DECLARE_F(n, a) \ @@ -1439,8 +1645,15 @@ class Fixture : public internal::Benchmark { #endif // Helper macro to create a main routine in a test that runs the benchmarks +// Note the workaround for Hexagon simulator passing argc != 0, argv = NULL. #define BENCHMARK_MAIN() \ int main(int argc, char** argv) { \ + char arg0_default[] = "benchmark"; \ + char* args_default = arg0_default; \ + if (!argv) { \ + argc = 1; \ + argv = &args_default; \ + } \ ::benchmark::Initialize(&argc, argv); \ if (::benchmark::ReportUnrecognizedArguments(argc, argv)) return 1; \ ::benchmark::RunSpecifiedBenchmarks(); \ @@ -1454,7 +1667,7 @@ class Fixture : public internal::Benchmark { namespace benchmark { -struct CPUInfo { +struct BENCHMARK_EXPORT CPUInfo { struct CacheInfo { std::string type; int level; @@ -1478,7 +1691,7 @@ struct CPUInfo { }; // Adding Struct for System Information -struct SystemInfo { +struct BENCHMARK_EXPORT SystemInfo { std::string name; static const SystemInfo& Get(); @@ -1490,10 +1703,11 @@ struct SystemInfo { // BenchmarkName contains the components of the Benchmark's name // which allows individual fields to be modified or cleared before // building the final name using 'str()'. -struct BenchmarkName { +struct BENCHMARK_EXPORT BenchmarkName { std::string function_name; std::string args; std::string min_time; + std::string min_warmup_time; std::string iterations; std::string repetitions; std::string time_type; @@ -1509,7 +1723,7 @@ struct BenchmarkName { // can control the destination of the reports by calling // RunSpecifiedBenchmarks and passing it a custom reporter object. // The reporter object must implement the following interface. -class BenchmarkReporter { +class BENCHMARK_EXPORT BenchmarkReporter { public: struct Context { CPUInfo const& cpu_info; @@ -1520,17 +1734,17 @@ class BenchmarkReporter { Context(); }; - struct Run { + struct BENCHMARK_EXPORT Run { static const int64_t no_repetition_index = -1; enum RunType { RT_Iteration, RT_Aggregate }; Run() : run_type(RT_Iteration), aggregate_unit(kTime), - error_occurred(false), + skipped(internal::NotSkipped), iterations(1), threads(1), - time_unit(kNanosecond), + time_unit(GetDefaultTimeUnit()), real_accumulated_time(0), cpu_accumulated_time(0), max_heapbytes_used(0), @@ -1550,8 +1764,8 @@ class BenchmarkReporter { std::string aggregate_name; StatisticUnit aggregate_unit; std::string report_label; // Empty if not set by benchmark. - bool error_occurred; - std::string error_message; + internal::Skipped skipped; + std::string skip_message; IterationCount iterations; int64_t threads; @@ -1620,6 +1834,12 @@ class BenchmarkReporter { // to skip runs based on the context information. 
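// A sketch, not part of the patch, of the public fixture macros that expand to
// the BaseClass##_##Method##_Benchmark classes declared above. MyFixture and
// its payload are hypothetical.
#include <benchmark/benchmark.h>

#include <vector>

class MyFixture : public benchmark::Fixture {
 public:
  void SetUp(const benchmark::State& state) override {
    data_.assign(state.range(0), 1);
  }
  void TearDown(const benchmark::State&) override { data_.clear(); }
  std::vector<int> data_;
};

BENCHMARK_DEFINE_F(MyFixture, Sum)(benchmark::State& state) {
  for (auto _ : state) {
    long total = 0;
    for (int v : data_) total += v;
    benchmark::DoNotOptimize(total);
  }
}
BENCHMARK_REGISTER_F(MyFixture, Sum)->Arg(1 << 10);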
virtual bool ReportContext(const Context& context) = 0; + // Called once for each group of benchmark runs, gives information about + // the configurations of the runs. + virtual void ReportRunsConfig(double /*min_time*/, + bool /*has_explicit_iters*/, + IterationCount /*iters*/) {} + // Called once for each group of benchmark runs, gives information about // cpu-time and heap memory usage during the benchmark run. If the group // of runs contained more than two entries then 'report' contains additional @@ -1665,7 +1885,7 @@ class BenchmarkReporter { // Simple reporter that outputs benchmark data to the console. This is the // default reporter used by RunSpecifiedBenchmarks(). -class ConsoleReporter : public BenchmarkReporter { +class BENCHMARK_EXPORT ConsoleReporter : public BenchmarkReporter { public: enum OutputOptions { OO_None = 0, @@ -1677,8 +1897,8 @@ class ConsoleReporter : public BenchmarkReporter { explicit ConsoleReporter(OutputOptions opts_ = OO_Defaults) : output_options_(opts_), name_field_width_(0), printed_header_(false) {} - virtual bool ReportContext(const Context& context) BENCHMARK_OVERRIDE; - virtual void ReportRuns(const std::vector& reports) BENCHMARK_OVERRIDE; + bool ReportContext(const Context& context) BENCHMARK_OVERRIDE; + void ReportRuns(const std::vector& reports) BENCHMARK_OVERRIDE; protected: virtual void PrintRunData(const Run& report); @@ -1690,12 +1910,12 @@ class ConsoleReporter : public BenchmarkReporter { bool printed_header_; }; -class JSONReporter : public BenchmarkReporter { +class BENCHMARK_EXPORT JSONReporter : public BenchmarkReporter { public: JSONReporter() : first_report_(true) {} - virtual bool ReportContext(const Context& context) BENCHMARK_OVERRIDE; - virtual void ReportRuns(const std::vector& reports) BENCHMARK_OVERRIDE; - virtual void Finalize() BENCHMARK_OVERRIDE; + bool ReportContext(const Context& context) BENCHMARK_OVERRIDE; + void ReportRuns(const std::vector& reports) BENCHMARK_OVERRIDE; + void Finalize() BENCHMARK_OVERRIDE; private: void PrintRunData(const Run& report); @@ -1703,13 +1923,13 @@ class JSONReporter : public BenchmarkReporter { bool first_report_; }; -class BENCHMARK_DEPRECATED_MSG( +class BENCHMARK_EXPORT BENCHMARK_DEPRECATED_MSG( "The CSV Reporter will be removed in a future release") CSVReporter : public BenchmarkReporter { public: CSVReporter() : printed_header_(false) {} - virtual bool ReportContext(const Context& context) BENCHMARK_OVERRIDE; - virtual void ReportRuns(const std::vector& reports) BENCHMARK_OVERRIDE; + bool ReportContext(const Context& context) BENCHMARK_OVERRIDE; + void ReportRuns(const std::vector& reports) BENCHMARK_OVERRIDE; private: void PrintRunData(const Run& report); @@ -1748,18 +1968,24 @@ inline double GetTimeUnitMultiplier(TimeUnit unit) { // Creates a list of integer values for the given range and multiplier. // This can be used together with ArgsProduct() to allow multiple ranges -// with different multiplers. +// with different multipliers. // Example: // ArgsProduct({ // CreateRange(0, 1024, /*multi=*/32), // CreateRange(0, 100, /*multi=*/4), // CreateDenseRange(0, 4, /*step=*/1), // }); +BENCHMARK_EXPORT std::vector CreateRange(int64_t lo, int64_t hi, int multi); // Creates a list of integer values for the given range and step. 
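// A sketch, not part of the patch, pairing CreateRange() above and
// CreateDenseRange() declared just below with ArgsProduct(), as the comment
// suggests. BM_Matrix and its two arguments are hypothetical.
#include <benchmark/benchmark.h>

static void BM_Matrix(benchmark::State& state) {
  const int64_t rows = state.range(0);
  const int64_t padding = state.range(1);
  for (auto _ : state) {
    benchmark::DoNotOptimize(rows * (rows + padding));  // placeholder work
  }
}
BENCHMARK(BM_Matrix)->ArgsProduct({
    benchmark::CreateRange(8, 1024, /*multi=*/8),   // 8, 64, 512, 1024
    benchmark::CreateDenseRange(0, 4, /*step=*/1),  // 0, 1, 2, 3, 4
});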
+BENCHMARK_EXPORT std::vector CreateDenseRange(int64_t start, int64_t limit, int step); } // namespace benchmark +#if defined(_MSC_VER) +#pragma warning(pop) +#endif + #endif // BENCHMARK_BENCHMARK_H_ diff --git a/include/benchmark/export.h b/include/benchmark/export.h new file mode 100644 index 0000000..f96f859 --- /dev/null +++ b/include/benchmark/export.h @@ -0,0 +1,47 @@ +#ifndef BENCHMARK_EXPORT_H +#define BENCHMARK_EXPORT_H + +#if defined(_WIN32) +#define EXPORT_ATTR __declspec(dllexport) +#define IMPORT_ATTR __declspec(dllimport) +#define NO_EXPORT_ATTR +#define DEPRECATED_ATTR __declspec(deprecated) +#else // _WIN32 +#define EXPORT_ATTR __attribute__((visibility("default"))) +#define IMPORT_ATTR __attribute__((visibility("default"))) +#define NO_EXPORT_ATTR __attribute__((visibility("hidden"))) +#define DEPRECATE_ATTR __attribute__((__deprecated__)) +#endif // _WIN32 + +#ifdef BENCHMARK_STATIC_DEFINE +#define BENCHMARK_EXPORT +#define BENCHMARK_NO_EXPORT +#else // BENCHMARK_STATIC_DEFINE +#ifndef BENCHMARK_EXPORT +#ifdef benchmark_EXPORTS +/* We are building this library */ +#define BENCHMARK_EXPORT EXPORT_ATTR +#else // benchmark_EXPORTS +/* We are using this library */ +#define BENCHMARK_EXPORT IMPORT_ATTR +#endif // benchmark_EXPORTS +#endif // !BENCHMARK_EXPORT + +#ifndef BENCHMARK_NO_EXPORT +#define BENCHMARK_NO_EXPORT NO_EXPORT_ATTR +#endif // !BENCHMARK_NO_EXPORT +#endif // BENCHMARK_STATIC_DEFINE + +#ifndef BENCHMARK_DEPRECATED +#define BENCHMARK_DEPRECATED DEPRECATE_ATTR +#endif // BENCHMARK_DEPRECATED + +#ifndef BENCHMARK_DEPRECATED_EXPORT +#define BENCHMARK_DEPRECATED_EXPORT BENCHMARK_EXPORT BENCHMARK_DEPRECATED +#endif // BENCHMARK_DEPRECATED_EXPORT + +#ifndef BENCHMARK_DEPRECATED_NO_EXPORT +#define BENCHMARK_DEPRECATED_NO_EXPORT BENCHMARK_NO_EXPORT BENCHMARK_DEPRECATED +#endif // BENCHMARK_DEPRECATED_EXPORT + +#endif /* BENCHMARK_EXPORT_H */ diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000..fe8770b --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,50 @@ +[build-system] +requires = ["setuptools", "wheel"] +build-backend = "setuptools.build_meta" + +[project] +name = "google_benchmark" +description = "A library to benchmark code snippets." 
+requires-python = ">=3.8" +license = {file = "LICENSE"} +keywords = ["benchmark"] + +authors = [ + {name = "Google", email = "benchmark-discuss@googlegroups.com"}, +] + +classifiers = [ + "Development Status :: 4 - Beta", + "Intended Audience :: Developers", + "Intended Audience :: Science/Research", + "License :: OSI Approved :: Apache Software License", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Topic :: Software Development :: Testing", + "Topic :: System :: Benchmark", +] + +dynamic = ["readme", "version"] + +dependencies = [ + "absl-py>=0.7.1", +] + +[project.urls] +Homepage = "https://github.com/google/benchmark" +Documentation = "https://github.com/google/benchmark/tree/main/docs" +Repository = "https://github.com/google/benchmark.git" +Discord = "https://discord.gg/cz7UX7wKC2" + +[tool.setuptools] +package-dir = {"" = "bindings/python"} +zip-safe = false + +[tool.setuptools.packages.find] +where = ["bindings/python"] + +[tool.setuptools.dynamic] +version = { attr = "google_benchmark.__version__" } +readme = { file = "README.md", content-type = "text/markdown" } diff --git a/setup.py b/setup.py index 4eaccf8..b02a6a7 100644 --- a/setup.py +++ b/setup.py @@ -1,56 +1,50 @@ +import contextlib import os -import posixpath import platform -import re import shutil -import sys +import sysconfig +from pathlib import Path -from distutils import sysconfig import setuptools from setuptools.command import build_ext -HERE = os.path.dirname(os.path.abspath(__file__)) +PYTHON_INCLUDE_PATH_PLACEHOLDER = "" + +IS_WINDOWS = platform.system() == "Windows" +IS_MAC = platform.system() == "Darwin" -IS_WINDOWS = sys.platform.startswith("win") - - -def _get_version(): - """Parse the version string from __init__.py.""" - with open( - os.path.join(HERE, "bindings", "python", "google_benchmark", "__init__.py") - ) as init_file: +@contextlib.contextmanager +def temp_fill_include_path(fp: str): + """Temporarily set the Python include path in a file.""" + with open(fp, "r+") as f: try: - version_line = next( - line for line in init_file if line.startswith("__version__") + content = f.read() + replaced = content.replace( + PYTHON_INCLUDE_PATH_PLACEHOLDER, + Path(sysconfig.get_paths()['include']).as_posix(), ) - except StopIteration: - raise ValueError("__version__ not defined in __init__.py") - else: - namespace = {} - exec(version_line, namespace) # pylint: disable=exec-used - return namespace["__version__"] - - -def _parse_requirements(path): - with open(os.path.join(HERE, path)) as requirements: - return [ - line.rstrip() - for line in requirements - if not (line.isspace() or line.startswith("#")) - ] + f.seek(0) + f.write(replaced) + f.truncate() + yield + finally: + # revert to the original content after exit + f.seek(0) + f.write(content) + f.truncate() class BazelExtension(setuptools.Extension): """A C/C++ extension that is defined as a Bazel BUILD target.""" - def __init__(self, name, bazel_target): + def __init__(self, name: str, bazel_target: str): + super().__init__(name=name, sources=[]) + self.bazel_target = bazel_target - self.relpath, self.target_name = posixpath.relpath(bazel_target, "//").split( - ":" - ) - setuptools.Extension.__init__(self, name, sources=[]) + stripped_target = bazel_target.split("//")[-1] + self.relpath, self.target_name = stripped_target.split(":") class BuildBazelExtension(build_ext.build_ext): @@ -61,83 +55,59 @@ class 
BuildBazelExtension(build_ext.build_ext): self.bazel_build(ext) build_ext.build_ext.run(self) - def bazel_build(self, ext): + def bazel_build(self, ext: BazelExtension): """Runs the bazel build to create the package.""" - with open("WORKSPACE", "r") as workspace: - workspace_contents = workspace.read() + with temp_fill_include_path("WORKSPACE"): + temp_path = Path(self.build_temp) - with open("WORKSPACE", "w") as workspace: - workspace.write( - re.sub( - r'(?<=path = ").*(?=", # May be overwritten by setup\.py\.)', - sysconfig.get_python_inc().replace(os.path.sep, posixpath.sep), - workspace_contents, - ) - ) + bazel_argv = [ + "bazel", + "build", + ext.bazel_target, + f"--symlink_prefix={temp_path / 'bazel-'}", + f"--compilation_mode={'dbg' if self.debug else 'opt'}", + # C++17 is required by nanobind + f"--cxxopt={'/std:c++17' if IS_WINDOWS else '-std=c++17'}", + ] - if not os.path.exists(self.build_temp): - os.makedirs(self.build_temp) + if IS_WINDOWS: + # Link with python*.lib. + for library_dir in self.library_dirs: + bazel_argv.append("--linkopt=/LIBPATH:" + library_dir) + elif IS_MAC: + if platform.machine() == "x86_64": + # C++17 needs macOS 10.14 at minimum + bazel_argv.append("--macos_minimum_os=10.14") - bazel_argv = [ - "bazel", - "build", - ext.bazel_target, - "--symlink_prefix=" + os.path.join(self.build_temp, "bazel-"), - "--compilation_mode=" + ("dbg" if self.debug else "opt"), - ] + # cross-compilation for Mac ARM64 on GitHub Mac x86 runners. + # ARCHFLAGS is set by cibuildwheel before macOS wheel builds. + archflags = os.getenv("ARCHFLAGS", "") + if "arm64" in archflags: + bazel_argv.append("--cpu=darwin_arm64") + bazel_argv.append("--macos_cpus=arm64") - if IS_WINDOWS: - # Link with python*.lib. - for library_dir in self.library_dirs: - bazel_argv.append("--linkopt=/LIBPATH:" + library_dir) - elif sys.platform == "darwin" and platform.machine() == "x86_64": - bazel_argv.append("--macos_minimum_os=10.9") + elif platform.machine() == "arm64": + bazel_argv.append("--macos_minimum_os=11.0") - self.spawn(bazel_argv) + self.spawn(bazel_argv) - shared_lib_suffix = '.dll' if IS_WINDOWS else '.so' - ext_bazel_bin_path = os.path.join( - self.build_temp, 'bazel-bin', - ext.relpath, ext.target_name + shared_lib_suffix) + shared_lib_suffix = '.dll' if IS_WINDOWS else '.so' + ext_name = ext.target_name + shared_lib_suffix + ext_bazel_bin_path = temp_path / 'bazel-bin' / ext.relpath / ext_name - ext_dest_path = self.get_ext_fullpath(ext.name) - ext_dest_dir = os.path.dirname(ext_dest_path) - if not os.path.exists(ext_dest_dir): - os.makedirs(ext_dest_dir) - shutil.copyfile(ext_bazel_bin_path, ext_dest_path) + ext_dest_path = Path(self.get_ext_fullpath(ext.name)) + shutil.copyfile(ext_bazel_bin_path, ext_dest_path) + + # explicitly call `bazel shutdown` for graceful exit + self.spawn(["bazel", "shutdown"]) setuptools.setup( - name="google_benchmark", - version=_get_version(), - url="https://github.com/google/benchmark", - description="A library to benchmark code snippets.", - author="Google", - author_email="benchmark-py@google.com", - # Contained modules and scripts. 
- package_dir={"": "bindings/python"}, - packages=setuptools.find_packages("bindings/python"), - install_requires=_parse_requirements("bindings/python/requirements.txt"), cmdclass=dict(build_ext=BuildBazelExtension), ext_modules=[ BazelExtension( - "google_benchmark._benchmark", - "//bindings/python/google_benchmark:_benchmark", + name="google_benchmark._benchmark", + bazel_target="//bindings/python/google_benchmark:_benchmark", ) ], - zip_safe=False, - # PyPI package information. - classifiers=[ - "Development Status :: 4 - Beta", - "Intended Audience :: Developers", - "Intended Audience :: Science/Research", - "License :: OSI Approved :: Apache Software License", - "Programming Language :: Python :: 3.6", - "Programming Language :: Python :: 3.7", - "Programming Language :: Python :: 3.8", - "Topic :: Software Development :: Testing", - "Topic :: System :: Benchmark", - ], - license="Apache 2.0", - keywords="benchmark", ) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index e814a4e..daf82fb 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -25,12 +25,18 @@ set_target_properties(benchmark PROPERTIES SOVERSION ${GENERIC_LIB_SOVERSION} ) target_include_directories(benchmark PUBLIC - $) + $ +) # libpfm, if available -if (HAVE_LIBPFM) - target_link_libraries(benchmark PRIVATE pfm) - add_definitions(-DHAVE_LIBPFM) +if (PFM_FOUND) + target_link_libraries(benchmark PRIVATE PFM::libpfm) + target_compile_definitions(benchmark PRIVATE -DHAVE_LIBPFM) +endif() + +# pthread affinity, if available +if(HAVE_PTHREAD_AFFINITY) + target_compile_definitions(benchmark PRIVATE -DBENCHMARK_HAS_PTHREAD_AFFINITY) endif() # Link threads. @@ -53,6 +59,10 @@ if(${CMAKE_SYSTEM_NAME} MATCHES "SunOS") target_link_libraries(benchmark PRIVATE kstat) endif() +if (NOT BUILD_SHARED_LIBS) + target_compile_definitions(benchmark PUBLIC -DBENCHMARK_STATIC_DEFINE) +endif() + # Benchmark main library add_library(benchmark_main "benchmark_main.cc") add_library(benchmark::benchmark_main ALIAS benchmark_main) @@ -60,10 +70,10 @@ set_target_properties(benchmark_main PROPERTIES OUTPUT_NAME "benchmark_main" VERSION ${GENERIC_LIB_VERSION} SOVERSION ${GENERIC_LIB_SOVERSION} + DEFINE_SYMBOL benchmark_EXPORTS ) target_link_libraries(benchmark_main PUBLIC benchmark::benchmark) - set(generated_dir "${PROJECT_BINARY_DIR}") set(version_config "${generated_dir}/${PROJECT_NAME}ConfigVersion.cmake") @@ -107,6 +117,7 @@ if (BENCHMARK_ENABLE_INSTALL) install( DIRECTORY "${PROJECT_SOURCE_DIR}/include/benchmark" + "${PROJECT_BINARY_DIR}/include/benchmark" DESTINATION ${CMAKE_INSTALL_INCLUDEDIR} FILES_MATCHING PATTERN "*.*h") diff --git a/src/benchmark.cc b/src/benchmark.cc index cedeee3..6139e59 100644 --- a/src/benchmark.cc +++ b/src/benchmark.cc @@ -19,7 +19,7 @@ #include "internal_macros.h" #ifndef BENCHMARK_OS_WINDOWS -#ifndef BENCHMARK_OS_FUCHSIA +#if !defined(BENCHMARK_OS_FUCHSIA) && !defined(BENCHMARK_OS_QURT) #include #endif #include @@ -65,12 +65,28 @@ BM_DEFINE_bool(benchmark_list_tests, false); // linked into the binary are run. BM_DEFINE_string(benchmark_filter, ""); -// Minimum number of seconds we should run benchmark before results are -// considered significant. For cpu-time based tests, this is the lower bound +// Specification of how long to run the benchmark. +// +// It can be either an exact number of iterations (specified as `x`), +// or a minimum number of seconds (specified as `s`). 
If the latter +// format (ie., min seconds) is used, the system may run the benchmark longer +// until the results are considered significant. +// +// For backward compatibility, the `s` suffix may be omitted, in which case, +// the specified number is interpreted as the number of seconds. +// +// For cpu-time based tests, this is the lower bound // on the total cpu time used by all threads that make up the test. For // real-time based tests, this is the lower bound on the elapsed time of the // benchmark execution, regardless of number of threads. -BM_DEFINE_double(benchmark_min_time, 0.5); +BM_DEFINE_string(benchmark_min_time, kDefaultMinTimeStr); + +// Minimum number of seconds a benchmark should be run before results should be +// taken into account. This e.g can be necessary for benchmarks of code which +// needs to fill some form of cache before performance is of interest. +// Note: results gathered within this period are discarded and not used for +// reported result. +BM_DEFINE_double(benchmark_min_warmup_time, 0.0); // The number of runs of each benchmark. If greater than 1, the mean and // standard deviation of the runs will be reported. @@ -121,6 +137,10 @@ BM_DEFINE_string(benchmark_perf_counters, ""); // pairs. Kept internal as it's only used for parsing from env/command line. BM_DEFINE_kvpairs(benchmark_context, {}); +// Set the default time unit to use for reports +// Valid values are 'ns', 'us', 'ms' or 's' +BM_DEFINE_string(benchmark_time_unit, ""); + // The level of verbose logging to output BM_DEFINE_int32(v, 0); @@ -128,23 +148,28 @@ namespace internal { std::map* global_context = nullptr; +BENCHMARK_EXPORT std::map*& GetGlobalContext() { + return global_context; +} + // FIXME: wouldn't LTO mess this up? void UseCharPointer(char const volatile*) {} } // namespace internal -State::State(IterationCount max_iters, const std::vector& ranges, - int thread_i, int n_threads, internal::ThreadTimer* timer, - internal::ThreadManager* manager, +State::State(std::string name, IterationCount max_iters, + const std::vector& ranges, int thread_i, int n_threads, + internal::ThreadTimer* timer, internal::ThreadManager* manager, internal::PerfCountersMeasurement* perf_counters_measurement) : total_iterations_(0), batch_leftover_(0), max_iterations(max_iters), started_(false), finished_(false), - error_occurred_(false), + skipped_(internal::NotSkipped), range_(ranges), complexity_n_(0), + name_(std::move(name)), thread_index_(thread_i), threads_(n_threads), timer_(timer), @@ -154,6 +179,17 @@ State::State(IterationCount max_iters, const std::vector& ranges, BM_CHECK_LT(thread_index_, threads_) << "thread_index must be less than threads"; + // Add counters with correct flag now. If added with `counters[name]` in + // `PauseTiming`, a new `Counter` will be inserted the first time, which + // won't have the flag. Inserting them now also reduces the allocations + // during the benchmark. + if (perf_counters_measurement_) { + for (const std::string& counter_name : + perf_counters_measurement_->names()) { + counters[counter_name] = Counter(0.0, Counter::kAvgIterations); + } + } + // Note: The use of offsetof below is technically undefined until C++17 // because State is not a standard layout type. 
However, all compilers // currently provide well-defined behavior as an extension (which is @@ -166,50 +202,79 @@ State::State(IterationCount max_iters, const std::vector& ranges, #elif defined(__GNUC__) #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Winvalid-offsetof" +#endif +#if defined(__NVCC__) +#pragma nv_diagnostic push +#pragma nv_diag_suppress 1427 +#endif +#if defined(__NVCOMPILER) +#pragma diagnostic push +#pragma diag_suppress offset_in_non_POD_nonstandard #endif // Offset tests to ensure commonly accessed data is on the first cache line. const int cache_line_size = 64; - static_assert(offsetof(State, error_occurred_) <= - (cache_line_size - sizeof(error_occurred_)), - ""); + static_assert( + offsetof(State, skipped_) <= (cache_line_size - sizeof(skipped_)), ""); #if defined(__INTEL_COMPILER) #pragma warning pop #elif defined(__GNUC__) #pragma GCC diagnostic pop #endif +#if defined(__NVCC__) +#pragma nv_diagnostic pop +#endif +#if defined(__NVCOMPILER) +#pragma diagnostic pop +#endif } void State::PauseTiming() { // Add in time accumulated so far - BM_CHECK(started_ && !finished_ && !error_occurred_); + BM_CHECK(started_ && !finished_ && !skipped()); timer_->StopTimer(); if (perf_counters_measurement_) { - auto measurements = perf_counters_measurement_->StopAndGetMeasurements(); + std::vector> measurements; + if (!perf_counters_measurement_->Stop(measurements)) { + BM_CHECK(false) << "Perf counters read the value failed."; + } for (const auto& name_and_measurement : measurements) { - auto name = name_and_measurement.first; - auto measurement = name_and_measurement.second; - BM_CHECK_EQ(counters[name], 0.0); - counters[name] = Counter(measurement, Counter::kAvgIterations); + const std::string& name = name_and_measurement.first; + const double measurement = name_and_measurement.second; + // Counter was inserted with `kAvgIterations` flag by the constructor. 
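// An aside, not part of the patch: the kAvgIterations flag that the
// constructor now pre-inserts for perf counters (see above) is the same flag
// available for ordinary user counters. BM_ProcessBatch and the
// 64-items-per-iteration figure are invented.
#include <benchmark/benchmark.h>

static void BM_ProcessBatch(benchmark::State& state) {
  int64_t items = 0;
  for (auto _ : state) {
    items += 64;  // pretend each iteration handles 64 items
  }
  // Reported as items / iterations, i.e. 64 in this sketch.
  state.counters["items"] = benchmark::Counter(
      static_cast<double>(items), benchmark::Counter::kAvgIterations);
}
BENCHMARK(BM_ProcessBatch);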
+ assert(counters.find(name) != counters.end()); + counters[name].value += measurement; } } } void State::ResumeTiming() { - BM_CHECK(started_ && !finished_ && !error_occurred_); + BM_CHECK(started_ && !finished_ && !skipped()); timer_->StartTimer(); if (perf_counters_measurement_) { perf_counters_measurement_->Start(); } } -void State::SkipWithError(const char* msg) { - BM_CHECK(msg); - error_occurred_ = true; +void State::SkipWithMessage(const std::string& msg) { + skipped_ = internal::SkippedWithMessage; { MutexLock l(manager_->GetBenchmarkMutex()); - if (manager_->results.has_error_ == false) { - manager_->results.error_message_ = msg; - manager_->results.has_error_ = true; + if (internal::NotSkipped == manager_->results.skipped_) { + manager_->results.skip_message_ = msg; + manager_->results.skipped_ = skipped_; + } + } + total_iterations_ = 0; + if (timer_->running()) timer_->StopTimer(); +} + +void State::SkipWithError(const std::string& msg) { + skipped_ = internal::SkippedWithError; + { + MutexLock l(manager_->GetBenchmarkMutex()); + if (internal::NotSkipped == manager_->results.skipped_) { + manager_->results.skip_message_ = msg; + manager_->results.skipped_ = skipped_; } } total_iterations_ = 0; @@ -220,7 +285,7 @@ void State::SetIterationTime(double seconds) { timer_->SetIterationTime(seconds); } -void State::SetLabel(const char* label) { +void State::SetLabel(const std::string& label) { MutexLock l(manager_->GetBenchmarkMutex()); manager_->results.report_label_ = label; } @@ -228,14 +293,14 @@ void State::SetLabel(const char* label) { void State::StartKeepRunning() { BM_CHECK(!started_ && !finished_); started_ = true; - total_iterations_ = error_occurred_ ? 0 : max_iterations; + total_iterations_ = skipped() ? 0 : max_iterations; manager_->StartStopBarrier(); - if (!error_occurred_) ResumeTiming(); + if (!skipped()) ResumeTiming(); } void State::FinishKeepRunning() { - BM_CHECK(started_ && (!finished_ || error_occurred_)); - if (!error_occurred_) { + BM_CHECK(started_ && (!finished_ || skipped())); + if (!skipped()) { PauseTiming(); } // Total iterations has now wrapped around past 0. Fix this. @@ -313,14 +378,26 @@ void RunBenchmarks(const std::vector& benchmarks, size_t num_repetitions_total = 0; + // This perfcounters object needs to be created before the runners vector + // below so it outlasts their lifetime. + PerfCountersMeasurement perfcounters( + StrSplit(FLAGS_benchmark_perf_counters, ',')); + + // Vector of benchmarks to run std::vector runners; runners.reserve(benchmarks.size()); + + // Count the number of benchmarks with threads to warn the user in case + // performance counters are used. 
+ int benchmarks_with_threads = 0; + + // Loop through all benchmarks for (const BenchmarkInstance& benchmark : benchmarks) { BenchmarkReporter::PerFamilyRunReports* reports_for_family = nullptr; if (benchmark.complexity() != oNone) reports_for_family = &per_family_reports[benchmark.family_index()]; - - runners.emplace_back(benchmark, reports_for_family); + benchmarks_with_threads += (benchmark.threads() > 1); + runners.emplace_back(benchmark, &perfcounters, reports_for_family); int num_repeats_of_this_instance = runners.back().GetNumRepeats(); num_repetitions_total += num_repeats_of_this_instance; if (reports_for_family) @@ -328,6 +405,17 @@ void RunBenchmarks(const std::vector& benchmarks, } assert(runners.size() == benchmarks.size() && "Unexpected runner count."); + // The use of performance counters with threads would be unintuitive for + // the average user so we need to warn them about this case + if ((benchmarks_with_threads > 0) && (perfcounters.num_counters() > 0)) { + GetErrorLogInstance() + << "***WARNING*** There are " << benchmarks_with_threads + << " benchmarks with threads and " << perfcounters.num_counters() + << " performance counters were requested. Beware counters will " + "reflect the combined usage across all " + "threads.\n"; + } + std::vector repetition_indices; repetition_indices.reserve(num_repetitions_total); for (size_t runner_index = 0, num_runners = runners.size(); @@ -351,6 +439,12 @@ void RunBenchmarks(const std::vector& benchmarks, if (runner.HasRepeatsRemaining()) continue; // FIXME: report each repetition separately, not all of them in bulk. + display_reporter->ReportRunsConfig( + runner.GetMinTime(), runner.HasExplicitIters(), runner.GetIters()); + if (file_reporter) + file_reporter->ReportRunsConfig( + runner.GetMinTime(), runner.HasExplicitIters(), runner.GetIters()); + RunResults run_results = runner.GetResults(); // Maybe calculate complexity report @@ -384,14 +478,15 @@ std::unique_ptr CreateReporter( typedef std::unique_ptr PtrType; if (name == "console") { return PtrType(new ConsoleReporter(output_opts)); - } else if (name == "json") { - return PtrType(new JSONReporter); - } else if (name == "csv") { - return PtrType(new CSVReporter); - } else { - std::cerr << "Unexpected format: '" << name << "'\n"; - std::exit(1); } + if (name == "json") { + return PtrType(new JSONReporter()); + } + if (name == "csv") { + return PtrType(new CSVReporter()); + } + std::cerr << "Unexpected format: '" << name << "'\n"; + std::exit(1); } BENCHMARK_RESTORE_DEPRECATED_WARNING @@ -428,6 +523,14 @@ ConsoleReporter::OutputOptions GetOutputOptions(bool force_no_color) { } // end namespace internal +BenchmarkReporter* CreateDefaultDisplayReporter() { + static auto default_display_reporter = + internal::CreateReporter(FLAGS_benchmark_format, + internal::GetOutputOptions()) + .release(); + return default_display_reporter; +} + size_t RunSpecifiedBenchmarks() { return RunSpecifiedBenchmarks(nullptr, nullptr, FLAGS_benchmark_filter); } @@ -463,8 +566,7 @@ size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter, std::unique_ptr default_display_reporter; std::unique_ptr default_file_reporter; if (!display_reporter) { - default_display_reporter = internal::CreateReporter( - FLAGS_benchmark_format, internal::GetOutputOptions()); + default_display_reporter.reset(CreateDefaultDisplayReporter()); display_reporter = default_display_reporter.get(); } auto& Out = display_reporter->GetOutputStream(); @@ -485,7 +587,9 @@ size_t RunSpecifiedBenchmarks(BenchmarkReporter* 
display_reporter, } if (!file_reporter) { default_file_reporter = internal::CreateReporter( - FLAGS_benchmark_out_format, ConsoleReporter::OO_None); + FLAGS_benchmark_out_format, FLAGS_benchmark_counters_tabular + ? ConsoleReporter::OO_Tabular + : ConsoleReporter::OO_None); file_reporter = default_file_reporter.get(); } file_reporter->SetOutputStream(&output_file); @@ -510,8 +614,23 @@ size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter, return benchmarks.size(); } +namespace { +// stores the time unit benchmarks use by default +TimeUnit default_time_unit = kNanosecond; +} // namespace + +TimeUnit GetDefaultTimeUnit() { return default_time_unit; } + +void SetDefaultTimeUnit(TimeUnit unit) { default_time_unit = unit; } + std::string GetBenchmarkFilter() { return FLAGS_benchmark_filter; } +void SetBenchmarkFilter(std::string value) { + FLAGS_benchmark_filter = std::move(value); +} + +int32_t GetBenchmarkVerbosity() { return FLAGS_v; } + void RegisterMemoryManager(MemoryManager* manager) { internal::memory_manager = manager; } @@ -528,27 +647,31 @@ void AddCustomContext(const std::string& key, const std::string& value) { namespace internal { +void (*HelperPrintf)(); + void PrintUsageAndExit() { - fprintf(stdout, - "benchmark" - " [--benchmark_list_tests={true|false}]\n" - " [--benchmark_filter=]\n" - " [--benchmark_min_time=]\n" - " [--benchmark_repetitions=]\n" - " [--benchmark_enable_random_interleaving={true|false}]\n" - " [--benchmark_report_aggregates_only={true|false}]\n" - " [--benchmark_display_aggregates_only={true|false}]\n" - " [--benchmark_format=]\n" - " [--benchmark_out=]\n" - " [--benchmark_out_format=]\n" - " [--benchmark_color={auto|true|false}]\n" - " [--benchmark_counters_tabular={true|false}]\n" - " [--benchmark_perf_counters=,...]\n" - " [--benchmark_context==,...]\n" - " [--v=]\n"); + HelperPrintf(); exit(0); } +void SetDefaultTimeUnitFromFlag(const std::string& time_unit_flag) { + if (time_unit_flag == "s") { + return SetDefaultTimeUnit(kSecond); + } + if (time_unit_flag == "ms") { + return SetDefaultTimeUnit(kMillisecond); + } + if (time_unit_flag == "us") { + return SetDefaultTimeUnit(kMicrosecond); + } + if (time_unit_flag == "ns") { + return SetDefaultTimeUnit(kNanosecond); + } + if (!time_unit_flag.empty()) { + PrintUsageAndExit(); + } +} + void ParseCommandLineFlags(int* argc, char** argv) { using namespace benchmark; BenchmarkReporter::Context::executable_name = @@ -557,8 +680,10 @@ void ParseCommandLineFlags(int* argc, char** argv) { if (ParseBoolFlag(argv[i], "benchmark_list_tests", &FLAGS_benchmark_list_tests) || ParseStringFlag(argv[i], "benchmark_filter", &FLAGS_benchmark_filter) || - ParseDoubleFlag(argv[i], "benchmark_min_time", + ParseStringFlag(argv[i], "benchmark_min_time", &FLAGS_benchmark_min_time) || + ParseDoubleFlag(argv[i], "benchmark_min_warmup_time", + &FLAGS_benchmark_min_warmup_time) || ParseInt32Flag(argv[i], "benchmark_repetitions", &FLAGS_benchmark_repetitions) || ParseBoolFlag(argv[i], "benchmark_enable_random_interleaving", @@ -578,6 +703,8 @@ void ParseCommandLineFlags(int* argc, char** argv) { &FLAGS_benchmark_perf_counters) || ParseKeyValueFlag(argv[i], "benchmark_context", &FLAGS_benchmark_context) || + ParseStringFlag(argv[i], "benchmark_time_unit", + &FLAGS_benchmark_time_unit) || ParseInt32Flag(argv[i], "v", &FLAGS_v)) { for (int j = i; j != *argc - 1; ++j) argv[j] = argv[j + 1]; @@ -593,6 +720,7 @@ void ParseCommandLineFlags(int* argc, char** argv) { PrintUsageAndExit(); } } + 
SetDefaultTimeUnitFromFlag(FLAGS_benchmark_time_unit); if (FLAGS_benchmark_color.empty()) { PrintUsageAndExit(); } @@ -608,7 +736,32 @@ int InitializeStreams() { } // end namespace internal -void Initialize(int* argc, char** argv) { +void PrintDefaultHelp() { + fprintf(stdout, + "benchmark" + " [--benchmark_list_tests={true|false}]\n" + " [--benchmark_filter=]\n" + " [--benchmark_min_time=`x` OR `s` ]\n" + " [--benchmark_min_warmup_time=]\n" + " [--benchmark_repetitions=]\n" + " [--benchmark_enable_random_interleaving={true|false}]\n" + " [--benchmark_report_aggregates_only={true|false}]\n" + " [--benchmark_display_aggregates_only={true|false}]\n" + " [--benchmark_format=]\n" + " [--benchmark_out=]\n" + " [--benchmark_out_format=]\n" + " [--benchmark_color={auto|true|false}]\n" + " [--benchmark_counters_tabular={true|false}]\n" +#if defined HAVE_LIBPFM + " [--benchmark_perf_counters=,...]\n" +#endif + " [--benchmark_context==,...]\n" + " [--benchmark_time_unit={ns|us|ms|s}]\n" + " [--v=]\n"); +} + +void Initialize(int* argc, char** argv, void (*HelperPrintf)()) { + internal::HelperPrintf = HelperPrintf; internal::ParseCommandLineFlags(argc, argv); internal::LogLevel() = FLAGS_v; } diff --git a/src/benchmark_api_internal.cc b/src/benchmark_api_internal.cc index 4de36e3..286f986 100644 --- a/src/benchmark_api_internal.cc +++ b/src/benchmark_api_internal.cc @@ -16,7 +16,7 @@ BenchmarkInstance::BenchmarkInstance(Benchmark* benchmark, int family_idx, per_family_instance_index_(per_family_instance_idx), aggregation_report_mode_(benchmark_.aggregation_report_mode_), args_(args), - time_unit_(benchmark_.time_unit_), + time_unit_(benchmark_.GetTimeUnit()), measure_process_cpu_time_(benchmark_.measure_process_cpu_time_), use_real_time_(benchmark_.use_real_time_), use_manual_time_(benchmark_.use_manual_time_), @@ -25,6 +25,7 @@ BenchmarkInstance::BenchmarkInstance(Benchmark* benchmark, int family_idx, statistics_(benchmark_.statistics_), repetitions_(benchmark_.repetitions_), min_time_(benchmark_.min_time_), + min_warmup_time_(benchmark_.min_warmup_time_), iterations_(benchmark_.iterations_), threads_(thread_count) { name_.function_name = benchmark_.name_; @@ -50,6 +51,11 @@ BenchmarkInstance::BenchmarkInstance(Benchmark* benchmark, int family_idx, name_.min_time = StrFormat("min_time:%0.3f", benchmark_.min_time_); } + if (!IsZero(benchmark->min_warmup_time_)) { + name_.min_warmup_time = + StrFormat("min_warmup_time:%0.3f", benchmark_.min_warmup_time_); + } + if (benchmark_.iterations_ != 0) { name_.iterations = StrFormat( "iterations:%lu", static_cast(benchmark_.iterations_)); @@ -87,24 +93,24 @@ State BenchmarkInstance::Run( IterationCount iters, int thread_id, internal::ThreadTimer* timer, internal::ThreadManager* manager, internal::PerfCountersMeasurement* perf_counters_measurement) const { - State st(iters, args_, thread_id, threads_, timer, manager, - perf_counters_measurement); + State st(name_.function_name, iters, args_, thread_id, threads_, timer, + manager, perf_counters_measurement); benchmark_.Run(st); return st; } void BenchmarkInstance::Setup() const { if (setup_) { - State st(/*iters*/ 1, args_, /*thread_id*/ 0, threads_, nullptr, nullptr, - nullptr); + State st(name_.function_name, /*iters*/ 1, args_, /*thread_id*/ 0, threads_, + nullptr, nullptr, nullptr); setup_(st); } } void BenchmarkInstance::Teardown() const { if (teardown_) { - State st(/*iters*/ 1, args_, /*thread_id*/ 0, threads_, nullptr, nullptr, - nullptr); + State st(name_.function_name, /*iters*/ 1, args_, /*thread_id*/ 
0, threads_, + nullptr, nullptr, nullptr); teardown_(st); } } diff --git a/src/benchmark_api_internal.h b/src/benchmark_api_internal.h index 94c2b29..94f5165 100644 --- a/src/benchmark_api_internal.h +++ b/src/benchmark_api_internal.h @@ -36,6 +36,7 @@ class BenchmarkInstance { const std::vector& statistics() const { return statistics_; } int repetitions() const { return repetitions_; } double min_time() const { return min_time_; } + double min_warmup_time() const { return min_warmup_time_; } IterationCount iterations() const { return iterations_; } int threads() const { return threads_; } void Setup() const; @@ -62,6 +63,7 @@ class BenchmarkInstance { const std::vector& statistics_; int repetitions_; double min_time_; + double min_warmup_time_; IterationCount iterations_; int threads_; // Number of concurrent threads to us @@ -76,6 +78,7 @@ bool FindBenchmarksInternal(const std::string& re, bool IsZero(double n); +BENCHMARK_EXPORT ConsoleReporter::OutputOptions GetOutputOptions(bool force_no_color = false); } // end namespace internal diff --git a/src/benchmark_main.cc b/src/benchmark_main.cc index b3b2478..cd61cd2 100644 --- a/src/benchmark_main.cc +++ b/src/benchmark_main.cc @@ -14,4 +14,5 @@ #include "benchmark/benchmark.h" +BENCHMARK_EXPORT int main(int, char**); BENCHMARK_MAIN(); diff --git a/src/benchmark_name.cc b/src/benchmark_name.cc index 2a17ebc..01676bb 100644 --- a/src/benchmark_name.cc +++ b/src/benchmark_name.cc @@ -51,8 +51,9 @@ std::string join(char delimiter, const Ts&... ts) { } } // namespace +BENCHMARK_EXPORT std::string BenchmarkName::str() const { - return join('/', function_name, args, min_time, iterations, repetitions, - time_type, threads); + return join('/', function_name, args, min_time, min_warmup_time, iterations, + repetitions, time_type, threads); } } // namespace benchmark diff --git a/src/benchmark_register.cc b/src/benchmark_register.cc index 61a0c26..e447c9a 100644 --- a/src/benchmark_register.cc +++ b/src/benchmark_register.cc @@ -15,7 +15,7 @@ #include "benchmark_register.h" #ifndef BENCHMARK_OS_WINDOWS -#ifndef BENCHMARK_OS_FUCHSIA +#if !defined(BENCHMARK_OS_FUCHSIA) && !defined(BENCHMARK_OS_QURT) #include #endif #include @@ -53,10 +53,13 @@ namespace benchmark { namespace { // For non-dense Range, intermediate values are powers of kRangeMultiplier. -static const int kRangeMultiplier = 8; +static constexpr int kRangeMultiplier = 8; + // The size of a benchmark family determines is the number of inputs to repeat // the benchmark on. If this is "large" then warn the user during configuration. -static const size_t kMaxFamilySize = 100; +static constexpr size_t kMaxFamilySize = 100; + +static constexpr char kDisabledPrefix[] = "DISABLED_"; } // end namespace namespace internal { @@ -116,10 +119,10 @@ bool BenchmarkFamilies::FindBenchmarks( // Make regular expression out of command-line flag std::string error_msg; Regex re; - bool isNegativeFilter = false; + bool is_negative_filter = false; if (spec[0] == '-') { spec.replace(0, 1, ""); - isNegativeFilter = true; + is_negative_filter = true; } if (!re.Init(spec, &error_msg)) { Err << "Could not compile benchmark re: " << error_msg << std::endl; @@ -154,7 +157,8 @@ bool BenchmarkFamilies::FindBenchmarks( << " will be repeated at least " << family_size << " times.\n"; } // reserve in the special case the regex ".", since we know the final - // family size. + // family size. this doesn't take into account any disabled benchmarks + // so worst case we reserve more than we need. 
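// A sketch, not part of the patch, of the DISABLED_ prefix introduced above
// (and checked just below): the benchmark keeps compiling and stays
// registered, but is never selected for a run. DISABLED_BM_FlakyNetwork is a
// hypothetical name.
#include <benchmark/benchmark.h>

static void DISABLED_BM_FlakyNetwork(benchmark::State& state) {
  for (auto _ : state) {
    // work that is temporarily excluded from every run
  }
}
BENCHMARK(DISABLED_BM_FlakyNetwork);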
if (spec == ".") benchmarks->reserve(benchmarks->size() + family_size); for (auto const& args : family->args_) { @@ -164,8 +168,9 @@ bool BenchmarkFamilies::FindBenchmarks( num_threads); const auto full_name = instance.name().str(); - if ((re.Match(full_name) && !isNegativeFilter) || - (!re.Match(full_name) && isNegativeFilter)) { + if (full_name.rfind(kDisabledPrefix, 0) != 0 && + ((re.Match(full_name) && !is_negative_filter) || + (!re.Match(full_name) && is_negative_filter))) { benchmarks->push_back(std::move(instance)); ++per_family_instance_index; @@ -199,12 +204,14 @@ bool FindBenchmarksInternal(const std::string& re, // Benchmark //=============================================================================// -Benchmark::Benchmark(const char* name) +Benchmark::Benchmark(const std::string& name) : name_(name), aggregation_report_mode_(ARM_Unspecified), - time_unit_(kNanosecond), + time_unit_(GetDefaultTimeUnit()), + use_default_time_unit_(true), range_multiplier_(kRangeMultiplier), min_time_(0), + min_warmup_time_(0), iterations_(0), repetitions_(0), measure_process_cpu_time_(false), @@ -223,7 +230,7 @@ Benchmark::Benchmark(const char* name) Benchmark::~Benchmark() {} Benchmark* Benchmark::Name(const std::string& name) { - SetName(name.c_str()); + SetName(name); return this; } @@ -235,6 +242,7 @@ Benchmark* Benchmark::Arg(int64_t x) { Benchmark* Benchmark::Unit(TimeUnit unit) { time_unit_ = unit; + use_default_time_unit_ = false; return this; } @@ -348,9 +356,17 @@ Benchmark* Benchmark::MinTime(double t) { return this; } +Benchmark* Benchmark::MinWarmUpTime(double t) { + BM_CHECK(t >= 0.0); + BM_CHECK(iterations_ == 0); + min_warmup_time_ = t; + return this; +} + Benchmark* Benchmark::Iterations(IterationCount n) { BM_CHECK(n > 0); BM_CHECK(IsZero(min_time_)); + BM_CHECK(IsZero(min_warmup_time_)); iterations_ = n; return this; } @@ -452,7 +468,9 @@ Benchmark* Benchmark::ThreadPerCpu() { return this; } -void Benchmark::SetName(const char* name) { name_ = name; } +void Benchmark::SetName(const std::string& name) { name_ = name; } + +const char* Benchmark::GetName() const { return name_.c_str(); } int Benchmark::ArgsCnt() const { if (args_.empty()) { @@ -462,6 +480,16 @@ int Benchmark::ArgsCnt() const { return static_cast(args_.front().size()); } +const char* Benchmark::GetArgName(int arg) const { + BM_CHECK_GE(arg, 0); + BM_CHECK_LT(arg, static_cast(arg_names_.size())); + return arg_names_[arg].c_str(); +} + +TimeUnit Benchmark::GetTimeUnit() const { + return use_default_time_unit_ ? 
GetDefaultTimeUnit() : time_unit_; +} + //=============================================================================// // FunctionBenchmark //=============================================================================// diff --git a/src/benchmark_register.h b/src/benchmark_register.h index d3f4974..53367c7 100644 --- a/src/benchmark_register.h +++ b/src/benchmark_register.h @@ -1,6 +1,7 @@ #ifndef BENCHMARK_REGISTER_H #define BENCHMARK_REGISTER_H +#include #include #include @@ -23,7 +24,7 @@ typename std::vector::iterator AddPowers(std::vector* dst, T lo, T hi, static const T kmax = std::numeric_limits::max(); // Space out the values in multiples of "mult" - for (T i = static_cast(1); i <= hi; i *= mult) { + for (T i = static_cast(1); i <= hi; i *= static_cast(mult)) { if (i >= lo) { dst->push_back(i); } @@ -32,7 +33,7 @@ typename std::vector::iterator AddPowers(std::vector* dst, T lo, T hi, if (i > kmax / mult) break; } - return dst->begin() + start_offset; + return dst->begin() + static_cast(start_offset); } template diff --git a/src/benchmark_runner.cc b/src/benchmark_runner.cc index eac807b..f7ae424 100644 --- a/src/benchmark_runner.cc +++ b/src/benchmark_runner.cc @@ -19,7 +19,7 @@ #include "internal_macros.h" #ifndef BENCHMARK_OS_WINDOWS -#ifndef BENCHMARK_OS_FUCHSIA +#if !defined(BENCHMARK_OS_FUCHSIA) && !defined(BENCHMARK_OS_QURT) #include #endif #include @@ -28,11 +28,14 @@ #include #include +#include +#include #include #include #include #include #include +#include #include #include #include @@ -62,6 +65,8 @@ MemoryManager* memory_manager = nullptr; namespace { static constexpr IterationCount kMaxIterations = 1000000000; +const double kDefaultMinTime = + std::strtod(::benchmark::kDefaultMinTimeStr, /*p_end*/ nullptr); BenchmarkReporter::Run CreateRunReport( const benchmark::internal::BenchmarkInstance& b, @@ -75,8 +80,8 @@ BenchmarkReporter::Run CreateRunReport( report.run_name = b.name(); report.family_index = b.family_index(); report.per_family_instance_index = b.per_family_instance_index(); - report.error_occurred = results.has_error_; - report.error_message = results.error_message_; + report.skipped = results.skipped_; + report.skip_message = results.skip_message_; report.report_label = results.report_label_; // This is the total iterations across all threads. report.iterations = results.iterations; @@ -85,7 +90,7 @@ BenchmarkReporter::Run CreateRunReport( report.repetition_index = repetition_index; report.repetitions = repeats; - if (!report.error_occurred) { + if (!report.skipped) { if (b.use_manual_time()) { report.real_accumulated_time = results.manual_time_used; } else { @@ -122,9 +127,10 @@ void RunInThread(const BenchmarkInstance* b, IterationCount iters, b->measure_process_cpu_time() ? 
internal::ThreadTimer::CreateProcessCpuTime() : internal::ThreadTimer::Create()); + State st = b->Run(iters, thread_id, &timer, manager, perf_counters_measurement); - BM_CHECK(st.error_occurred() || st.iterations() >= st.max_iterations) + BM_CHECK(st.skipped() || st.iterations() >= st.max_iterations) << "Benchmark returned before State::KeepRunning() returned false!"; { MutexLock l(manager->GetBenchmarkMutex()); @@ -139,24 +145,100 @@ void RunInThread(const BenchmarkInstance* b, IterationCount iters, manager->NotifyThreadComplete(); } +double ComputeMinTime(const benchmark::internal::BenchmarkInstance& b, + const BenchTimeType& iters_or_time) { + if (!IsZero(b.min_time())) return b.min_time(); + // If the flag was used to specify number of iters, then return the default + // min_time. + if (iters_or_time.tag == BenchTimeType::ITERS) return kDefaultMinTime; + + return iters_or_time.time; +} + +IterationCount ComputeIters(const benchmark::internal::BenchmarkInstance& b, + const BenchTimeType& iters_or_time) { + if (b.iterations() != 0) return b.iterations(); + + // We've already concluded that this flag is currently used to pass + // iters but do a check here again anyway. + BM_CHECK(iters_or_time.tag == BenchTimeType::ITERS); + return iters_or_time.iters; +} + } // end namespace +BenchTimeType ParseBenchMinTime(const std::string& value) { + BenchTimeType ret; + + if (value.empty()) { + ret.tag = BenchTimeType::TIME; + ret.time = 0.0; + return ret; + } + + if (value.back() == 'x') { + char* p_end; + // Reset errno before it's changed by strtol. + errno = 0; + IterationCount num_iters = std::strtol(value.c_str(), &p_end, 10); + + // After a valid parse, p_end should have been set to + // point to the 'x' suffix. + BM_CHECK(errno == 0 && p_end != nullptr && *p_end == 'x') + << "Malformed iters value passed to --benchmark_min_time: `" << value + << "`. Expected --benchmark_min_time=x."; + + ret.tag = BenchTimeType::ITERS; + ret.iters = num_iters; + return ret; + } + + bool has_suffix = value.back() == 's'; + if (!has_suffix) { + BM_VLOG(0) << "Value passed to --benchmark_min_time should have a suffix. " + "Eg., `30s` for 30-seconds."; + } + + char* p_end; + // Reset errno before it's changed by strtod. + errno = 0; + double min_time = std::strtod(value.c_str(), &p_end); + + // After a successful parse, p_end should point to the suffix 's', + // or the end of the string if the suffix was omitted. + BM_CHECK(errno == 0 && p_end != nullptr && + ((has_suffix && *p_end == 's') || *p_end == '\0')) + << "Malformed seconds value passed to --benchmark_min_time: `" << value + << "`. Expected --benchmark_min_time=x."; + + ret.tag = BenchTimeType::TIME; + ret.time = min_time; + + return ret; +} + BenchmarkRunner::BenchmarkRunner( const benchmark::internal::BenchmarkInstance& b_, + PerfCountersMeasurement* pcm_, BenchmarkReporter::PerFamilyRunReports* reports_for_family_) : b(b_), reports_for_family(reports_for_family_), - min_time(!IsZero(b.min_time()) ? b.min_time() : FLAGS_benchmark_min_time), + parsed_benchtime_flag(ParseBenchMinTime(FLAGS_benchmark_min_time)), + min_time(ComputeMinTime(b_, parsed_benchtime_flag)), + min_warmup_time((!IsZero(b.min_time()) && b.min_warmup_time() > 0.0) + ? b.min_warmup_time() + : FLAGS_benchmark_min_warmup_time), + warmup_done(!(min_warmup_time > 0.0)), repeats(b.repetitions() != 0 ? 
b.repetitions() : FLAGS_benchmark_repetitions), - has_explicit_iteration_count(b.iterations() != 0), + has_explicit_iteration_count(b.iterations() != 0 || + parsed_benchtime_flag.tag == + BenchTimeType::ITERS), pool(b.threads() - 1), - iters(has_explicit_iteration_count ? b.iterations() : 1), - perf_counters_measurement( - PerfCounters::Create(StrSplit(FLAGS_benchmark_perf_counters, ','))), - perf_counters_measurement_ptr(perf_counters_measurement.IsValid() - ? &perf_counters_measurement - : nullptr) { + iters(has_explicit_iteration_count + ? ComputeIters(b_, parsed_benchtime_flag) + : 1), + perf_counters_measurement_ptr(pcm_) { run_results.display_report_aggregates_only = (FLAGS_benchmark_report_aggregates_only || FLAGS_benchmark_display_aggregates_only); @@ -169,7 +251,7 @@ BenchmarkRunner::BenchmarkRunner( run_results.file_report_aggregates_only = (b.aggregation_report_mode() & internal::ARM_FileReportAggregatesOnly); BM_CHECK(FLAGS_benchmark_perf_counters.empty() || - perf_counters_measurement.IsValid()) + (perf_counters_measurement_ptr->num_counters() == 0)) << "Perf counters were requested but could not be set up."; } } @@ -232,20 +314,20 @@ IterationCount BenchmarkRunner::PredictNumItersNeeded( const IterationResults& i) const { // See how much iterations should be increased by. // Note: Avoid division by zero with max(seconds, 1ns). - double multiplier = min_time * 1.4 / std::max(i.seconds, 1e-9); + double multiplier = GetMinTimeToApply() * 1.4 / std::max(i.seconds, 1e-9); // If our last run was at least 10% of FLAGS_benchmark_min_time then we // use the multiplier directly. // Otherwise we use at most 10 times expansion. // NOTE: When the last run was at least 10% of the min time the max // expansion should be 14x. - bool is_significant = (i.seconds / min_time) > 0.1; + const bool is_significant = (i.seconds / GetMinTimeToApply()) > 0.1; multiplier = is_significant ? multiplier : 10.0; // So what seems to be the sufficiently-large iteration count? Round up. const IterationCount max_next_iters = static_cast( std::lround(std::max(multiplier * static_cast(i.iters), static_cast(i.iters) + 1.0))); - // But we do have *some* sanity limits though.. + // But we do have *some* limits though.. const IterationCount next_iters = std::min(max_next_iters, kMaxIterations); BM_VLOG(3) << "Next iters: " << next_iters << ", " << multiplier << "\n"; @@ -257,21 +339,80 @@ bool BenchmarkRunner::ShouldReportIterationResults( // Determine if this run should be reported; // Either it has run for a sufficient amount of time // or because an error was reported. - return i.results.has_error_ || + return i.results.skipped_ || i.iters >= kMaxIterations || // Too many iterations already. - i.seconds >= min_time || // The elapsed time is large enough. + i.seconds >= + GetMinTimeToApply() || // The elapsed time is large enough. // CPU time is specified but the elapsed real time greatly exceeds // the minimum time. - // Note that user provided timers are except from this sanity check. - ((i.results.real_time_used >= 5 * min_time) && !b.use_manual_time()); + // Note that user provided timers are except from this test. + ((i.results.real_time_used >= 5 * GetMinTimeToApply()) && + !b.use_manual_time()); +} + +double BenchmarkRunner::GetMinTimeToApply() const { + // In order to re-use functionality to run and measure benchmarks for running + // a warmup phase of the benchmark, we need a way of telling whether to apply + // min_time or min_warmup_time. 
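
For context on the flag format handled by ParseBenchMinTime() above: `--benchmark_min_time` now accepts either an explicit iteration count (`100x`) or a duration in seconds (`2.5s`; a bare number is accepted with a warning). The following is a minimal, self-contained sketch of that dual parse — the names (MinTimeSpec, ParseMinTimeSketch) are hypothetical and the error handling is deliberately reduced; it is illustrative only, not the patch's code.

```cpp
// Simplified sketch of the dual-format --benchmark_min_time value:
// "<N>x" selects an explicit iteration count, "<T>s" (or a bare number)
// selects a minimum running time in seconds.
#include <cstdint>
#include <cstdlib>
#include <iostream>
#include <string>

struct MinTimeSpec {
  enum { ITERS, TIME } tag = TIME;
  int64_t iters = 0;
  double seconds = 0.0;
};

MinTimeSpec ParseMinTimeSketch(const std::string& value) {
  MinTimeSpec spec;
  if (value.empty()) return spec;
  char* end = nullptr;
  if (value.back() == 'x') {  // explicit iteration count, e.g. "100x"
    spec.tag = MinTimeSpec::ITERS;
    spec.iters = std::strtol(value.c_str(), &end, 10);
  } else {  // minimum time in seconds, e.g. "2.5s" ('s' suffix recommended)
    spec.seconds = std::strtod(value.c_str(), &end);
  }
  return spec;
}

int main() {
  for (const char* v : {"100x", "2.5s", "0.5"}) {
    const MinTimeSpec s = ParseMinTimeSketch(v);
    std::cout << v << " -> "
              << (s.tag == MinTimeSpec::ITERS ? "iterations: " : "seconds: ")
              << (s.tag == MinTimeSpec::ITERS ? static_cast<double>(s.iters)
                                              : s.seconds)
              << "\n";
  }
}
```
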
This function will figure out if we are in the + // warmup phase and therefore need to apply min_warmup_time or if we already + // in the benchmarking phase and min_time needs to be applied. + return warmup_done ? min_time : min_warmup_time; +} + +void BenchmarkRunner::FinishWarmUp(const IterationCount& i) { + warmup_done = true; + iters = i; +} + +void BenchmarkRunner::RunWarmUp() { + // Use the same mechanisms for warming up the benchmark as used for actually + // running and measuring the benchmark. + IterationResults i_warmup; + // Dont use the iterations determined in the warmup phase for the actual + // measured benchmark phase. While this may be a good starting point for the + // benchmark and it would therefore get rid of the need to figure out how many + // iterations are needed if min_time is set again, this may also be a complete + // wrong guess since the warmup loops might be considerably slower (e.g + // because of caching effects). + const IterationCount i_backup = iters; + + for (;;) { + b.Setup(); + i_warmup = DoNIterations(); + b.Teardown(); + + const bool finish = ShouldReportIterationResults(i_warmup); + + if (finish) { + FinishWarmUp(i_backup); + break; + } + + // Although we are running "only" a warmup phase where running enough + // iterations at once without measuring time isn't as important as it is for + // the benchmarking phase, we still do it the same way as otherwise it is + // very confusing for the user to know how to choose a proper value for + // min_warmup_time if a different approach on running it is used. + iters = PredictNumItersNeeded(i_warmup); + assert(iters > i_warmup.iters && + "if we did more iterations than we want to do the next time, " + "then we should have accepted the current iteration run."); + } } void BenchmarkRunner::DoOneRepetition() { assert(HasRepeatsRemaining() && "Already done all repetitions?"); const bool is_the_first_repetition = num_repetitions_done == 0; - IterationResults i; + // In case a warmup phase is requested by the benchmark, run it now. + // After running the warmup phase the BenchmarkRunner should be in a state as + // this warmup never happened except the fact that warmup_done is set. Every + // other manipulation of the BenchmarkRunner instance would be a bug! Please + // fix it. + if (!warmup_done) RunWarmUp(); + + IterationResults i; // We *may* be gradually increasing the length (iteration count) // of the benchmark until we decide the results are significant. // And once we do, we report those last results and exit. @@ -324,10 +465,7 @@ void BenchmarkRunner::DoOneRepetition() { manager->WaitForAllThreads(); manager.reset(); b.Teardown(); - - BENCHMARK_DISABLE_DEPRECATED_WARNING - memory_manager->Stop(memory_result); - BENCHMARK_RESTORE_DEPRECATED_WARNING + memory_manager->Stop(*memory_result); } // Ok, now actually report. 
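
The warmup logic above reuses the normal grow-until-enough-time loop, discarding the results and resetting the iteration count before measurement begins. A rough standalone sketch of that structure follows; the workload and the two budget values are stand-ins, not the library's defaults.

```cpp
// Minimal sketch of the warmup/measurement structure: the same "grow the
// iteration count until enough time has elapsed" loop runs twice, first
// against a warmup budget (results discarded), then against min_time.
#include <algorithm>
#include <chrono>
#include <cmath>
#include <cstdint>
#include <iostream>

static double RunWorkload(int64_t iters) {  // returns elapsed seconds
  const auto start = std::chrono::steady_clock::now();
  volatile double sink = 0.0;
  for (int64_t i = 0; i < iters; ++i) sink += std::sqrt(static_cast<double>(i));
  return std::chrono::duration<double>(std::chrono::steady_clock::now() - start)
      .count();
}

int main() {
  const double min_warmup_time = 0.05;  // warmup budget, results discarded
  const double min_time = 0.2;          // measurement budget, results reported
  bool warmup_done = false;
  int64_t iters = 1;
  for (;;) {
    const double budget = warmup_done ? min_time : min_warmup_time;
    const double seconds = RunWorkload(iters);
    if (seconds >= budget) {
      if (!warmup_done) {  // warmup satisfied: reset and start measuring
        warmup_done = true;
        iters = 1;
        continue;
      }
      std::cout << iters << " iterations in " << seconds << "s\n";
      break;
    }
    // Same growth idea as PredictNumItersNeeded(): overshoot by 1.4x, but use
    // a flat 10x expansion when the last run was insignificant (<10% of budget).
    double multiplier = budget * 1.4 / std::max(seconds, 1e-9);
    if (seconds / budget <= 0.1) multiplier = 10.0;
    iters = static_cast<int64_t>(
        std::lround(std::max(multiplier * static_cast<double>(iters),
                             static_cast<double>(iters) + 1.0)));
  }
}
```
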
@@ -337,7 +475,7 @@ void BenchmarkRunner::DoOneRepetition() { if (reports_for_family) { ++reports_for_family->num_runs_done; - if (!report.error_occurred) reports_for_family->Runs.push_back(report); + if (!report.skipped) reports_for_family->Runs.push_back(report); } run_results.non_aggregates.push_back(report); diff --git a/src/benchmark_runner.h b/src/benchmark_runner.h index 752eefd..db2fa04 100644 --- a/src/benchmark_runner.h +++ b/src/benchmark_runner.h @@ -25,7 +25,8 @@ namespace benchmark { -BM_DECLARE_double(benchmark_min_time); +BM_DECLARE_string(benchmark_min_time); +BM_DECLARE_double(benchmark_min_warmup_time); BM_DECLARE_int32(benchmark_repetitions); BM_DECLARE_bool(benchmark_report_aggregates_only); BM_DECLARE_bool(benchmark_display_aggregates_only); @@ -43,9 +44,21 @@ struct RunResults { bool file_report_aggregates_only = false; }; +struct BENCHMARK_EXPORT BenchTimeType { + enum { ITERS, TIME } tag; + union { + IterationCount iters; + double time; + }; +}; + +BENCHMARK_EXPORT +BenchTimeType ParseBenchMinTime(const std::string& value); + class BenchmarkRunner { public: BenchmarkRunner(const benchmark::internal::BenchmarkInstance& b_, + benchmark::internal::PerfCountersMeasurement* pmc_, BenchmarkReporter::PerFamilyRunReports* reports_for_family); int GetNumRepeats() const { return repeats; } @@ -62,13 +75,22 @@ class BenchmarkRunner { return reports_for_family; } + double GetMinTime() const { return min_time; } + + bool HasExplicitIters() const { return has_explicit_iteration_count; } + + IterationCount GetIters() const { return iters; } + private: RunResults run_results; const benchmark::internal::BenchmarkInstance& b; BenchmarkReporter::PerFamilyRunReports* reports_for_family; + BenchTimeType parsed_benchtime_flag; const double min_time; + const double min_warmup_time; + bool warmup_done; const int repeats; const bool has_explicit_iteration_count; @@ -82,8 +104,7 @@ class BenchmarkRunner { // So only the first repetition has to find/calculate it, // the other repetitions will just use that precomputed iteration count. 
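
BenchTimeType above is a small tagged union: callers are expected to branch on `tag` before reading `iters` or `time`, as ComputeMinTime()/ComputeIters() do. A hypothetical consumer, using a local stand-in struct rather than the library's header:

```cpp
// Sketch of consuming a tagged union like BenchTimeType: always branch on
// the tag before touching the corresponding union member. The struct here
// is a local stand-in, not the library's definition.
#include <cstdint>
#include <iostream>

struct BenchTime {
  enum { ITERS, TIME } tag;
  union {
    int64_t iters;
    double time;
  };
};

double EffectiveMinSeconds(const BenchTime& t, double default_min_time) {
  // When the flag carried an iteration count, fall back to the default
  // minimum time, mirroring ComputeMinTime() in the patch.
  return t.tag == BenchTime::TIME ? t.time : default_min_time;
}

int main() {
  BenchTime by_time;
  by_time.tag = BenchTime::TIME;
  by_time.time = 2.5;

  BenchTime by_iters;
  by_iters.tag = BenchTime::ITERS;
  by_iters.iters = 100;

  std::cout << EffectiveMinSeconds(by_time, 0.5) << "\n";   // 2.5
  std::cout << EffectiveMinSeconds(by_iters, 0.5) << "\n";  // 0.5
}
```
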
- PerfCountersMeasurement perf_counters_measurement; - PerfCountersMeasurement* const perf_counters_measurement_ptr; + PerfCountersMeasurement* const perf_counters_measurement_ptr = nullptr; struct IterationResults { internal::ThreadManager::Result results; @@ -95,6 +116,12 @@ class BenchmarkRunner { IterationCount PredictNumItersNeeded(const IterationResults& i) const; bool ShouldReportIterationResults(const IterationResults& i) const; + + double GetMinTimeToApply() const; + + void FinishWarmUp(const IterationCount& i); + + void RunWarmUp(); }; } // namespace internal diff --git a/src/check.cc b/src/check.cc new file mode 100644 index 0000000..5f7526e --- /dev/null +++ b/src/check.cc @@ -0,0 +1,11 @@ +#include "check.h" + +namespace benchmark { +namespace internal { + +static AbortHandlerT* handler = &std::abort; + +BENCHMARK_EXPORT AbortHandlerT*& GetAbortHandler() { return handler; } + +} // namespace internal +} // namespace benchmark diff --git a/src/check.h b/src/check.h index 90c7bbf..c1cd5e8 100644 --- a/src/check.h +++ b/src/check.h @@ -5,18 +5,34 @@ #include #include +#include "benchmark/export.h" #include "internal_macros.h" #include "log.h" +#if defined(__GNUC__) || defined(__clang__) +#define BENCHMARK_NOEXCEPT noexcept +#define BENCHMARK_NOEXCEPT_OP(x) noexcept(x) +#elif defined(_MSC_VER) && !defined(__clang__) +#if _MSC_VER >= 1900 +#define BENCHMARK_NOEXCEPT noexcept +#define BENCHMARK_NOEXCEPT_OP(x) noexcept(x) +#else +#define BENCHMARK_NOEXCEPT +#define BENCHMARK_NOEXCEPT_OP(x) +#endif +#define __func__ __FUNCTION__ +#else +#define BENCHMARK_NOEXCEPT +#define BENCHMARK_NOEXCEPT_OP(x) +#endif + namespace benchmark { namespace internal { typedef void(AbortHandlerT)(); -inline AbortHandlerT*& GetAbortHandler() { - static AbortHandlerT* handler = &std::abort; - return handler; -} +BENCHMARK_EXPORT +AbortHandlerT*& GetAbortHandler(); BENCHMARK_NORETURN inline void CallAbortHandler() { GetAbortHandler()(); diff --git a/src/colorprint.cc b/src/colorprint.cc index 1a000a0..0bfd670 100644 --- a/src/colorprint.cc +++ b/src/colorprint.cc @@ -96,18 +96,18 @@ std::string FormatString(const char* msg, va_list args) { // currently there is no error handling for failure, so this is hack. BM_CHECK(ret >= 0); - if (ret == 0) // handle empty expansion + if (ret == 0) { // handle empty expansion return {}; - else if (static_cast(ret) < size) - return local_buff; - else { - // we did not provide a long enough buffer on our first attempt. - size = static_cast(ret) + 1; // + 1 for the null byte - std::unique_ptr buff(new char[size]); - ret = vsnprintf(buff.get(), size, msg, args); - BM_CHECK(ret > 0 && (static_cast(ret)) < size); - return buff.get(); } + if (static_cast(ret) < size) { + return local_buff; + } + // we did not provide a long enough buffer on our first attempt. + size = static_cast(ret) + 1; // + 1 for the null byte + std::unique_ptr buff(new char[size]); + ret = vsnprintf(buff.get(), size, msg, args); + BM_CHECK(ret > 0 && (static_cast(ret)) < size); + return buff.get(); } std::string FormatString(const char* msg, ...) { @@ -163,12 +163,24 @@ bool IsColorTerminal() { #else // On non-Windows platforms, we rely on the TERM variable. This list of // supported TERM values is copied from Google Test: - // . + // . 
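
The check.cc/check.h change above turns GetAbortHandler() from an inline function-local static into a single exported definition, so the handler can be replaced across a shared-library boundary. A self-contained sketch of the underlying pattern (keeping the function-local static for brevity; ThrowingHandler is hypothetical):

```cpp
// Standalone sketch of the replaceable abort-handler pattern: a single
// mutable function pointer, defaulting to std::abort, that callers may
// swap out (e.g. to throw in tests). Illustration only, not the library.
#include <cstdlib>
#include <iostream>
#include <stdexcept>

typedef void(AbortHandlerT)();

AbortHandlerT*& GetAbortHandler() {
  static AbortHandlerT* handler = &std::abort;
  return handler;
}

[[noreturn]] void CallAbortHandler() {
  GetAbortHandler()();
  std::abort();  // fallback in case the installed handler returns
}

void ThrowingHandler() { throw std::runtime_error("check failed"); }

int main() {
  GetAbortHandler() = &ThrowingHandler;  // install a test-friendly handler
  try {
    CallAbortHandler();
  } catch (const std::exception& e) {
    std::cout << "caught: " << e.what() << "\n";
  }
}
```
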
const char* const SUPPORTED_TERM_VALUES[] = { - "xterm", "xterm-color", "xterm-256color", - "screen", "screen-256color", "tmux", - "tmux-256color", "rxvt-unicode", "rxvt-unicode-256color", - "linux", "cygwin", + "xterm", + "xterm-color", + "xterm-256color", + "screen", + "screen-256color", + "tmux", + "tmux-256color", + "rxvt-unicode", + "rxvt-unicode-256color", + "linux", + "cygwin", + "xterm-kitty", + "alacritty", + "foot", + "foot-extra", + "wezterm", }; const char* const term = getenv("TERM"); diff --git a/src/commandlineflags.cc b/src/commandlineflags.cc index 9615e35..dcb4149 100644 --- a/src/commandlineflags.cc +++ b/src/commandlineflags.cc @@ -121,12 +121,14 @@ static std::string FlagToEnvVar(const char* flag) { } // namespace +BENCHMARK_EXPORT bool BoolFromEnv(const char* flag, bool default_val) { const std::string env_var = FlagToEnvVar(flag); const char* const value_str = getenv(env_var.c_str()); return value_str == nullptr ? default_val : IsTruthyFlagValue(value_str); } +BENCHMARK_EXPORT int32_t Int32FromEnv(const char* flag, int32_t default_val) { const std::string env_var = FlagToEnvVar(flag); const char* const value_str = getenv(env_var.c_str()); @@ -139,6 +141,7 @@ int32_t Int32FromEnv(const char* flag, int32_t default_val) { return value; } +BENCHMARK_EXPORT double DoubleFromEnv(const char* flag, double default_val) { const std::string env_var = FlagToEnvVar(flag); const char* const value_str = getenv(env_var.c_str()); @@ -151,12 +154,14 @@ double DoubleFromEnv(const char* flag, double default_val) { return value; } +BENCHMARK_EXPORT const char* StringFromEnv(const char* flag, const char* default_val) { const std::string env_var = FlagToEnvVar(flag); const char* const value = getenv(env_var.c_str()); return value == nullptr ? default_val : value; } +BENCHMARK_EXPORT std::map KvPairsFromEnv( const char* flag, std::map default_val) { const std::string env_var = FlagToEnvVar(flag); @@ -201,6 +206,7 @@ const char* ParseFlagValue(const char* str, const char* flag, return flag_end + 1; } +BENCHMARK_EXPORT bool ParseBoolFlag(const char* str, const char* flag, bool* value) { // Gets the value of the flag as a string. const char* const value_str = ParseFlagValue(str, flag, true); @@ -213,6 +219,7 @@ bool ParseBoolFlag(const char* str, const char* flag, bool* value) { return true; } +BENCHMARK_EXPORT bool ParseInt32Flag(const char* str, const char* flag, int32_t* value) { // Gets the value of the flag as a string. const char* const value_str = ParseFlagValue(str, flag, false); @@ -225,6 +232,7 @@ bool ParseInt32Flag(const char* str, const char* flag, int32_t* value) { value); } +BENCHMARK_EXPORT bool ParseDoubleFlag(const char* str, const char* flag, double* value) { // Gets the value of the flag as a string. const char* const value_str = ParseFlagValue(str, flag, false); @@ -237,6 +245,7 @@ bool ParseDoubleFlag(const char* str, const char* flag, double* value) { value); } +BENCHMARK_EXPORT bool ParseStringFlag(const char* str, const char* flag, std::string* value) { // Gets the value of the flag as a string. 
const char* const value_str = ParseFlagValue(str, flag, false); @@ -248,6 +257,7 @@ bool ParseStringFlag(const char* str, const char* flag, std::string* value) { return true; } +BENCHMARK_EXPORT bool ParseKeyValueFlag(const char* str, const char* flag, std::map* value) { const char* const value_str = ParseFlagValue(str, flag, false); @@ -263,23 +273,26 @@ bool ParseKeyValueFlag(const char* str, const char* flag, return true; } +BENCHMARK_EXPORT bool IsFlag(const char* str, const char* flag) { return (ParseFlagValue(str, flag, true) != nullptr); } +BENCHMARK_EXPORT bool IsTruthyFlagValue(const std::string& value) { if (value.size() == 1) { char v = value[0]; return isalnum(v) && !(v == '0' || v == 'f' || v == 'F' || v == 'n' || v == 'N'); - } else if (!value.empty()) { + } + if (!value.empty()) { std::string value_lower(value); std::transform(value_lower.begin(), value_lower.end(), value_lower.begin(), [](char c) { return static_cast(::tolower(c)); }); return !(value_lower == "false" || value_lower == "no" || value_lower == "off"); - } else - return true; + } + return true; } } // end namespace benchmark diff --git a/src/commandlineflags.h b/src/commandlineflags.h index 5baaf11..7882628 100644 --- a/src/commandlineflags.h +++ b/src/commandlineflags.h @@ -5,28 +5,33 @@ #include #include +#include "benchmark/export.h" + // Macro for referencing flags. #define FLAG(name) FLAGS_##name // Macros for declaring flags. -#define BM_DECLARE_bool(name) extern bool FLAG(name) -#define BM_DECLARE_int32(name) extern int32_t FLAG(name) -#define BM_DECLARE_double(name) extern double FLAG(name) -#define BM_DECLARE_string(name) extern std::string FLAG(name) +#define BM_DECLARE_bool(name) BENCHMARK_EXPORT extern bool FLAG(name) +#define BM_DECLARE_int32(name) BENCHMARK_EXPORT extern int32_t FLAG(name) +#define BM_DECLARE_double(name) BENCHMARK_EXPORT extern double FLAG(name) +#define BM_DECLARE_string(name) BENCHMARK_EXPORT extern std::string FLAG(name) #define BM_DECLARE_kvpairs(name) \ - extern std::map FLAG(name) + BENCHMARK_EXPORT extern std::map FLAG(name) // Macros for defining flags. #define BM_DEFINE_bool(name, default_val) \ - bool FLAG(name) = benchmark::BoolFromEnv(#name, default_val) + BENCHMARK_EXPORT bool FLAG(name) = benchmark::BoolFromEnv(#name, default_val) #define BM_DEFINE_int32(name, default_val) \ - int32_t FLAG(name) = benchmark::Int32FromEnv(#name, default_val) + BENCHMARK_EXPORT int32_t FLAG(name) = \ + benchmark::Int32FromEnv(#name, default_val) #define BM_DEFINE_double(name, default_val) \ - double FLAG(name) = benchmark::DoubleFromEnv(#name, default_val) + BENCHMARK_EXPORT double FLAG(name) = \ + benchmark::DoubleFromEnv(#name, default_val) #define BM_DEFINE_string(name, default_val) \ - std::string FLAG(name) = benchmark::StringFromEnv(#name, default_val) -#define BM_DEFINE_kvpairs(name, default_val) \ - std::map FLAG(name) = \ + BENCHMARK_EXPORT std::string FLAG(name) = \ + benchmark::StringFromEnv(#name, default_val) +#define BM_DEFINE_kvpairs(name, default_val) \ + BENCHMARK_EXPORT std::map FLAG(name) = \ benchmark::KvPairsFromEnv(#name, default_val) namespace benchmark { @@ -35,6 +40,7 @@ namespace benchmark { // // If the variable exists, returns IsTruthyFlagValue() value; if not, // returns the given default value. 
+BENCHMARK_EXPORT bool BoolFromEnv(const char* flag, bool default_val); // Parses an Int32 from the environment variable corresponding to the given @@ -42,6 +48,7 @@ bool BoolFromEnv(const char* flag, bool default_val); // // If the variable exists, returns ParseInt32() value; if not, returns // the given default value. +BENCHMARK_EXPORT int32_t Int32FromEnv(const char* flag, int32_t default_val); // Parses an Double from the environment variable corresponding to the given @@ -49,6 +56,7 @@ int32_t Int32FromEnv(const char* flag, int32_t default_val); // // If the variable exists, returns ParseDouble(); if not, returns // the given default value. +BENCHMARK_EXPORT double DoubleFromEnv(const char* flag, double default_val); // Parses a string from the environment variable corresponding to the given @@ -56,6 +64,7 @@ double DoubleFromEnv(const char* flag, double default_val); // // If variable exists, returns its value; if not, returns // the given default value. +BENCHMARK_EXPORT const char* StringFromEnv(const char* flag, const char* default_val); // Parses a set of kvpairs from the environment variable corresponding to the @@ -63,6 +72,7 @@ const char* StringFromEnv(const char* flag, const char* default_val); // // If variable exists, returns its value; if not, returns // the given default value. +BENCHMARK_EXPORT std::map KvPairsFromEnv( const char* flag, std::map default_val); @@ -75,40 +85,47 @@ std::map KvPairsFromEnv( // // On success, stores the value of the flag in *value, and returns // true. On failure, returns false without changing *value. +BENCHMARK_EXPORT bool ParseBoolFlag(const char* str, const char* flag, bool* value); // Parses a string for an Int32 flag, in the form of "--flag=value". // // On success, stores the value of the flag in *value, and returns // true. On failure, returns false without changing *value. +BENCHMARK_EXPORT bool ParseInt32Flag(const char* str, const char* flag, int32_t* value); // Parses a string for a Double flag, in the form of "--flag=value". // // On success, stores the value of the flag in *value, and returns // true. On failure, returns false without changing *value. +BENCHMARK_EXPORT bool ParseDoubleFlag(const char* str, const char* flag, double* value); // Parses a string for a string flag, in the form of "--flag=value". // // On success, stores the value of the flag in *value, and returns // true. On failure, returns false without changing *value. +BENCHMARK_EXPORT bool ParseStringFlag(const char* str, const char* flag, std::string* value); // Parses a string for a kvpairs flag in the form "--flag=key=value,key=value" // // On success, stores the value of the flag in *value and returns true. On // failure returns false, though *value may have been mutated. +BENCHMARK_EXPORT bool ParseKeyValueFlag(const char* str, const char* flag, std::map* value); // Returns true if the string matches the flag. +BENCHMARK_EXPORT bool IsFlag(const char* str, const char* flag); // Returns true unless value starts with one of: '0', 'f', 'F', 'n' or 'N', or // some non-alphanumeric character. Also returns false if the value matches // one of 'no', 'false', 'off' (case-insensitive). As a special case, also // returns true if value is the empty string. 
+BENCHMARK_EXPORT bool IsTruthyFlagValue(const std::string& value); } // end namespace benchmark diff --git a/src/complexity.h b/src/complexity.h index df29b48..0a0679b 100644 --- a/src/complexity.h +++ b/src/complexity.h @@ -31,7 +31,7 @@ std::vector ComputeBigO( const std::vector& reports); // This data structure will contain the result returned by MinimalLeastSq -// - coef : Estimated coeficient for the high-order term as +// - coef : Estimated coefficient for the high-order term as // interpolated from data. // - rms : Normalized Root Mean Squared Error. // - complexity : Scalability form (e.g. oN, oNLogN). In case a scalability diff --git a/src/console_reporter.cc b/src/console_reporter.cc index 04cc0b7..10e05e1 100644 --- a/src/console_reporter.cc +++ b/src/console_reporter.cc @@ -33,6 +33,7 @@ namespace benchmark { +BENCHMARK_EXPORT bool ConsoleReporter::ReportContext(const Context& context) { name_field_width_ = context.name_field_width; printed_header_ = false; @@ -52,6 +53,7 @@ bool ConsoleReporter::ReportContext(const Context& context) { return true; } +BENCHMARK_EXPORT void ConsoleReporter::PrintHeader(const Run& run) { std::string str = FormatString("%-*s %13s %15s %12s", static_cast(name_field_width_), @@ -69,6 +71,7 @@ void ConsoleReporter::PrintHeader(const Run& run) { GetOutputStream() << line << "\n" << str << "\n" << line << "\n"; } +BENCHMARK_EXPORT void ConsoleReporter::ReportRuns(const std::vector& reports) { for (const auto& run : reports) { // print the header: @@ -99,6 +102,9 @@ static void IgnoreColorPrint(std::ostream& out, LogColor, const char* fmt, } static std::string FormatTime(double time) { + // For the time columns of the console printer 13 digits are reserved. One of + // them is a space and max two of them are the time unit (e.g ns). That puts + // us at 10 digits usable for the number. // Align decimal places... if (time < 1.0) { return FormatString("%10.3f", time); @@ -109,9 +115,15 @@ static std::string FormatTime(double time) { if (time < 100.0) { return FormatString("%10.1f", time); } + // Assuming the time is at max 9.9999e+99 and we have 10 digits for the + // number, we get 10-1(.)-1(e)-1(sign)-2(exponent) = 5 digits to print. 
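
As the new comment in FormatTime() explains, the console reporter reserves 13 characters per time column (one space, up to two for the unit), leaving 10 characters for digits; values that would overflow that fall back to scientific notation. A compact restatement of that ladder as a standalone sketch (FormatTimeSketch is a hypothetical name):

```cpp
// Restatement of the FormatTime() decision ladder: keep small values at a
// readable precision, and switch to scientific notation once the value no
// longer fits in the 10 digits available in the console column.
#include <cstdio>
#include <string>

std::string FormatTimeSketch(double time) {
  char buf[32];
  if (time < 1.0) {
    std::snprintf(buf, sizeof(buf), "%10.3f", time);
  } else if (time < 10.0) {
    std::snprintf(buf, sizeof(buf), "%10.2f", time);
  } else if (time < 100.0) {
    std::snprintf(buf, sizeof(buf), "%10.1f", time);
  } else if (time > 9999999999.0) {  // would overflow the 10-digit column
    std::snprintf(buf, sizeof(buf), "%1.4e", time);
  } else {
    std::snprintf(buf, sizeof(buf), "%10.0f", time);
  }
  return buf;
}

int main() {
  std::printf("[%s]\n", FormatTimeSketch(0.1234).c_str());   // [     0.123]
  std::printf("[%s]\n", FormatTimeSketch(12345.0).c_str());  // [     12345]
  std::printf("[%s]\n", FormatTimeSketch(1.5e12).c_str());   // [1.5000e+12]
}
```
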
+ if (time > 9999999999 /*max 10 digit number*/) { + return FormatString("%1.4e", time); + } return FormatString("%10.0f", time); } +BENCHMARK_EXPORT void ConsoleReporter::PrintRunData(const Run& result) { typedef void(PrinterFn)(std::ostream&, LogColor, const char*, ...); auto& Out = GetOutputStream(); @@ -123,9 +135,13 @@ void ConsoleReporter::PrintRunData(const Run& result) { printer(Out, name_color, "%-*s ", name_field_width_, result.benchmark_name().c_str()); - if (result.error_occurred) { + if (internal::SkippedWithError == result.skipped) { printer(Out, COLOR_RED, "ERROR OCCURRED: \'%s\'", - result.error_message.c_str()); + result.skip_message.c_str()); + printer(Out, COLOR_DEFAULT, "\n"); + return; + } else if (internal::SkippedWithMessage == result.skipped) { + printer(Out, COLOR_WHITE, "SKIPPED: \'%s\'", result.skip_message.c_str()); printer(Out, COLOR_DEFAULT, "\n"); return; } diff --git a/src/csv_reporter.cc b/src/csv_reporter.cc index 1c5e9fa..7b56da1 100644 --- a/src/csv_reporter.cc +++ b/src/csv_reporter.cc @@ -52,11 +52,13 @@ std::string CsvEscape(const std::string& s) { return '"' + tmp + '"'; } +BENCHMARK_EXPORT bool CSVReporter::ReportContext(const Context& context) { PrintBasicContext(&GetErrorStream(), context); return true; } +BENCHMARK_EXPORT void CSVReporter::ReportRuns(const std::vector& reports) { std::ostream& Out = GetOutputStream(); @@ -103,13 +105,14 @@ void CSVReporter::ReportRuns(const std::vector& reports) { } } +BENCHMARK_EXPORT void CSVReporter::PrintRunData(const Run& run) { std::ostream& Out = GetOutputStream(); Out << CsvEscape(run.benchmark_name()) << ","; - if (run.error_occurred) { + if (run.skipped) { Out << std::string(elements.size() - 3, ','); - Out << "true,"; - Out << CsvEscape(run.error_message) << "\n"; + Out << std::boolalpha << (internal::SkippedWithError == run.skipped) << ","; + Out << CsvEscape(run.skip_message) << "\n"; return; } diff --git a/src/cycleclock.h b/src/cycleclock.h index d65d32a..ae1ef2d 100644 --- a/src/cycleclock.h +++ b/src/cycleclock.h @@ -36,7 +36,8 @@ // declarations of some other intrinsics, breaking compilation. // Therefore, we simply declare __rdtsc ourselves. See also // http://connect.microsoft.com/VisualStudio/feedback/details/262047 -#if defined(COMPILER_MSVC) && !defined(_M_IX86) && !defined(_M_ARM64) +#if defined(COMPILER_MSVC) && !defined(_M_IX86) && !defined(_M_ARM64) && \ + !defined(_M_ARM64EC) extern "C" uint64_t __rdtsc(); #pragma intrinsic(__rdtsc) #endif @@ -114,7 +115,7 @@ inline BENCHMARK_ALWAYS_INLINE int64_t Now() { // when I know it will work. Otherwise, I'll use __rdtsc and hope // the code is being compiled with a non-ancient compiler. _asm rdtsc -#elif defined(COMPILER_MSVC) && defined(_M_ARM64) +#elif defined(COMPILER_MSVC) && (defined(_M_ARM64) || defined(_M_ARM64EC)) // See // https://docs.microsoft.com/en-us/cpp/intrinsics/arm64-intrinsics // and https://reviews.llvm.org/D53115 int64_t virtual_timer_value; @@ -132,7 +133,7 @@ inline BENCHMARK_ALWAYS_INLINE int64_t Now() { // Native Client does not provide any API to access cycle counter. // Use clock_gettime(CLOCK_MONOTONIC, ...) instead of gettimeofday - // because is provides nanosecond resolution (which is noticable at + // because is provides nanosecond resolution (which is noticeable at // least for PNaCl modules running on x86 Mac & Linux). // Initialize to always return 0 if clock_gettime fails. 
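
The cycleclock changes above extend Now() to more targets (ARM64EC, csky, Hexagon pcycle). As a minimal sketch of the general strategy — a cheap timestamp-counter intrinsic where one exists, otherwise a monotonic clock in nanoseconds — assuming only an x86 GCC/Clang build or a POSIX fallback; the real header covers many more architectures and compilers:

```cpp
// Minimal sketch of the cycleclock strategy: prefer the CPU's timestamp
// counter via a compiler intrinsic, otherwise fall back to CLOCK_MONOTONIC
// nanoseconds. Only two of the many paths in cycleclock.h are shown.
#include <cstdint>
#include <iostream>

#if defined(__i386__) || defined(__x86_64__)
#include <x86intrin.h>
static int64_t NowTicks() { return static_cast<int64_t>(__rdtsc()); }
#else
#include <ctime>
static int64_t NowTicks() {
  struct timespec ts = {0, 0};  // stays zero if clock_gettime fails
  clock_gettime(CLOCK_MONOTONIC, &ts);
  return static_cast<int64_t>(ts.tv_sec) * 1000000000 + ts.tv_nsec;
}
#endif

int main() {
  const int64_t a = NowTicks();
  volatile double sink = 0.0;
  for (int i = 0; i < 1000000; ++i) sink += i;
  const int64_t b = NowTicks();
  std::cout << "elapsed ticks: " << (b - a) << "\n";
}
```
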
struct timespec ts = {0, 0}; @@ -173,7 +174,7 @@ inline BENCHMARK_ALWAYS_INLINE int64_t Now() { struct timeval tv; gettimeofday(&tv, nullptr); return static_cast(tv.tv_sec) * 1000000 + tv.tv_usec; -#elif defined(__loongarch__) +#elif defined(__loongarch__) || defined(__csky__) struct timeval tv; gettimeofday(&tv, nullptr); return static_cast(tv.tv_sec) * 1000000 + tv.tv_usec; @@ -212,6 +213,10 @@ inline BENCHMARK_ALWAYS_INLINE int64_t Now() { struct timeval tv; gettimeofday(&tv, nullptr); return static_cast(tv.tv_sec) * 1000000 + tv.tv_usec; +#elif defined(__hexagon__) + uint64_t pcycle; + asm volatile("%0 = C15:14" : "=r"(pcycle)); + return static_cast(pcycle); #else // The soft failover to a generic implementation is automatic only for ARM. // For other platforms the developer is expected to make an attempt to create diff --git a/src/internal_macros.h b/src/internal_macros.h index 91f367b..8dd7d0c 100644 --- a/src/internal_macros.h +++ b/src/internal_macros.h @@ -1,8 +1,6 @@ #ifndef BENCHMARK_INTERNAL_MACROS_H_ #define BENCHMARK_INTERNAL_MACROS_H_ -#include "benchmark/benchmark.h" - /* Needed to detect STL */ #include @@ -44,6 +42,19 @@ #define BENCHMARK_OS_CYGWIN 1 #elif defined(_WIN32) #define BENCHMARK_OS_WINDOWS 1 + // WINAPI_FAMILY_PARTITION is defined in winapifamily.h. + // We include windows.h which implicitly includes winapifamily.h for compatibility. + #ifndef NOMINMAX + #define NOMINMAX + #endif + #include + #if defined(WINAPI_FAMILY_PARTITION) + #if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP) + #define BENCHMARK_OS_WINDOWS_WIN32 1 + #elif WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP) + #define BENCHMARK_OS_WINDOWS_RT 1 + #endif + #endif #if defined(__MINGW32__) #define BENCHMARK_OS_MINGW 1 #endif @@ -80,6 +91,8 @@ #define BENCHMARK_OS_QNX 1 #elif defined(__MVS__) #define BENCHMARK_OS_ZOS 1 +#elif defined(__hexagon__) +#define BENCHMARK_OS_QURT 1 #endif #if defined(__ANDROID__) && defined(__GLIBCXX__) diff --git a/src/json_reporter.cc b/src/json_reporter.cc index e84a4ed..6559dfd 100644 --- a/src/json_reporter.cc +++ b/src/json_reporter.cc @@ -28,10 +28,6 @@ #include "timers.h" namespace benchmark { -namespace internal { -extern std::map* global_context; -} - namespace { std::string StrEscape(const std::string& s) { @@ -89,12 +85,6 @@ std::string FormatKV(std::string const& key, int64_t value) { return ss.str(); } -std::string FormatKV(std::string const& key, IterationCount value) { - std::stringstream ss; - ss << '"' << StrEscape(key) << "\": " << value; - return ss.str(); -} - std::string FormatKV(std::string const& key, double value) { std::stringstream ss; ss << '"' << StrEscape(key) << "\": "; @@ -184,8 +174,11 @@ bool JSONReporter::ReportContext(const Context& context) { #endif out << indent << FormatKV("library_build_type", build_type); - if (internal::global_context != nullptr) { - for (const auto& kv : *internal::global_context) { + std::map* global_context = + internal::GetGlobalContext(); + + if (global_context != nullptr) { + for (const auto& kv : *global_context) { out << ",\n"; out << indent << FormatKV(kv.first, kv.second); } @@ -261,9 +254,12 @@ void JSONReporter::PrintRunData(Run const& run) { BENCHMARK_UNREACHABLE(); }()) << ",\n"; } - if (run.error_occurred) { - out << indent << FormatKV("error_occurred", run.error_occurred) << ",\n"; - out << indent << FormatKV("error_message", run.error_message) << ",\n"; + if (internal::SkippedWithError == run.skipped) { + out << indent << FormatKV("error_occurred", true) << ",\n"; + out << indent << 
FormatKV("error_message", run.skip_message) << ",\n"; + } else if (internal::SkippedWithMessage == run.skipped) { + out << indent << FormatKV("skipped", true) << ",\n"; + out << indent << FormatKV("skip_message", run.skip_message) << ",\n"; } if (!run.report_big_o && !run.report_rms) { out << indent << FormatKV("iterations", run.iterations) << ",\n"; @@ -301,7 +297,8 @@ void JSONReporter::PrintRunData(Run const& run) { out << ",\n" << indent << FormatKV("max_bytes_used", memory_result.max_bytes_used); - auto report_if_present = [&out, &indent](const char* label, int64_t val) { + auto report_if_present = [&out, &indent](const std::string& label, + int64_t val) { if (val != MemoryManager::TombstoneValue) out << ",\n" << indent << FormatKV(label, val); }; diff --git a/src/log.h b/src/log.h index 48c071a..9a21400 100644 --- a/src/log.h +++ b/src/log.h @@ -4,7 +4,12 @@ #include #include -#include "benchmark/benchmark.h" +// NOTE: this is also defined in benchmark.h but we're trying to avoid a +// dependency. +// The _MSVC_LANG check should detect Visual Studio 2015 Update 3 and newer. +#if __cplusplus >= 201103L || (defined(_MSVC_LANG) && _MSVC_LANG >= 201103L) +#define BENCHMARK_HAS_CXX11 +#endif namespace benchmark { namespace internal { @@ -23,7 +28,16 @@ class LogType { private: LogType(std::ostream* out) : out_(out) {} std::ostream* out_; - BENCHMARK_DISALLOW_COPY_AND_ASSIGN(LogType); + + // NOTE: we could use BENCHMARK_DISALLOW_COPY_AND_ASSIGN but we shouldn't have + // a dependency on benchmark.h from here. +#ifndef BENCHMARK_HAS_CXX11 + LogType(const LogType&); + LogType& operator=(const LogType&); +#else + LogType(const LogType&) = delete; + LogType& operator=(const LogType&) = delete; +#endif }; template @@ -47,13 +61,13 @@ inline int& LogLevel() { } inline LogType& GetNullLogInstance() { - static LogType log(nullptr); - return log; + static LogType null_log(static_cast(nullptr)); + return null_log; } inline LogType& GetErrorLogInstance() { - static LogType log(&std::clog); - return log; + static LogType error_log(&std::clog); + return error_log; } inline LogType& GetLogInstanceForLevel(int level) { diff --git a/src/perf_counters.cc b/src/perf_counters.cc index b2ac768..417acdb 100644 --- a/src/perf_counters.cc +++ b/src/perf_counters.cc @@ -15,6 +15,7 @@ #include "perf_counters.h" #include +#include #include #if defined HAVE_LIBPFM @@ -28,96 +29,228 @@ namespace internal { constexpr size_t PerfCounterValues::kMaxCounters; #if defined HAVE_LIBPFM + +size_t PerfCounterValues::Read(const std::vector& leaders) { + // Create a pointer for multiple reads + const size_t bufsize = values_.size() * sizeof(values_[0]); + char* ptr = reinterpret_cast(values_.data()); + size_t size = bufsize; + for (int lead : leaders) { + auto read_bytes = ::read(lead, ptr, size); + if (read_bytes >= ssize_t(sizeof(uint64_t))) { + // Actual data bytes are all bytes minus initial padding + std::size_t data_bytes = read_bytes - sizeof(uint64_t); + // This should be very cheap since it's in hot cache + std::memmove(ptr, ptr + sizeof(uint64_t), data_bytes); + // Increment our counters + ptr += data_bytes; + size -= data_bytes; + } else { + int err = errno; + GetErrorLogInstance() << "Error reading lead " << lead << " errno:" << err + << " " << ::strerror(err) << "\n"; + return 0; + } + } + return (bufsize - size) / sizeof(uint64_t); +} + const bool PerfCounters::kSupported = true; -bool PerfCounters::Initialize() { return pfm_initialize() == PFM_SUCCESS; } +// Initializes libpfm only on the first call. 
Returns whether that single +// initialization was successful. +bool PerfCounters::Initialize() { + // Function-scope static gets initialized only once on first call. + static const bool success = []() { + return pfm_initialize() == PFM_SUCCESS; + }(); + return success; +} + +bool PerfCounters::IsCounterSupported(const std::string& name) { + Initialize(); + perf_event_attr_t attr; + std::memset(&attr, 0, sizeof(attr)); + pfm_perf_encode_arg_t arg; + std::memset(&arg, 0, sizeof(arg)); + arg.attr = &attr; + const int mode = PFM_PLM3; // user mode only + int ret = pfm_get_os_event_encoding(name.c_str(), mode, PFM_OS_PERF_EVENT_EXT, + &arg); + return (ret == PFM_SUCCESS); +} PerfCounters PerfCounters::Create( const std::vector& counter_names) { - if (counter_names.empty()) { - return NoCounters(); + if (!counter_names.empty()) { + Initialize(); } - if (counter_names.size() > PerfCounterValues::kMaxCounters) { - GetErrorLogInstance() - << counter_names.size() - << " counters were requested. The minimum is 1, the maximum is " - << PerfCounterValues::kMaxCounters << "\n"; - return NoCounters(); - } - std::vector counter_ids(counter_names.size()); - const int mode = PFM_PLM3; // user mode only + // Valid counters will populate these arrays but we start empty + std::vector valid_names; + std::vector counter_ids; + std::vector leader_ids; + + // Resize to the maximum possible + valid_names.reserve(counter_names.size()); + counter_ids.reserve(counter_names.size()); + + const int kCounterMode = PFM_PLM3; // user mode only + + // Group leads will be assigned on demand. The idea is that once we cannot + // create a counter descriptor, the reason is that this group has maxed out + // so we set the group_id again to -1 and retry - giving the algorithm a + // chance to create a new group leader to hold the next set of counters. + int group_id = -1; + + // Loop through all performance counters for (size_t i = 0; i < counter_names.size(); ++i) { - const bool is_first = i == 0; - struct perf_event_attr attr {}; - attr.size = sizeof(attr); - const int group_id = !is_first ? counter_ids[0] : -1; + // we are about to push into the valid names vector + // check if we did not reach the maximum + if (valid_names.size() == PerfCounterValues::kMaxCounters) { + // Log a message if we maxed out and stop adding + GetErrorLogInstance() + << counter_names.size() << " counters were requested. The maximum is " + << PerfCounterValues::kMaxCounters << " and " << valid_names.size() + << " were already added. All remaining counters will be ignored\n"; + // stop the loop and return what we have already + break; + } + + // Check if this name is empty const auto& name = counter_names[i]; if (name.empty()) { - GetErrorLogInstance() << "A counter name was the empty string\n"; - return NoCounters(); + GetErrorLogInstance() + << "A performance counter name was the empty string\n"; + continue; } + + // Here first means first in group, ie the group leader + const bool is_first = (group_id < 0); + + // This struct will be populated by libpfm from the counter string + // and then fed into the syscall perf_event_open + struct perf_event_attr attr {}; + attr.size = sizeof(attr); + + // This is the input struct to libpfm. 
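
With read_format = PERF_FORMAT_GROUP and no other format flags, a read() on a group leader is assumed to return `{ u64 nr; u64 values[nr]; }`; the Read() method above strips that leading word per leader so callers can index the counter values contiguously. A hedged sketch of reading one group (Linux-only; ReadGroupSketch and leader_fd are hypothetical, and the descriptor must come from perf_event_open):

```cpp
// Sketch of parsing one PERF_FORMAT_GROUP read buffer: drop the leading
// event count -- the "padding" word the patch strips -- and return just the
// packed counter values.
#include <algorithm>
#include <cstdint>
#include <vector>
#include <unistd.h>

std::vector<uint64_t> ReadGroupSketch(int leader_fd, size_t max_events) {
  std::vector<uint64_t> buf(1 + max_events);  // { nr, values[nr] }
  const ssize_t got =
      ::read(leader_fd, buf.data(), buf.size() * sizeof(uint64_t));
  if (got < static_cast<ssize_t>(sizeof(uint64_t))) return {};  // read failed
  const size_t nr = std::min<size_t>(static_cast<size_t>(buf[0]), max_events);
  return std::vector<uint64_t>(buf.begin() + 1, buf.begin() + 1 + nr);
}
```
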
pfm_perf_encode_arg_t arg{}; arg.attr = &attr; - - const int pfm_get = - pfm_get_os_event_encoding(name.c_str(), mode, PFM_OS_PERF_EVENT, &arg); + const int pfm_get = pfm_get_os_event_encoding(name.c_str(), kCounterMode, + PFM_OS_PERF_EVENT, &arg); if (pfm_get != PFM_SUCCESS) { - GetErrorLogInstance() << "Unknown counter name: " << name << "\n"; - return NoCounters(); + GetErrorLogInstance() + << "Unknown performance counter name: " << name << "\n"; + continue; } - attr.disabled = is_first; - // Note: the man page for perf_event_create suggests inerit = true and + + // We then proceed to populate the remaining fields in our attribute struct + // Note: the man page for perf_event_create suggests inherit = true and // read_format = PERF_FORMAT_GROUP don't work together, but that's not the // case. + attr.disabled = is_first; attr.inherit = true; attr.pinned = is_first; attr.exclude_kernel = true; attr.exclude_user = false; attr.exclude_hv = true; - // Read all counters in one read. + + // Read all counters in a group in one read. attr.read_format = PERF_FORMAT_GROUP; int id = -1; - static constexpr size_t kNrOfSyscallRetries = 5; - // Retry syscall as it was interrupted often (b/64774091). - for (size_t num_retries = 0; num_retries < kNrOfSyscallRetries; - ++num_retries) { - id = perf_event_open(&attr, 0, -1, group_id, 0); - if (id >= 0 || errno != EINTR) { - break; + while (id < 0) { + static constexpr size_t kNrOfSyscallRetries = 5; + // Retry syscall as it was interrupted often (b/64774091). + for (size_t num_retries = 0; num_retries < kNrOfSyscallRetries; + ++num_retries) { + id = perf_event_open(&attr, 0, -1, group_id, 0); + if (id >= 0 || errno != EINTR) { + break; + } + } + if (id < 0) { + // If the file descriptor is negative we might have reached a limit + // in the current group. Set the group_id to -1 and retry + if (group_id >= 0) { + // Create a new group + group_id = -1; + } else { + // At this point we have already retried to set a new group id and + // failed. We then give up. + break; + } } } + + // We failed to get a new file descriptor. We might have reached a hard + // hardware limit that cannot be resolved even with group multiplexing if (id < 0) { - GetErrorLogInstance() - << "Failed to get a file descriptor for " << name << "\n"; + GetErrorLogInstance() << "***WARNING** Failed to get a file descriptor " + "for performance counter " + << name << ". Ignoring\n"; + + // We give up on this counter but try to keep going + // as the others would be fine + continue; + } + if (group_id < 0) { + // This is a leader, store and assign it to the current file descriptor + leader_ids.push_back(id); + group_id = id; + } + // This is a valid counter, add it to our descriptor's list + counter_ids.push_back(id); + valid_names.push_back(name); + } + + // Loop through all group leaders activating them + // There is another option of starting ALL counters in a process but + // that would be far reaching an intrusion. If the user is using PMCs + // by themselves then this would have a side effect on them. It is + // friendlier to loop through all groups individually. + for (int lead : leader_ids) { + if (ioctl(lead, PERF_EVENT_IOC_ENABLE) != 0) { + // This should never happen but if it does, we give up on the + // entire batch as recovery would be a mess. + GetErrorLogInstance() << "***WARNING*** Failed to start counters. 
" + "Claring out all counters.\n"; + + // Close all peformance counters + for (int id : counter_ids) { + ::close(id); + } + + // Return an empty object so our internal state is still good and + // the process can continue normally without impact return NoCounters(); } - - counter_ids[i] = id; - } - if (ioctl(counter_ids[0], PERF_EVENT_IOC_ENABLE) != 0) { - GetErrorLogInstance() << "Failed to start counters\n"; - return NoCounters(); } - return PerfCounters(counter_names, std::move(counter_ids)); + return PerfCounters(std::move(valid_names), std::move(counter_ids), + std::move(leader_ids)); } -PerfCounters::~PerfCounters() { +void PerfCounters::CloseCounters() const { if (counter_ids_.empty()) { return; } - ioctl(counter_ids_[0], PERF_EVENT_IOC_DISABLE); + for (int lead : leader_ids_) { + ioctl(lead, PERF_EVENT_IOC_DISABLE); + } for (int fd : counter_ids_) { close(fd); } } #else // defined HAVE_LIBPFM +size_t PerfCounterValues::Read(const std::vector&) { return 0; } + const bool PerfCounters::kSupported = false; bool PerfCounters::Initialize() { return false; } +bool PerfCounters::IsCounterSupported(const std::string&) { return false; } + PerfCounters PerfCounters::Create( const std::vector& counter_names) { if (!counter_names.empty()) { @@ -126,7 +259,24 @@ PerfCounters PerfCounters::Create( return NoCounters(); } -PerfCounters::~PerfCounters() = default; +void PerfCounters::CloseCounters() const {} #endif // defined HAVE_LIBPFM + +PerfCountersMeasurement::PerfCountersMeasurement( + const std::vector& counter_names) + : start_values_(counter_names.size()), end_values_(counter_names.size()) { + counters_ = PerfCounters::Create(counter_names); +} + +PerfCounters& PerfCounters::operator=(PerfCounters&& other) noexcept { + if (this != &other) { + CloseCounters(); + + counter_ids_ = std::move(other.counter_ids_); + leader_ids_ = std::move(other.leader_ids_); + counter_names_ = std::move(other.counter_names_); + } + return *this; +} } // namespace internal } // namespace benchmark diff --git a/src/perf_counters.h b/src/perf_counters.h index 47ca138..bf5eb6b 100644 --- a/src/perf_counters.h +++ b/src/perf_counters.h @@ -17,16 +17,25 @@ #include #include +#include +#include #include #include "benchmark/benchmark.h" #include "check.h" #include "log.h" +#include "mutex.h" #ifndef BENCHMARK_OS_WINDOWS #include #endif +#if defined(_MSC_VER) +#pragma warning(push) +// C4251: needs to have dll-interface to be used by clients of class +#pragma warning(disable : 4251) +#endif + namespace benchmark { namespace internal { @@ -36,18 +45,21 @@ namespace internal { // The implementation ensures the storage is inlined, and allows 0-based // indexing into the counter values. // The object is used in conjunction with a PerfCounters object, by passing it -// to Snapshot(). The values are populated such that -// perfCounters->names()[i]'s value is obtained at position i (as given by -// operator[]) of this object. -class PerfCounterValues { +// to Snapshot(). The Read() method relocates individual reads, discarding +// the initial padding from each group leader in the values buffer such that +// all user accesses through the [] operator are correct. 
+class BENCHMARK_EXPORT PerfCounterValues { public: explicit PerfCounterValues(size_t nr_counters) : nr_counters_(nr_counters) { BM_CHECK_LE(nr_counters_, kMaxCounters); } - uint64_t operator[](size_t pos) const { return values_[kPadding + pos]; } + // We are reading correctly now so the values don't need to skip padding + uint64_t operator[](size_t pos) const { return values_[pos]; } - static constexpr size_t kMaxCounters = 3; + // Increased the maximum to 32 only since the buffer + // is std::array<> backed + static constexpr size_t kMaxCounters = 32; private: friend class PerfCounters; @@ -58,7 +70,14 @@ class PerfCounterValues { sizeof(uint64_t) * (kPadding + nr_counters_)}; } - static constexpr size_t kPadding = 1; + // This reading is complex and as the goal of this class is to + // abstract away the intrincacies of the reading process, this is + // a better place for it + size_t Read(const std::vector& leaders); + + // Move the padding to 2 due to the reading algorithm (1st padding plus a + // current read padding) + static constexpr size_t kPadding = 2; std::array values_; const size_t nr_counters_; }; @@ -66,27 +85,34 @@ class PerfCounterValues { // Collect PMU counters. The object, once constructed, is ready to be used by // calling read(). PMU counter collection is enabled from the time create() is // called, to obtain the object, until the object's destructor is called. -class PerfCounters final { +class BENCHMARK_EXPORT PerfCounters final { public: // True iff this platform supports performance counters. static const bool kSupported; - bool IsValid() const { return is_valid_; } + // Returns an empty object static PerfCounters NoCounters() { return PerfCounters(); } - ~PerfCounters(); + ~PerfCounters() { CloseCounters(); } + PerfCounters() = default; PerfCounters(PerfCounters&&) = default; PerfCounters(const PerfCounters&) = delete; + PerfCounters& operator=(PerfCounters&&) noexcept; + PerfCounters& operator=(const PerfCounters&) = delete; // Platform-specific implementations may choose to do some library // initialization here. static bool Initialize(); + // Check if the given counter is supported, if the app wants to + // check before passing + static bool IsCounterSupported(const std::string& name); + // Return a PerfCounters object ready to read the counters with the names // specified. The values are user-mode only. The counter name format is // implementation and OS specific. - // TODO: once we move to C++-17, this should be a std::optional, and then the - // IsValid() boolean can be dropped. + // In case of failure, this method will in the worst case return an + // empty object whose state will still be valid. 
static PerfCounters Create(const std::vector& counter_names); // Take a snapshot of the current value of the counters into the provided @@ -95,10 +121,7 @@ class PerfCounters final { BENCHMARK_ALWAYS_INLINE bool Snapshot(PerfCounterValues* values) const { #ifndef BENCHMARK_OS_WINDOWS assert(values != nullptr); - assert(IsValid()); - auto buffer = values->get_data_buffer(); - auto read_bytes = ::read(counter_ids_[0], buffer.first, buffer.second); - return static_cast(read_bytes) == buffer.second; + return values->Read(leader_ids_) == counter_ids_.size(); #else (void)values; return false; @@ -110,63 +133,68 @@ class PerfCounters final { private: PerfCounters(const std::vector& counter_names, - std::vector&& counter_ids) + std::vector&& counter_ids, std::vector&& leader_ids) : counter_ids_(std::move(counter_ids)), - counter_names_(counter_names), - is_valid_(true) {} - PerfCounters() : is_valid_(false) {} + leader_ids_(std::move(leader_ids)), + counter_names_(counter_names) {} + + void CloseCounters() const; std::vector counter_ids_; - const std::vector counter_names_; - const bool is_valid_; + std::vector leader_ids_; + std::vector counter_names_; }; // Typical usage of the above primitives. -class PerfCountersMeasurement final { +class BENCHMARK_EXPORT PerfCountersMeasurement final { public: - PerfCountersMeasurement(PerfCounters&& c) - : counters_(std::move(c)), - start_values_(counters_.IsValid() ? counters_.names().size() : 0), - end_values_(counters_.IsValid() ? counters_.names().size() : 0) {} + PerfCountersMeasurement(const std::vector& counter_names); - bool IsValid() const { return counters_.IsValid(); } + size_t num_counters() const { return counters_.num_counters(); } - BENCHMARK_ALWAYS_INLINE void Start() { - assert(IsValid()); + std::vector names() const { return counters_.names(); } + + BENCHMARK_ALWAYS_INLINE bool Start() { + if (num_counters() == 0) return true; // Tell the compiler to not move instructions above/below where we take // the snapshot. ClobberMemory(); - counters_.Snapshot(&start_values_); + valid_read_ &= counters_.Snapshot(&start_values_); ClobberMemory(); + + return valid_read_; } - BENCHMARK_ALWAYS_INLINE std::vector> - StopAndGetMeasurements() { - assert(IsValid()); + BENCHMARK_ALWAYS_INLINE bool Stop( + std::vector>& measurements) { + if (num_counters() == 0) return true; // Tell the compiler to not move instructions above/below where we take // the snapshot. ClobberMemory(); - counters_.Snapshot(&end_values_); + valid_read_ &= counters_.Snapshot(&end_values_); ClobberMemory(); - std::vector> ret; for (size_t i = 0; i < counters_.names().size(); ++i) { double measurement = static_cast(end_values_[i]) - static_cast(start_values_[i]); - ret.push_back({counters_.names()[i], measurement}); + measurements.push_back({counters_.names()[i], measurement}); } - return ret; + + return valid_read_; } private: PerfCounters counters_; + bool valid_read_ = true; PerfCounterValues start_values_; PerfCounterValues end_values_; }; -BENCHMARK_UNUSED static bool perf_init_anchor = PerfCounters::Initialize(); - } // namespace internal } // namespace benchmark +#if defined(_MSC_VER) +#pragma warning(pop) +#endif + #endif // BENCHMARK_PERF_COUNTERS_H diff --git a/src/re.h b/src/re.h index 6300467..9afb869 100644 --- a/src/re.h +++ b/src/re.h @@ -33,7 +33,7 @@ // Prefer C regex libraries when compiling w/o exceptions so that we can // correctly report errors. 
#if defined(BENCHMARK_HAS_NO_EXCEPTIONS) && \ - defined(BENCHMARK_HAVE_STD_REGEX) && \ + defined(HAVE_STD_REGEX) && \ (defined(HAVE_GNU_POSIX_REGEX) || defined(HAVE_POSIX_REGEX)) #undef HAVE_STD_REGEX #endif diff --git a/src/reporter.cc b/src/reporter.cc index 1d2df17..076bc31 100644 --- a/src/reporter.cc +++ b/src/reporter.cc @@ -25,9 +25,6 @@ #include "timers.h" namespace benchmark { -namespace internal { -extern std::map *global_context; -} BenchmarkReporter::BenchmarkReporter() : output_stream_(&std::cout), error_stream_(&std::cerr) {} @@ -39,7 +36,11 @@ void BenchmarkReporter::PrintBasicContext(std::ostream *out, BM_CHECK(out) << "cannot be null"; auto &Out = *out; +#ifndef BENCHMARK_OS_QURT + // Date/time information is not available on QuRT. + // Attempting to get it via this call cause the binary to crash. Out << LocalDateTimeString() << "\n"; +#endif if (context.executable_name) Out << "Running " << context.executable_name << "\n"; @@ -67,8 +68,11 @@ void BenchmarkReporter::PrintBasicContext(std::ostream *out, Out << "\n"; } - if (internal::global_context != nullptr) { - for (const auto &kv : *internal::global_context) { + std::map *global_context = + internal::GetGlobalContext(); + + if (global_context != nullptr) { + for (const auto &kv : *global_context) { Out << kv.first << ": " << kv.second << "\n"; } } diff --git a/src/sleep.cc b/src/sleep.cc deleted file mode 100644 index ab59000..0000000 --- a/src/sleep.cc +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include "sleep.h" - -#include -#include -#include - -#include "internal_macros.h" - -#ifdef BENCHMARK_OS_WINDOWS -#include -#endif - -#ifdef BENCHMARK_OS_ZOS -#include -#endif - -namespace benchmark { -#ifdef BENCHMARK_OS_WINDOWS -// Window's Sleep takes milliseconds argument. -void SleepForMilliseconds(int milliseconds) { Sleep(milliseconds); } -void SleepForSeconds(double seconds) { - SleepForMilliseconds(static_cast(kNumMillisPerSecond * seconds)); -} -#else // BENCHMARK_OS_WINDOWS -void SleepForMicroseconds(int microseconds) { -#ifdef BENCHMARK_OS_ZOS - // z/OS does not support nanosleep. Instead call sleep() and then usleep() to - // sleep for the remaining microseconds because usleep() will fail if its - // argument is greater than 1000000. - div_t sleepTime = div(microseconds, kNumMicrosPerSecond); - int seconds = sleepTime.quot; - while (seconds != 0) seconds = sleep(seconds); - while (usleep(sleepTime.rem) == -1 && errno == EINTR) - ; -#else - struct timespec sleep_time; - sleep_time.tv_sec = microseconds / kNumMicrosPerSecond; - sleep_time.tv_nsec = (microseconds % kNumMicrosPerSecond) * kNumNanosPerMicro; - while (nanosleep(&sleep_time, &sleep_time) != 0 && errno == EINTR) - ; // Ignore signals and wait for the full interval to elapse. 
-#endif -} - -void SleepForMilliseconds(int milliseconds) { - SleepForMicroseconds(milliseconds * kNumMicrosPerMilli); -} - -void SleepForSeconds(double seconds) { - SleepForMicroseconds(static_cast(seconds * kNumMicrosPerSecond)); -} -#endif // BENCHMARK_OS_WINDOWS -} // end namespace benchmark diff --git a/src/sleep.h b/src/sleep.h deleted file mode 100644 index f98551a..0000000 --- a/src/sleep.h +++ /dev/null @@ -1,15 +0,0 @@ -#ifndef BENCHMARK_SLEEP_H_ -#define BENCHMARK_SLEEP_H_ - -namespace benchmark { -const int kNumMillisPerSecond = 1000; -const int kNumMicrosPerMilli = 1000; -const int kNumMicrosPerSecond = kNumMillisPerSecond * 1000; -const int kNumNanosPerMicro = 1000; -const int kNumNanosPerSecond = kNumNanosPerMicro * kNumMicrosPerSecond; - -void SleepForMilliseconds(int milliseconds); -void SleepForSeconds(double seconds); -} // end namespace benchmark - -#endif // BENCHMARK_SLEEP_H_ diff --git a/src/statistics.cc b/src/statistics.cc index 3e5ef09..844e926 100644 --- a/src/statistics.cc +++ b/src/statistics.cc @@ -42,13 +42,13 @@ double StatisticsMedian(const std::vector& v) { auto center = copy.begin() + v.size() / 2; std::nth_element(copy.begin(), center, copy.end()); - // did we have an odd number of samples? - // if yes, then center is the median - // it no, then we are looking for the average between center and the value - // before + // Did we have an odd number of samples? If yes, then center is the median. + // If not, then we are looking for the average between center and the value + // before. Instead of resorting, we just look for the max value before it, + // which is not necessarily the element immediately preceding `center` Since + // `copy` is only partially sorted by `nth_element`. if (v.size() % 2 == 1) return *center; - auto center2 = copy.begin() + v.size() / 2 - 1; - std::nth_element(copy.begin(), center2, copy.end()); + auto center2 = std::max_element(copy.begin(), center); return (*center + *center2) / 2.0; } @@ -89,9 +89,8 @@ std::vector ComputeStats( typedef BenchmarkReporter::Run Run; std::vector results; - auto error_count = - std::count_if(reports.begin(), reports.end(), - [](Run const& run) { return run.error_occurred; }); + auto error_count = std::count_if(reports.begin(), reports.end(), + [](Run const& run) { return run.skipped; }); if (reports.size() - error_count < 2) { // We don't report aggregated data if there was a single run. 
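
The median rewrite above avoids a second nth_element call: after partially sorting around `center`, every element before it is less than or equal to `*center`, so the lower middle of an even-sized sample is simply the maximum of that first half. A standalone sketch of the same technique (the Median helper name is hypothetical):

```cpp
// Median via one partial sort: nth_element places the upper-middle value at
// 'center' and partitions everything smaller before it, so for an even count
// the lower middle is max_element over the first half -- no second sort.
#include <algorithm>
#include <iostream>
#include <vector>

double Median(std::vector<double> v) {  // takes a copy, as the real code does
  if (v.empty()) return 0.0;
  auto center = v.begin() + v.size() / 2;
  std::nth_element(v.begin(), center, v.end());
  if (v.size() % 2 == 1) return *center;
  auto center2 = std::max_element(v.begin(), center);
  return (*center + *center2) / 2.0;
}

int main() {
  std::cout << Median({5.0, 1.0, 4.0, 2.0, 3.0}) << "\n";       // 3
  std::cout << Median({5.0, 1.0, 4.0, 2.0, 3.0, 6.0}) << "\n";  // 3.5
}
```
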
@@ -118,11 +117,13 @@ std::vector ComputeStats( for (auto const& cnt : r.counters) { auto it = counter_stats.find(cnt.first); if (it == counter_stats.end()) { - counter_stats.insert({cnt.first, {cnt.second, std::vector{}}}); - it = counter_stats.find(cnt.first); + it = counter_stats + .emplace(cnt.first, + CounterStat{cnt.second, std::vector{}}) + .first; it->second.s.reserve(reports.size()); } else { - BM_CHECK_EQ(counter_stats[cnt.first].c.flags, cnt.second.flags); + BM_CHECK_EQ(it->second.c.flags, cnt.second.flags); } } } @@ -131,7 +132,7 @@ std::vector ComputeStats( for (Run const& run : reports) { BM_CHECK_EQ(reports[0].benchmark_name(), run.benchmark_name()); BM_CHECK_EQ(run_iterations, run.iterations); - if (run.error_occurred) continue; + if (run.skipped) continue; real_accumulated_time_stat.emplace_back(run.real_accumulated_time); cpu_accumulated_time_stat.emplace_back(run.cpu_accumulated_time); // user counters diff --git a/src/statistics.h b/src/statistics.h index a9545a5..6e5560e 100644 --- a/src/statistics.h +++ b/src/statistics.h @@ -22,15 +22,21 @@ namespace benchmark { -// Return a vector containing the mean, median and standard devation information -// (and any user-specified info) for the specified list of reports. If 'reports' -// contains less than two non-errored runs an empty vector is returned +// Return a vector containing the mean, median and standard deviation +// information (and any user-specified info) for the specified list of reports. +// If 'reports' contains less than two non-errored runs an empty vector is +// returned +BENCHMARK_EXPORT std::vector ComputeStats( const std::vector& reports); +BENCHMARK_EXPORT double StatisticsMean(const std::vector& v); +BENCHMARK_EXPORT double StatisticsMedian(const std::vector& v); +BENCHMARK_EXPORT double StatisticsStdDev(const std::vector& v); +BENCHMARK_EXPORT double StatisticsCV(const std::vector& v); } // end namespace benchmark diff --git a/src/string_util.cc b/src/string_util.cc index 401fa13..c69e40a 100644 --- a/src/string_util.cc +++ b/src/string_util.cc @@ -11,16 +11,17 @@ #include #include "arraysize.h" +#include "benchmark/benchmark.h" namespace benchmark { namespace { - // kilo, Mega, Giga, Tera, Peta, Exa, Zetta, Yotta. -const char kBigSIUnits[] = "kMGTPEZY"; +const char* const kBigSIUnits[] = {"k", "M", "G", "T", "P", "E", "Z", "Y"}; // Kibi, Mebi, Gibi, Tebi, Pebi, Exbi, Zebi, Yobi. -const char kBigIECUnits[] = "KMGTPEZY"; +const char* const kBigIECUnits[] = {"Ki", "Mi", "Gi", "Ti", + "Pi", "Ei", "Zi", "Yi"}; // milli, micro, nano, pico, femto, atto, zepto, yocto. -const char kSmallSIUnits[] = "munpfazy"; +const char* const kSmallSIUnits[] = {"m", "u", "n", "p", "f", "a", "z", "y"}; // We require that all three arrays have the same size. static_assert(arraysize(kBigSIUnits) == arraysize(kBigIECUnits), @@ -30,9 +31,8 @@ static_assert(arraysize(kSmallSIUnits) == arraysize(kBigSIUnits), static const int64_t kUnitsSize = arraysize(kBigSIUnits); -void ToExponentAndMantissa(double val, double thresh, int precision, - double one_k, std::string* mantissa, - int64_t* exponent) { +void ToExponentAndMantissa(double val, int precision, double one_k, + std::string* mantissa, int64_t* exponent) { std::stringstream mantissa_stream; if (val < 0) { @@ -43,8 +43,8 @@ void ToExponentAndMantissa(double val, double thresh, int precision, // Adjust threshold so that it never excludes things which can't be rendered // in 'precision' digits. 
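
The string_util changes above switch the unit tables to string prefixes ("k" vs "Ki") and thread Counter::OneK through HumanReadableNumber(). A much-simplified sketch of the scaling idea, covering only the big-unit path with one decimal of precision (HumanReadableSketch is a hypothetical name; the real code also handles sub-unit prefixes and negative values):

```cpp
// Simplified human-readable formatting: divide by one_k (1000 for SI,
// 1024 for IEC) until the value drops below one_k, then append the prefix.
#include <cstdio>
#include <string>

std::string HumanReadableSketch(double n, double one_k, bool iec) {
  static const char* const kSI[]  = {"", "k", "M", "G", "T"};
  static const char* const kIEC[] = {"", "Ki", "Mi", "Gi", "Ti"};
  const char* const* units = iec ? kIEC : kSI;
  int exponent = 0;
  while (n >= one_k && exponent < 4) {
    n /= one_k;
    ++exponent;
  }
  char buf[32];
  std::snprintf(buf, sizeof(buf), "%.1f%s", n, units[exponent]);
  return buf;
}

int main() {
  std::printf("%s\n", HumanReadableSketch(1536, 1024, true).c_str());   // 1.5Ki
  std::printf("%s\n", HumanReadableSketch(1536, 1000, false).c_str());  // 1.5k
}
```
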
const double adjusted_threshold = - std::max(thresh, 1.0 / std::pow(10.0, precision)); - const double big_threshold = adjusted_threshold * one_k; + std::max(1.0, 1.0 / std::pow(10.0, precision)); + const double big_threshold = (adjusted_threshold * one_k) - 1; const double small_threshold = adjusted_threshold; // Values in ]simple_threshold,small_threshold[ will be printed as-is const double simple_threshold = 0.01; @@ -92,37 +92,20 @@ std::string ExponentToPrefix(int64_t exponent, bool iec) { const int64_t index = (exponent > 0 ? exponent - 1 : -exponent - 1); if (index >= kUnitsSize) return ""; - const char* array = + const char* const* array = (exponent > 0 ? (iec ? kBigIECUnits : kBigSIUnits) : kSmallSIUnits); - if (iec) - return array[index] + std::string("i"); - else - return std::string(1, array[index]); + + return std::string(array[index]); } -std::string ToBinaryStringFullySpecified(double value, double threshold, - int precision, double one_k = 1024.0) { +std::string ToBinaryStringFullySpecified(double value, int precision, + Counter::OneK one_k) { std::string mantissa; int64_t exponent; - ToExponentAndMantissa(value, threshold, precision, one_k, &mantissa, + ToExponentAndMantissa(value, precision, + one_k == Counter::kIs1024 ? 1024.0 : 1000.0, &mantissa, &exponent); - return mantissa + ExponentToPrefix(exponent, false); -} - -} // end namespace - -void AppendHumanReadable(int n, std::string* str) { - std::stringstream ss; - // Round down to the nearest SI prefix. - ss << ToBinaryStringFullySpecified(n, 1.0, 0); - *str += ss.str(); -} - -std::string HumanReadableNumber(double n, double one_k) { - // 1.1 means that figures up to 1.1k should be shown with the next unit down; - // this softens edge effects. - // 1 means that we should show one decimal place of precision. - return ToBinaryStringFullySpecified(n, 1.1, 1, one_k); + return mantissa + ExponentToPrefix(exponent, one_k == Counter::kIs1024); } std::string StrFormatImp(const char* msg, va_list args) { @@ -133,21 +116,21 @@ std::string StrFormatImp(const char* msg, va_list args) { // TODO(ericwf): use std::array for first attempt to avoid one memory // allocation guess what the size might be std::array local_buff; - std::size_t size = local_buff.size(); + // 2015-10-08: vsnprintf is used instead of snd::vsnprintf due to a limitation // in the android-ndk - auto ret = vsnprintf(local_buff.data(), size, msg, args_cp); + auto ret = vsnprintf(local_buff.data(), local_buff.size(), msg, args_cp); va_end(args_cp); // handle empty expansion if (ret == 0) return std::string{}; - if (static_cast(ret) < size) + if (static_cast(ret) < local_buff.size()) return std::string(local_buff.data()); // we did not provide a long enough buffer on our first attempt. // add 1 to size to account for null-byte in size cast to prevent overflow - size = static_cast(ret) + 1; + std::size_t size = static_cast(ret) + 1; auto buff_ptr = std::unique_ptr(new char[size]); // 2015-10-08: vsnprintf is used instead of snd::vsnprintf due to a limitation // in the android-ndk @@ -155,6 +138,12 @@ std::string StrFormatImp(const char* msg, va_list args) { return std::string(buff_ptr.get()); } +} // end namespace + +std::string HumanReadableNumber(double n, Counter::OneK one_k) { + return ToBinaryStringFullySpecified(n, 1, one_k); +} + std::string StrFormat(const char* format, ...) 
{ va_list args; va_start(args, format); diff --git a/src/string_util.h b/src/string_util.h index ff3b7da..731aa2c 100644 --- a/src/string_util.h +++ b/src/string_util.h @@ -4,15 +4,19 @@ #include #include #include +#include +#include "benchmark/benchmark.h" +#include "benchmark/export.h" +#include "check.h" #include "internal_macros.h" namespace benchmark { -void AppendHumanReadable(int n, std::string* str); - -std::string HumanReadableNumber(double n, double one_k = 1024.0); +BENCHMARK_EXPORT +std::string HumanReadableNumber(double n, Counter::OneK one_k); +BENCHMARK_EXPORT #if defined(__MINGW32__) __attribute__((format(__MINGW_PRINTF_FORMAT, 1, 2))) #elif defined(__GNUC__) @@ -38,6 +42,7 @@ inline std::string StrCat(Args&&... args) { return ss.str(); } +BENCHMARK_EXPORT std::vector StrSplit(const std::string& str, char delim); // Disable lint checking for this block since it re-implements C functions. diff --git a/src/sysinfo.cc b/src/sysinfo.cc index 87dcfb4..922e83a 100644 --- a/src/sysinfo.cc +++ b/src/sysinfo.cc @@ -23,7 +23,7 @@ #include #else #include -#ifndef BENCHMARK_OS_FUCHSIA +#if !defined(BENCHMARK_OS_FUCHSIA) && !defined(BENCHMARK_OS_QURT) #include #endif #include @@ -38,10 +38,17 @@ #endif #if defined(BENCHMARK_OS_SOLARIS) #include +#include #endif #if defined(BENCHMARK_OS_QNX) #include #endif +#if defined(BENCHMARK_OS_QURT) +#include +#endif +#if defined(BENCHMARK_HAS_PTHREAD_AFFINITY) +#include +#endif #include #include @@ -58,15 +65,17 @@ #include #include #include +#include #include #include +#include "benchmark/benchmark.h" #include "check.h" #include "cycleclock.h" #include "internal_macros.h" #include "log.h" -#include "sleep.h" #include "string_util.h" +#include "timers.h" namespace benchmark { namespace { @@ -91,67 +100,59 @@ BENCHMARK_NORETURN void PrintErrorAndDie(Args&&... args) { /// `sysctl` with the result type it's to be interpreted as. struct ValueUnion { union DataT { - uint32_t uint32_value; - uint64_t uint64_value; + int32_t int32_value; + int64_t int64_value; // For correct aliasing of union members from bytes. char bytes[8]; }; using DataPtr = std::unique_ptr; // The size of the data union member + its trailing array size. 
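HumanReadableNumber now takes the 1000-versus-1024 decision as a Counter::OneK value, and the prefix tables hold full strings ("k", "Ki", ...) rather than single characters. A rough standalone approximation of the resulting formatting (the helper name, the truncated prefix table and the fixed one-decimal precision are simplifications of this sketch, and values below one unit are not scaled down):

    #include <cstdio>
    #include <string>

    std::string HumanReadableSketch(double n, bool iec) {
      const double one_k = iec ? 1024.0 : 1000.0;
      static const char* const kSI[] = {"", "k", "M", "G", "T"};
      static const char* const kIEC[] = {"", "Ki", "Mi", "Gi", "Ti"};
      int exp = 0;
      while (n >= one_k && exp < 4) {  // pick the largest prefix that keeps n small
        n /= one_k;
        ++exp;
      }
      char buf[64];
      std::snprintf(buf, sizeof(buf), "%.1f%s", n, (iec ? kIEC : kSI)[exp]);
      return std::string(buf);
    }

For example, HumanReadableSketch(1536.0, true) yields "1.5Ki" while HumanReadableSketch(1536.0, false) yields "1.5k", mirroring the kIs1024/kIs1000 split above.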
- size_t Size; - DataPtr Buff; + std::size_t size; + DataPtr buff; public: - ValueUnion() : Size(0), Buff(nullptr, &std::free) {} + ValueUnion() : size(0), buff(nullptr, &std::free) {} - explicit ValueUnion(size_t BuffSize) - : Size(sizeof(DataT) + BuffSize), - Buff(::new (std::malloc(Size)) DataT(), &std::free) {} + explicit ValueUnion(std::size_t buff_size) + : size(sizeof(DataT) + buff_size), + buff(::new (std::malloc(size)) DataT(), &std::free) {} ValueUnion(ValueUnion&& other) = default; - explicit operator bool() const { return bool(Buff); } + explicit operator bool() const { return bool(buff); } - char* data() const { return Buff->bytes; } + char* data() const { return buff->bytes; } std::string GetAsString() const { return std::string(data()); } int64_t GetAsInteger() const { - if (Size == sizeof(Buff->uint32_value)) - return static_cast(Buff->uint32_value); - else if (Size == sizeof(Buff->uint64_value)) - return static_cast(Buff->uint64_value); - BENCHMARK_UNREACHABLE(); - } - - uint64_t GetAsUnsigned() const { - if (Size == sizeof(Buff->uint32_value)) - return Buff->uint32_value; - else if (Size == sizeof(Buff->uint64_value)) - return Buff->uint64_value; + if (size == sizeof(buff->int32_value)) + return buff->int32_value; + else if (size == sizeof(buff->int64_value)) + return buff->int64_value; BENCHMARK_UNREACHABLE(); } template std::array GetAsArray() { - const int ArrSize = sizeof(T) * N; - BM_CHECK_LE(ArrSize, Size); - std::array Arr; - std::memcpy(Arr.data(), data(), ArrSize); - return Arr; + const int arr_size = sizeof(T) * N; + BM_CHECK_LE(arr_size, size); + std::array arr; + std::memcpy(arr.data(), data(), arr_size); + return arr; } }; -ValueUnion GetSysctlImp(std::string const& Name) { +ValueUnion GetSysctlImp(std::string const& name) { #if defined BENCHMARK_OS_OPENBSD int mib[2]; mib[0] = CTL_HW; - if ((Name == "hw.ncpu") || (Name == "hw.cpuspeed")) { + if ((name == "hw.ncpu") || (name == "hw.cpuspeed")) { ValueUnion buff(sizeof(int)); - if (Name == "hw.ncpu") { + if (name == "hw.ncpu") { mib[1] = HW_NCPU; } else { mib[1] = HW_CPUSPEED; @@ -164,41 +165,41 @@ ValueUnion GetSysctlImp(std::string const& Name) { } return ValueUnion(); #else - size_t CurBuffSize = 0; - if (sysctlbyname(Name.c_str(), nullptr, &CurBuffSize, nullptr, 0) == -1) + std::size_t cur_buff_size = 0; + if (sysctlbyname(name.c_str(), nullptr, &cur_buff_size, nullptr, 0) == -1) return ValueUnion(); - ValueUnion buff(CurBuffSize); - if (sysctlbyname(Name.c_str(), buff.data(), &buff.Size, nullptr, 0) == 0) + ValueUnion buff(cur_buff_size); + if (sysctlbyname(name.c_str(), buff.data(), &buff.size, nullptr, 0) == 0) return buff; return ValueUnion(); #endif } BENCHMARK_MAYBE_UNUSED -bool GetSysctl(std::string const& Name, std::string* Out) { - Out->clear(); - auto Buff = GetSysctlImp(Name); - if (!Buff) return false; - Out->assign(Buff.data()); +bool GetSysctl(std::string const& name, std::string* out) { + out->clear(); + auto buff = GetSysctlImp(name); + if (!buff) return false; + out->assign(buff.data()); return true; } template ::value>::type> -bool GetSysctl(std::string const& Name, Tp* Out) { - *Out = 0; - auto Buff = GetSysctlImp(Name); - if (!Buff) return false; - *Out = static_cast(Buff.GetAsUnsigned()); +bool GetSysctl(std::string const& name, Tp* out) { + *out = 0; + auto buff = GetSysctlImp(name); + if (!buff) return false; + *out = static_cast(buff.GetAsInteger()); return true; } template -bool GetSysctl(std::string const& Name, std::array* Out) { - auto Buff = GetSysctlImp(Name); - if (!Buff) 
return false; - *Out = Buff.GetAsArray(); +bool GetSysctl(std::string const& name, std::array* out) { + auto buff = GetSysctlImp(name); + if (!buff) return false; + *out = buff.GetAsArray(); return true; } #endif @@ -234,21 +235,21 @@ CPUInfo::Scaling CpuScaling(int num_cpus) { #endif } -int CountSetBitsInCPUMap(std::string Val) { - auto CountBits = [](std::string Part) { +int CountSetBitsInCPUMap(std::string val) { + auto CountBits = [](std::string part) { using CPUMask = std::bitset; - Part = "0x" + Part; - CPUMask Mask(benchmark::stoul(Part, nullptr, 16)); - return static_cast(Mask.count()); + part = "0x" + part; + CPUMask mask(benchmark::stoul(part, nullptr, 16)); + return static_cast(mask.count()); }; - size_t Pos; + std::size_t pos; int total = 0; - while ((Pos = Val.find(',')) != std::string::npos) { - total += CountBits(Val.substr(0, Pos)); - Val = Val.substr(Pos + 1); + while ((pos = val.find(',')) != std::string::npos) { + total += CountBits(val.substr(0, pos)); + val = val.substr(pos + 1); } - if (!Val.empty()) { - total += CountBits(Val); + if (!val.empty()) { + total += CountBits(val); } return total; } @@ -257,16 +258,16 @@ BENCHMARK_MAYBE_UNUSED std::vector GetCacheSizesFromKVFS() { std::vector res; std::string dir = "/sys/devices/system/cpu/cpu0/cache/"; - int Idx = 0; + int idx = 0; while (true) { CPUInfo::CacheInfo info; - std::string FPath = StrCat(dir, "index", Idx++, "/"); - std::ifstream f(StrCat(FPath, "size").c_str()); + std::string fpath = StrCat(dir, "index", idx++, "/"); + std::ifstream f(StrCat(fpath, "size").c_str()); if (!f.is_open()) break; std::string suffix; f >> info.size; if (f.fail()) - PrintErrorAndDie("Failed while reading file '", FPath, "size'"); + PrintErrorAndDie("Failed while reading file '", fpath, "size'"); if (f.good()) { f >> suffix; if (f.bad()) @@ -277,13 +278,13 @@ std::vector GetCacheSizesFromKVFS() { else if (suffix == "K") info.size *= 1024; } - if (!ReadFromFile(StrCat(FPath, "type"), &info.type)) - PrintErrorAndDie("Failed to read from file ", FPath, "type"); - if (!ReadFromFile(StrCat(FPath, "level"), &info.level)) - PrintErrorAndDie("Failed to read from file ", FPath, "level"); + if (!ReadFromFile(StrCat(fpath, "type"), &info.type)) + PrintErrorAndDie("Failed to read from file ", fpath, "type"); + if (!ReadFromFile(StrCat(fpath, "level"), &info.level)) + PrintErrorAndDie("Failed to read from file ", fpath, "level"); std::string map_str; - if (!ReadFromFile(StrCat(FPath, "shared_cpu_map"), &map_str)) - PrintErrorAndDie("Failed to read from file ", FPath, "shared_cpu_map"); + if (!ReadFromFile(StrCat(fpath, "shared_cpu_map"), &map_str)) + PrintErrorAndDie("Failed to read from file ", fpath, "shared_cpu_map"); info.num_sharing = CountSetBitsInCPUMap(map_str); res.push_back(info); } @@ -294,26 +295,26 @@ std::vector GetCacheSizesFromKVFS() { #ifdef BENCHMARK_OS_MACOSX std::vector GetCacheSizesMacOSX() { std::vector res; - std::array CacheCounts{{0, 0, 0, 0}}; - GetSysctl("hw.cacheconfig", &CacheCounts); + std::array cache_counts{{0, 0, 0, 0}}; + GetSysctl("hw.cacheconfig", &cache_counts); struct { std::string name; std::string type; int level; - uint64_t num_sharing; - } Cases[] = {{"hw.l1dcachesize", "Data", 1, CacheCounts[1]}, - {"hw.l1icachesize", "Instruction", 1, CacheCounts[1]}, - {"hw.l2cachesize", "Unified", 2, CacheCounts[2]}, - {"hw.l3cachesize", "Unified", 3, CacheCounts[3]}}; - for (auto& C : Cases) { + int num_sharing; + } cases[] = {{"hw.l1dcachesize", "Data", 1, cache_counts[1]}, + {"hw.l1icachesize", "Instruction", 1, 
cache_counts[1]}, + {"hw.l2cachesize", "Unified", 2, cache_counts[2]}, + {"hw.l3cachesize", "Unified", 3, cache_counts[3]}}; + for (auto& c : cases) { int val; - if (!GetSysctl(C.name, &val)) continue; + if (!GetSysctl(c.name, &val)) continue; CPUInfo::CacheInfo info; - info.type = C.type; - info.level = C.level; + info.type = c.type; + info.level = c.level; info.size = val; - info.num_sharing = static_cast(C.num_sharing); + info.num_sharing = c.num_sharing; res.push_back(std::move(info)); } return res; @@ -327,7 +328,7 @@ std::vector GetCacheSizesWindows() { using UPtr = std::unique_ptr; GetLogicalProcessorInformation(nullptr, &buffer_size); - UPtr buff((PInfo*)malloc(buffer_size), &std::free); + UPtr buff(static_cast(std::malloc(buffer_size)), &std::free); if (!GetLogicalProcessorInformation(buff.get(), &buffer_size)) PrintErrorAndDie("Failed during call to GetLogicalProcessorInformation: ", GetLastError()); @@ -338,16 +339,16 @@ std::vector GetCacheSizesWindows() { for (; it != end; ++it) { if (it->Relationship != RelationCache) continue; using BitSet = std::bitset; - BitSet B(it->ProcessorMask); + BitSet b(it->ProcessorMask); // To prevent duplicates, only consider caches where CPU 0 is specified - if (!B.test(0)) continue; - CInfo* Cache = &it->Cache; + if (!b.test(0)) continue; + const CInfo& cache = it->Cache; CPUInfo::CacheInfo C; - C.num_sharing = static_cast(B.count()); - C.level = Cache->Level; - C.size = Cache->Size; + C.num_sharing = static_cast(b.count()); + C.level = cache.Level; + C.size = cache.Size; C.type = "Unknown"; - switch (Cache->Type) { + switch (cache.Type) { case CacheUnified: C.type = "Unified"; break; @@ -410,6 +411,8 @@ std::vector GetCacheSizes() { return GetCacheSizesWindows(); #elif defined(BENCHMARK_OS_QNX) return GetCacheSizesQNX(); +#elif defined(BENCHMARK_OS_QURT) + return std::vector(); #else return GetCacheSizesFromKVFS(); #endif @@ -418,23 +421,32 @@ std::vector GetCacheSizes() { std::string GetSystemName() { #if defined(BENCHMARK_OS_WINDOWS) std::string str; - const unsigned COUNT = MAX_COMPUTERNAME_LENGTH + 1; + static constexpr int COUNT = MAX_COMPUTERNAME_LENGTH + 1; TCHAR hostname[COUNT] = {'\0'}; DWORD DWCOUNT = COUNT; if (!GetComputerName(hostname, &DWCOUNT)) return std::string(""); #ifndef UNICODE str = std::string(hostname, DWCOUNT); #else - // Using wstring_convert, Is deprecated in C++17 - using convert_type = std::codecvt_utf8; - std::wstring_convert converter; - std::wstring wStr(hostname, DWCOUNT); - str = converter.to_bytes(wStr); + // `WideCharToMultiByte` returns `0` when conversion fails. 
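The hostname conversion in the next few lines swaps the deprecated std::wstring_convert for two WideCharToMultiByte calls: the first, with a null output buffer, returns the required byte count, and the second performs the actual UTF-16 to UTF-8 conversion. The idiom in a condensed, Windows-only form (error handling kept minimal; `WideToUtf8` is just an illustrative name):

    #if defined(_WIN32)
    #include <windows.h>

    #include <cstddef>
    #include <string>

    std::string WideToUtf8(const std::wstring& w) {
      if (w.empty()) return std::string();
      // First pass: ask for the size of the UTF-8 output in bytes.
      int len = WideCharToMultiByte(CP_UTF8, WC_ERR_INVALID_CHARS, w.data(),
                                    static_cast<int>(w.size()), nullptr, 0,
                                    nullptr, nullptr);
      if (len <= 0) return std::string();
      // Second pass: write the converted bytes into the sized buffer.
      std::string out(static_cast<std::size_t>(len), '\0');
      WideCharToMultiByte(CP_UTF8, WC_ERR_INVALID_CHARS, w.data(),
                          static_cast<int>(w.size()), &out[0], len, nullptr,
                          nullptr);
      return out;
    }
    #endif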
+ int len = WideCharToMultiByte(CP_UTF8, WC_ERR_INVALID_CHARS, hostname, + DWCOUNT, NULL, 0, NULL, NULL); + str.resize(len); + WideCharToMultiByte(CP_UTF8, WC_ERR_INVALID_CHARS, hostname, DWCOUNT, &str[0], + str.size(), NULL, NULL); #endif return str; -#else // defined(BENCHMARK_OS_WINDOWS) +#elif defined(BENCHMARK_OS_QURT) + std::string str = "Hexagon DSP"; + qurt_arch_version_t arch_version_struct; + if (qurt_sysenv_get_arch_version(&arch_version_struct) == QURT_EOK) { + str += " v"; + str += std::to_string(arch_version_struct.arch_version); + } + return str; +#else #ifndef HOST_NAME_MAX -#ifdef BENCHMARK_HAS_SYSCTL // BSD/Mac Doesnt have HOST_NAME_MAX defined +#ifdef BENCHMARK_HAS_SYSCTL // BSD/Mac doesn't have HOST_NAME_MAX defined #define HOST_NAME_MAX 64 #elif defined(BENCHMARK_OS_NACL) #define HOST_NAME_MAX 64 @@ -442,6 +454,8 @@ std::string GetSystemName() { #define HOST_NAME_MAX 154 #elif defined(BENCHMARK_OS_RTEMS) #define HOST_NAME_MAX 256 +#elif defined(BENCHMARK_OS_SOLARIS) +#define HOST_NAME_MAX MAXHOSTNAMELEN #else #pragma message("HOST_NAME_MAX not defined. using 64") #define HOST_NAME_MAX 64 @@ -456,8 +470,8 @@ std::string GetSystemName() { int GetNumCPUs() { #ifdef BENCHMARK_HAS_SYSCTL - int NumCPU = -1; - if (GetSysctl("hw.ncpu", &NumCPU)) return NumCPU; + int num_cpu = -1; + if (GetSysctl("hw.ncpu", &num_cpu)) return num_cpu; fprintf(stderr, "Err: %s\n", strerror(errno)); std::exit(EXIT_FAILURE); #elif defined(BENCHMARK_OS_WINDOWS) @@ -471,17 +485,23 @@ int GetNumCPUs() { // group #elif defined(BENCHMARK_OS_SOLARIS) // Returns -1 in case of a failure. - int NumCPU = sysconf(_SC_NPROCESSORS_ONLN); - if (NumCPU < 0) { + long num_cpu = sysconf(_SC_NPROCESSORS_ONLN); + if (num_cpu < 0) { fprintf(stderr, "sysconf(_SC_NPROCESSORS_ONLN) failed with error: %s\n", strerror(errno)); } - return NumCPU; + return (int)num_cpu; #elif defined(BENCHMARK_OS_QNX) return static_cast(_syspage_ptr->num_cpu); +#elif defined(BENCHMARK_OS_QURT) + qurt_sysenv_max_hthreads_t hardware_threads; + if (qurt_sysenv_get_max_hw_threads(&hardware_threads) != QURT_EOK) { + hardware_threads.max_hthreads = 1; + } + return hardware_threads.max_hthreads; #else - int NumCPUs = 0; - int MaxID = -1; + int num_cpus = 0; + int max_id = -1; std::ifstream f("/proc/cpuinfo"); if (!f.is_open()) { std::cerr << "failed to open /proc/cpuinfo\n"; @@ -491,21 +511,21 @@ int GetNumCPUs() { std::string ln; while (std::getline(f, ln)) { if (ln.empty()) continue; - size_t SplitIdx = ln.find(':'); + std::size_t split_idx = ln.find(':'); std::string value; #if defined(__s390__) // s390 has another format in /proc/cpuinfo // it needs to be parsed differently - if (SplitIdx != std::string::npos) - value = ln.substr(Key.size() + 1, SplitIdx - Key.size() - 1); + if (split_idx != std::string::npos) + value = ln.substr(Key.size() + 1, split_idx - Key.size() - 1); #else - if (SplitIdx != std::string::npos) value = ln.substr(SplitIdx + 1); + if (split_idx != std::string::npos) value = ln.substr(split_idx + 1); #endif if (ln.size() >= Key.size() && ln.compare(0, Key.size(), Key) == 0) { - NumCPUs++; + num_cpus++; if (!value.empty()) { - int CurID = benchmark::stoi(value); - MaxID = std::max(CurID, MaxID); + const int cur_id = benchmark::stoi(value); + max_id = std::max(cur_id, max_id); } } } @@ -519,16 +539,90 @@ int GetNumCPUs() { } f.close(); - if ((MaxID + 1) != NumCPUs) { + if ((max_id + 1) != num_cpus) { fprintf(stderr, "CPU ID assignments in /proc/cpuinfo seem messed up." 
" This is usually caused by a bad BIOS.\n"); } - return NumCPUs; + return num_cpus; #endif BENCHMARK_UNREACHABLE(); } +class ThreadAffinityGuard final { + public: + ThreadAffinityGuard() : reset_affinity(SetAffinity()) { + if (!reset_affinity) + std::cerr << "***WARNING*** Failed to set thread affinity. Estimated CPU " + "frequency may be incorrect." + << std::endl; + } + + ~ThreadAffinityGuard() { + if (!reset_affinity) return; + +#if defined(BENCHMARK_HAS_PTHREAD_AFFINITY) + int ret = pthread_setaffinity_np(self, sizeof(previous_affinity), + &previous_affinity); + if (ret == 0) return; +#elif defined(BENCHMARK_OS_WINDOWS_WIN32) + DWORD_PTR ret = SetThreadAffinityMask(self, previous_affinity); + if (ret != 0) return; +#endif // def BENCHMARK_HAS_PTHREAD_AFFINITY + PrintErrorAndDie("Failed to reset thread affinity"); + } + + ThreadAffinityGuard(ThreadAffinityGuard&&) = delete; + ThreadAffinityGuard(const ThreadAffinityGuard&) = delete; + ThreadAffinityGuard& operator=(ThreadAffinityGuard&&) = delete; + ThreadAffinityGuard& operator=(const ThreadAffinityGuard&) = delete; + + private: + bool SetAffinity() { +#if defined(BENCHMARK_HAS_PTHREAD_AFFINITY) + int ret; + self = pthread_self(); + ret = pthread_getaffinity_np(self, sizeof(previous_affinity), + &previous_affinity); + if (ret != 0) return false; + + cpu_set_t affinity; + memcpy(&affinity, &previous_affinity, sizeof(affinity)); + + bool is_first_cpu = true; + + for (int i = 0; i < CPU_SETSIZE; ++i) + if (CPU_ISSET(i, &affinity)) { + if (is_first_cpu) + is_first_cpu = false; + else + CPU_CLR(i, &affinity); + } + + if (is_first_cpu) return false; + + ret = pthread_setaffinity_np(self, sizeof(affinity), &affinity); + return ret == 0; +#elif defined(BENCHMARK_OS_WINDOWS_WIN32) + self = GetCurrentThread(); + DWORD_PTR mask = static_cast(1) << GetCurrentProcessorNumber(); + previous_affinity = SetThreadAffinityMask(self, mask); + return previous_affinity != 0; +#else + return false; +#endif // def BENCHMARK_HAS_PTHREAD_AFFINITY + } + +#if defined(BENCHMARK_HAS_PTHREAD_AFFINITY) + pthread_t self; + cpu_set_t previous_affinity; +#elif defined(BENCHMARK_OS_WINDOWS_WIN32) + HANDLE self; + DWORD_PTR previous_affinity; +#endif // def BENCHMARK_HAS_PTHREAD_AFFINITY + bool reset_affinity; +}; + double GetCPUCyclesPerSecond(CPUInfo::Scaling scaling) { // Currently, scaling is only used on linux path here, // suppress diagnostics about it being unused on other paths. @@ -569,7 +663,7 @@ double GetCPUCyclesPerSecond(CPUInfo::Scaling scaling) { return error_value; } - auto startsWithKey = [](std::string const& Value, std::string const& Key) { + auto StartsWithKey = [](std::string const& Value, std::string const& Key) { if (Key.size() > Value.size()) return false; auto Cmp = [&](char X, char Y) { return std::tolower(X) == std::tolower(Y); @@ -580,18 +674,18 @@ double GetCPUCyclesPerSecond(CPUInfo::Scaling scaling) { std::string ln; while (std::getline(f, ln)) { if (ln.empty()) continue; - size_t SplitIdx = ln.find(':'); + std::size_t split_idx = ln.find(':'); std::string value; - if (SplitIdx != std::string::npos) value = ln.substr(SplitIdx + 1); + if (split_idx != std::string::npos) value = ln.substr(split_idx + 1); // When parsing the "cpu MHz" and "bogomips" (fallback) entries, we only // accept positive values. Some environments (virtual machines) report zero, // which would cause infinite looping in WallTime_Init. 
- if (startsWithKey(ln, "cpu MHz")) { + if (StartsWithKey(ln, "cpu MHz")) { if (!value.empty()) { double cycles_per_second = benchmark::stod(value) * 1000000.0; if (cycles_per_second > 0) return cycles_per_second; } - } else if (startsWithKey(ln, "bogomips")) { + } else if (StartsWithKey(ln, "bogomips")) { if (!value.empty()) { bogo_clock = benchmark::stod(value) * 1000000.0; if (bogo_clock < 0.0) bogo_clock = error_value; @@ -613,7 +707,7 @@ double GetCPUCyclesPerSecond(CPUInfo::Scaling scaling) { if (bogo_clock >= 0.0) return bogo_clock; #elif defined BENCHMARK_HAS_SYSCTL - constexpr auto* FreqStr = + constexpr auto* freqStr = #if defined(BENCHMARK_OS_FREEBSD) || defined(BENCHMARK_OS_NETBSD) "machdep.tsc_freq"; #elif defined BENCHMARK_OS_OPENBSD @@ -625,14 +719,17 @@ double GetCPUCyclesPerSecond(CPUInfo::Scaling scaling) { #endif unsigned long long hz = 0; #if defined BENCHMARK_OS_OPENBSD - if (GetSysctl(FreqStr, &hz)) return hz * 1000000; + if (GetSysctl(freqStr, &hz)) return hz * 1000000; #else - if (GetSysctl(FreqStr, &hz)) return hz; + if (GetSysctl(freqStr, &hz)) return hz; #endif fprintf(stderr, "Unable to determine clock rate from sysctl: %s: %s\n", - FreqStr, strerror(errno)); + freqStr, strerror(errno)); + fprintf(stderr, + "This does not affect benchmark measurements, only the " + "metadata output.\n"); -#elif defined BENCHMARK_OS_WINDOWS +#elif defined BENCHMARK_OS_WINDOWS_WIN32 // In NT, read MHz from the registry. If we fail to do so or we're in win9x // then make a crude estimate. DWORD data, data_size = sizeof(data); @@ -641,15 +738,16 @@ double GetCPUCyclesPerSecond(CPUInfo::Scaling scaling) { SHGetValueA(HKEY_LOCAL_MACHINE, "HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0", "~MHz", nullptr, &data, &data_size))) - return static_cast((int64_t)data * - (int64_t)(1000 * 1000)); // was mhz + return static_cast(static_cast(data) * + static_cast(1000 * 1000)); // was mhz #elif defined(BENCHMARK_OS_SOLARIS) kstat_ctl_t* kc = kstat_open(); if (!kc) { std::cerr << "failed to open /dev/kstat\n"; return -1; } - kstat_t* ksp = kstat_lookup(kc, (char*)"cpu_info", -1, (char*)"cpu_info0"); + kstat_t* ksp = kstat_lookup(kc, const_cast("cpu_info"), -1, + const_cast("cpu_info0")); if (!ksp) { std::cerr << "failed to lookup in /dev/kstat\n"; return -1; @@ -658,8 +756,8 @@ double GetCPUCyclesPerSecond(CPUInfo::Scaling scaling) { std::cerr << "failed to read from /dev/kstat\n"; return -1; } - kstat_named_t* knp = - (kstat_named_t*)kstat_data_lookup(ksp, (char*)"current_clock_Hz"); + kstat_named_t* knp = (kstat_named_t*)kstat_data_lookup( + ksp, const_cast("current_clock_Hz")); if (!knp) { std::cerr << "failed to lookup data in /dev/kstat\n"; return -1; @@ -675,20 +773,52 @@ double GetCPUCyclesPerSecond(CPUInfo::Scaling scaling) { #elif defined(BENCHMARK_OS_QNX) return static_cast((int64_t)(SYSPAGE_ENTRY(cpuinfo)->speed) * (int64_t)(1000 * 1000)); +#elif defined(BENCHMARK_OS_QURT) + // QuRT doesn't provide any API to query Hexagon frequency. + return 1000000000; #endif // If we've fallen through, attempt to roughly estimate the CPU clock rate. - const int estimate_time_ms = 1000; + + // Make sure to use the same cycle counter when starting and stopping the + // cycle timer. We just pin the current thread to a cpu in the previous + // affinity set. 
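With the thread pinned, the fallback frequency estimate in the hunk that follows replaces the old SleepForMilliseconds() wait with a busy loop over a cheap PRNG, then divides elapsed cycle ticks by elapsed wall time. An approximate standalone version (GCC/Clang on x86 assumed for __rdtsc, which stands in for the library's cycleclock::Now(); the non-x86 branch exists only to keep the sketch compiling):

    #include <chrono>
    #include <cstdint>
    #include <random>
    #if defined(__x86_64__) || defined(__i386__)
    #include <x86intrin.h>
    #endif

    static uint64_t ReadCycleCounter() {
    #if defined(__x86_64__) || defined(__i386__)
      return __rdtsc();
    #else
      // Placeholder so the sketch compiles elsewhere; not a real cycle count.
      return static_cast<uint64_t>(
          std::chrono::steady_clock::now().time_since_epoch().count());
    #endif
    }

    double EstimateCyclesPerSecond() {
      const auto start_time = std::chrono::steady_clock::now();
      const uint64_t start_ticks = ReadCycleCounter();
      std::minstd_rand rng(12345);
      volatile std::minstd_rand::result_type sink = 0;  // keeps the loop alive
      while (std::chrono::steady_clock::now() - start_time <
             std::chrono::seconds(1)) {
        rng.discard(10000);
        sink = sink + rng();
      }
      const uint64_t end_ticks = ReadCycleCounter();
      const double elapsed = std::chrono::duration<double>(
                                 std::chrono::steady_clock::now() - start_time)
                                 .count();
      return static_cast<double>(end_ticks - start_ticks) / elapsed;
    }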
+ ThreadAffinityGuard affinity_guard; + + static constexpr double estimate_time_s = 1.0; + const double start_time = ChronoClockNow(); const auto start_ticks = cycleclock::Now(); - SleepForMilliseconds(estimate_time_ms); - return static_cast(cycleclock::Now() - start_ticks); + + // Impose load instead of calling sleep() to make sure the cycle counter + // works. + using PRNG = std::minstd_rand; + using Result = PRNG::result_type; + PRNG rng(static_cast(start_ticks)); + + Result state = 0; + + do { + static constexpr size_t batch_size = 10000; + rng.discard(batch_size); + state += rng(); + + } while (ChronoClockNow() - start_time < estimate_time_s); + + DoNotOptimize(state); + + const auto end_ticks = cycleclock::Now(); + const double end_time = ChronoClockNow(); + + return static_cast(end_ticks - start_ticks) / (end_time - start_time); + // Reset the affinity of current thread when the lifetime of affinity_guard + // ends. } std::vector GetLoadAvg() { #if (defined BENCHMARK_OS_FREEBSD || defined(BENCHMARK_OS_LINUX) || \ defined BENCHMARK_OS_MACOSX || defined BENCHMARK_OS_NETBSD || \ defined BENCHMARK_OS_OPENBSD || defined BENCHMARK_OS_DRAGONFLY) && \ - !defined(__ANDROID__) - constexpr int kMaxSamples = 3; + !(defined(__ANDROID__) && __ANDROID_API__ < 29) + static constexpr int kMaxSamples = 3; std::vector res(kMaxSamples, 0.0); const int nelem = getloadavg(res.data(), kMaxSamples); if (nelem < 1) { diff --git a/src/thread_manager.h b/src/thread_manager.h index 4680285..819b3c4 100644 --- a/src/thread_manager.h +++ b/src/thread_manager.h @@ -43,8 +43,8 @@ class ThreadManager { double manual_time_used = 0; int64_t complexity_n = 0; std::string report_label_; - std::string error_message_; - bool has_error_ = false; + std::string skip_message_; + internal::Skipped skipped_ = internal::NotSkipped; UserCounters counters; }; GUARDED_BY(GetBenchmarkMutex()) Result results; diff --git a/src/timers.cc b/src/timers.cc index 21d3db2..b23feea 100644 --- a/src/timers.cc +++ b/src/timers.cc @@ -23,7 +23,7 @@ #include #else #include -#ifndef BENCHMARK_OS_FUCHSIA +#if !defined(BENCHMARK_OS_FUCHSIA) && !defined(BENCHMARK_OS_QURT) #include #endif #include @@ -38,6 +38,9 @@ #include #include #endif +#if defined(BENCHMARK_OS_QURT) +#include +#endif #endif #ifdef BENCHMARK_OS_EMSCRIPTEN @@ -56,7 +59,6 @@ #include "check.h" #include "log.h" -#include "sleep.h" #include "string_util.h" namespace benchmark { @@ -65,6 +67,9 @@ namespace benchmark { #if defined(__GNUC__) #pragma GCC diagnostic ignored "-Wunused-function" #endif +#if defined(__NVCOMPILER) +#pragma diag_suppress declared_but_not_referenced +#endif namespace { #if defined(BENCHMARK_OS_WINDOWS) @@ -79,7 +84,7 @@ double MakeTime(FILETIME const& kernel_time, FILETIME const& user_time) { static_cast(user.QuadPart)) * 1e-7; } -#elif !defined(BENCHMARK_OS_FUCHSIA) +#elif !defined(BENCHMARK_OS_FUCHSIA) && !defined(BENCHMARK_OS_QURT) double MakeTime(struct rusage const& ru) { return (static_cast(ru.ru_utime.tv_sec) + static_cast(ru.ru_utime.tv_usec) * 1e-6 + @@ -119,11 +124,15 @@ double ProcessCPUUsage() { &user_time)) return MakeTime(kernel_time, user_time); DiagnoseAndExit("GetProccessTimes() failed"); +#elif defined(BENCHMARK_OS_QURT) + return static_cast( + qurt_timer_timetick_to_us(qurt_timer_get_ticks())) * + 1.0e-6; #elif defined(BENCHMARK_OS_EMSCRIPTEN) // clock_gettime(CLOCK_PROCESS_CPUTIME_ID, ...) returns 0 on Emscripten. // Use Emscripten-specific API. 
Reported CPU time would be exactly the // same as total time, but this is ok because there aren't long-latency - // syncronous system calls in Emscripten. + // synchronous system calls in Emscripten. return emscripten_get_now() * 1e-3; #elif defined(CLOCK_PROCESS_CPUTIME_ID) && !defined(BENCHMARK_OS_MACOSX) // FIXME We want to use clock_gettime, but its not available in MacOS 10.11. @@ -149,6 +158,10 @@ double ThreadCPUUsage() { GetThreadTimes(this_thread, &creation_time, &exit_time, &kernel_time, &user_time); return MakeTime(kernel_time, user_time); +#elif defined(BENCHMARK_OS_QURT) + return static_cast( + qurt_timer_timetick_to_us(qurt_timer_get_ticks())) * + 1.0e-6; #elif defined(BENCHMARK_OS_MACOSX) // FIXME We want to use clock_gettime, but its not available in MacOS 10.11. // See https://github.com/google/benchmark/pull/292 diff --git a/test/AssemblyTests.cmake b/test/AssemblyTests.cmake index 3d07858..c43c711 100644 --- a/test/AssemblyTests.cmake +++ b/test/AssemblyTests.cmake @@ -1,3 +1,23 @@ +set(CLANG_SUPPORTED_VERSION "5.0.0") +set(GCC_SUPPORTED_VERSION "5.5.0") + +if (CMAKE_CXX_COMPILER_ID MATCHES "Clang") + if (NOT CMAKE_CXX_COMPILER_VERSION VERSION_EQUAL ${CLANG_SUPPORTED_VERSION}) + message (WARNING + "Unsupported Clang version " ${CMAKE_CXX_COMPILER_VERSION} + ". Expected is " ${CLANG_SUPPORTED_VERSION} + ". Assembly tests may be broken.") + endif() +elseif(CMAKE_CXX_COMPILER_ID MATCHES "GNU") + if (NOT CMAKE_CXX_COMPILER_VERSION VERSION_EQUAL ${GCC_SUPPORTED_VERSION}) + message (WARNING + "Unsupported GCC version " ${CMAKE_CXX_COMPILER_VERSION} + ". Expected is " ${GCC_SUPPORTED_VERSION} + ". Assembly tests may be broken.") + endif() +else() + message (WARNING "Unsupported compiler. Assembly tests may be broken.") +endif() include(split_list) @@ -23,6 +43,7 @@ string(TOUPPER "${CMAKE_CXX_COMPILER_ID}" ASM_TEST_COMPILER) macro(add_filecheck_test name) cmake_parse_arguments(ARG "" "" "CHECK_PREFIXES" ${ARGV}) add_library(${name} OBJECT ${name}.cc) + target_link_libraries(${name} PRIVATE benchmark::benchmark) set_target_properties(${name} PROPERTIES COMPILE_FLAGS "-S ${ASM_TEST_FLAGS}") set(ASM_OUTPUT_FILE "${CMAKE_CURRENT_BINARY_DIR}/${name}.s") add_custom_target(copy_${name} ALL diff --git a/test/BUILD b/test/BUILD index df700a7..ea34fd4 100644 --- a/test/BUILD +++ b/test/BUILD @@ -1,8 +1,18 @@ +load("@rules_cc//cc:defs.bzl", "cc_library", "cc_test") + +platform( + name = "windows", + constraint_values = [ + "@platforms//os:windows", + ], +) + TEST_COPTS = [ "-pedantic", "-pedantic-errors", "-std=c++11", "-Wall", + "-Wconversion", "-Wextra", "-Wshadow", # "-Wshorten-64-to-32", @@ -10,66 +20,108 @@ TEST_COPTS = [ "-fstrict-aliasing", ] -PER_SRC_COPTS = ({ - "cxx03_test.cc": ["-std=c++03"], - # Some of the issues with DoNotOptimize only occur when optimization is enabled +# Some of the issues with DoNotOptimize only occur when optimization is enabled +PER_SRC_COPTS = { "donotoptimize_test.cc": ["-O3"], -}) +} -TEST_ARGS = ["--benchmark_min_time=0.01"] +TEST_ARGS = ["--benchmark_min_time=0.01s"] -PER_SRC_TEST_ARGS = ({ +PER_SRC_TEST_ARGS = { "user_counters_tabular_test.cc": ["--benchmark_counters_tabular=true"], "repetitions_test.cc": [" --benchmark_repetitions=3"], - "spec_arg_test.cc" : ["--benchmark_filter=BM_NotChosen"], -}) - -load("@rules_cc//cc:defs.bzl", "cc_library", "cc_test") + "spec_arg_test.cc": ["--benchmark_filter=BM_NotChosen"], + "spec_arg_verbosity_test.cc": ["--v=42"], +} cc_library( name = "output_test_helper", testonly = 1, srcs = 
["output_test_helper.cc"], hdrs = ["output_test.h"], - copts = TEST_COPTS, + copts = select({ + "//:windows": [], + "//conditions:default": TEST_COPTS, + }), deps = [ "//:benchmark", "//:benchmark_internal_headers", ], ) +# Tests that use gtest. These rely on `gtest_main`. +[ + cc_test( + name = test_src[:-len(".cc")], + size = "small", + srcs = [test_src], + copts = select({ + "//:windows": [], + "//conditions:default": TEST_COPTS, + }) + PER_SRC_COPTS.get(test_src, []), + deps = [ + "//:benchmark", + "//:benchmark_internal_headers", + "@com_google_googletest//:gtest", + "@com_google_googletest//:gtest_main", + ], + ) + for test_src in glob(["*_gtest.cc"]) +] + +# Tests that do not use gtest. These have their own `main` defined. [ cc_test( name = test_src[:-len(".cc")], size = "small", srcs = [test_src], args = TEST_ARGS + PER_SRC_TEST_ARGS.get(test_src, []), - copts = TEST_COPTS + PER_SRC_COPTS.get(test_src, []), + copts = select({ + "//:windows": [], + "//conditions:default": TEST_COPTS, + }) + PER_SRC_COPTS.get(test_src, []), deps = [ ":output_test_helper", "//:benchmark", "//:benchmark_internal_headers", - "@com_google_googletest//:gtest", - ] + ( - ["@com_google_googletest//:gtest_main"] if (test_src[-len("gtest.cc"):] == "gtest.cc") else [] - ), + ], # FIXME: Add support for assembly tests to bazel. # See Issue #556 # https://github.com/google/benchmark/issues/556 ) for test_src in glob( - ["*test.cc"], + ["*_test.cc"], exclude = [ "*_assembly_test.cc", + "cxx03_test.cc", "link_main_test.cc", ], ) ] +cc_test( + name = "cxx03_test", + size = "small", + srcs = ["cxx03_test.cc"], + copts = TEST_COPTS + ["-std=c++03"], + target_compatible_with = select({ + "//:windows": ["@platforms//:incompatible"], + "//conditions:default": [], + }), + deps = [ + ":output_test_helper", + "//:benchmark", + "//:benchmark_internal_headers", + ], +) + cc_test( name = "link_main_test", size = "small", srcs = ["link_main_test.cc"], - copts = TEST_COPTS, + copts = select({ + "//:windows": [], + "//conditions:default": TEST_COPTS, + }), deps = ["//:benchmark_main"], ) diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt index 162af53..fd88131 100644 --- a/test/CMakeLists.txt +++ b/test/CMakeLists.txt @@ -1,5 +1,7 @@ # Enable the tests +set(THREADS_PREFER_PTHREAD_FLAG ON) + find_package(Threads REQUIRED) include(CheckCXXCompilerFlag) @@ -22,6 +24,10 @@ if( NOT uppercase_CMAKE_BUILD_TYPE STREQUAL "DEBUG" ) endforeach() endif() +if (NOT BUILD_SHARED_LIBS) + add_definitions(-DBENCHMARK_STATIC_DEFINE) +endif() + check_cxx_compiler_flag(-O3 BENCHMARK_HAS_O3_FLAG) set(BENCHMARK_O3_FLAG "") if (BENCHMARK_HAS_O3_FLAG) @@ -35,10 +41,14 @@ if (DEFINED BENCHMARK_CXX_LINKER_FLAGS) endif() add_library(output_test_helper STATIC output_test_helper.cc output_test.h) +target_link_libraries(output_test_helper PRIVATE benchmark::benchmark) macro(compile_benchmark_test name) add_executable(${name} "${name}.cc") target_link_libraries(${name} benchmark::benchmark ${CMAKE_THREAD_LIBS_INIT}) + if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "NVHPC") + target_compile_options( ${name} PRIVATE --diag_suppress partial_override ) + endif() endmacro(compile_benchmark_test) macro(compile_benchmark_test_with_main name) @@ -48,26 +58,35 @@ endmacro(compile_benchmark_test_with_main) macro(compile_output_test name) add_executable(${name} "${name}.cc" output_test.h) - target_link_libraries(${name} output_test_helper benchmark::benchmark + target_link_libraries(${name} output_test_helper benchmark::benchmark_main ${BENCHMARK_CXX_LIBRARIES} 
${CMAKE_THREAD_LIBS_INIT}) endmacro(compile_output_test) # Demonstration executable compile_benchmark_test(benchmark_test) -add_test(NAME benchmark COMMAND benchmark_test --benchmark_min_time=0.01) +add_test(NAME benchmark COMMAND benchmark_test --benchmark_min_time=0.01s) compile_benchmark_test(spec_arg_test) add_test(NAME spec_arg COMMAND spec_arg_test --benchmark_filter=BM_NotChosen) +compile_benchmark_test(spec_arg_verbosity_test) +add_test(NAME spec_arg_verbosity COMMAND spec_arg_verbosity_test --v=42) + compile_benchmark_test(benchmark_setup_teardown_test) add_test(NAME benchmark_setup_teardown COMMAND benchmark_setup_teardown_test) compile_benchmark_test(filter_test) macro(add_filter_test name filter expect) - add_test(NAME ${name} COMMAND filter_test --benchmark_min_time=0.01 --benchmark_filter=${filter} ${expect}) + add_test(NAME ${name} COMMAND filter_test --benchmark_min_time=0.01s --benchmark_filter=${filter} ${expect}) add_test(NAME ${name}_list_only COMMAND filter_test --benchmark_list_tests --benchmark_filter=${filter} ${expect}) endmacro(add_filter_test) +compile_benchmark_test(benchmark_min_time_flag_time_test) +add_test(NAME min_time_flag_time COMMAND benchmark_min_time_flag_time_test) + +compile_benchmark_test(benchmark_min_time_flag_iters_test) +add_test(NAME min_time_flag_iters COMMAND benchmark_min_time_flag_iters_test) + add_filter_test(filter_simple "Foo" 3) add_filter_test(filter_simple_negative "-Foo" 2) add_filter_test(filter_suffix "BM_.*" 4) @@ -88,78 +107,83 @@ add_filter_test(filter_regex_end ".*Ba$" 1) add_filter_test(filter_regex_end_negative "-.*Ba$" 4) compile_benchmark_test(options_test) -add_test(NAME options_benchmarks COMMAND options_test --benchmark_min_time=0.01) +add_test(NAME options_benchmarks COMMAND options_test --benchmark_min_time=0.01s) compile_benchmark_test(basic_test) -add_test(NAME basic_benchmark COMMAND basic_test --benchmark_min_time=0.01) +add_test(NAME basic_benchmark COMMAND basic_test --benchmark_min_time=0.01s) compile_output_test(repetitions_test) -add_test(NAME repetitions_benchmark COMMAND repetitions_test --benchmark_min_time=0.01 --benchmark_repetitions=3) +add_test(NAME repetitions_benchmark COMMAND repetitions_test --benchmark_min_time=0.01s --benchmark_repetitions=3) compile_benchmark_test(diagnostics_test) -add_test(NAME diagnostics_test COMMAND diagnostics_test --benchmark_min_time=0.01) +add_test(NAME diagnostics_test COMMAND diagnostics_test --benchmark_min_time=0.01s) compile_benchmark_test(skip_with_error_test) -add_test(NAME skip_with_error_test COMMAND skip_with_error_test --benchmark_min_time=0.01) +add_test(NAME skip_with_error_test COMMAND skip_with_error_test --benchmark_min_time=0.01s) compile_benchmark_test(donotoptimize_test) +# Enable errors for deprecated deprecations (DoNotOptimize(Tp const& value)). 
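donotoptimize_test is now built with -Werror=deprecated-declarations where available, because the DoNotOptimize(Tp const&) overload, which binds to rvalues such as state.iterations(), is deprecated; that is also why the test updates throughout this patch first store the value in a named local. The preferred shape, as a minimal benchmark (BM_IterationsLvalue is an illustrative name, not one of the patched tests):

    #include "benchmark/benchmark.h"

    static void BM_IterationsLvalue(benchmark::State& state) {
      for (auto _ : state) {
        auto iterations = state.iterations();  // materialize an lvalue first
        benchmark::DoNotOptimize(iterations);  // non-deprecated overload
      }
    }
    BENCHMARK(BM_IterationsLvalue);
    BENCHMARK_MAIN();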
+check_cxx_compiler_flag(-Werror=deprecated-declarations BENCHMARK_HAS_DEPRECATED_DECLARATIONS_FLAG) +if (BENCHMARK_HAS_DEPRECATED_DECLARATIONS_FLAG) + target_compile_options (donotoptimize_test PRIVATE "-Werror=deprecated-declarations") +endif() # Some of the issues with DoNotOptimize only occur when optimization is enabled check_cxx_compiler_flag(-O3 BENCHMARK_HAS_O3_FLAG) if (BENCHMARK_HAS_O3_FLAG) set_target_properties(donotoptimize_test PROPERTIES COMPILE_FLAGS "-O3") endif() -add_test(NAME donotoptimize_test COMMAND donotoptimize_test --benchmark_min_time=0.01) +add_test(NAME donotoptimize_test COMMAND donotoptimize_test --benchmark_min_time=0.01s) compile_benchmark_test(fixture_test) -add_test(NAME fixture_test COMMAND fixture_test --benchmark_min_time=0.01) +add_test(NAME fixture_test COMMAND fixture_test --benchmark_min_time=0.01s) compile_benchmark_test(register_benchmark_test) -add_test(NAME register_benchmark_test COMMAND register_benchmark_test --benchmark_min_time=0.01) +add_test(NAME register_benchmark_test COMMAND register_benchmark_test --benchmark_min_time=0.01s) compile_benchmark_test(map_test) -add_test(NAME map_test COMMAND map_test --benchmark_min_time=0.01) +add_test(NAME map_test COMMAND map_test --benchmark_min_time=0.01s) compile_benchmark_test(multiple_ranges_test) -add_test(NAME multiple_ranges_test COMMAND multiple_ranges_test --benchmark_min_time=0.01) +add_test(NAME multiple_ranges_test COMMAND multiple_ranges_test --benchmark_min_time=0.01s) compile_benchmark_test(args_product_test) -add_test(NAME args_product_test COMMAND args_product_test --benchmark_min_time=0.01) +add_test(NAME args_product_test COMMAND args_product_test --benchmark_min_time=0.01s) compile_benchmark_test_with_main(link_main_test) -add_test(NAME link_main_test COMMAND link_main_test --benchmark_min_time=0.01) +add_test(NAME link_main_test COMMAND link_main_test --benchmark_min_time=0.01s) compile_output_test(reporter_output_test) -add_test(NAME reporter_output_test COMMAND reporter_output_test --benchmark_min_time=0.01) +add_test(NAME reporter_output_test COMMAND reporter_output_test --benchmark_min_time=0.01s) compile_output_test(templated_fixture_test) -add_test(NAME templated_fixture_test COMMAND templated_fixture_test --benchmark_min_time=0.01) +add_test(NAME templated_fixture_test COMMAND templated_fixture_test --benchmark_min_time=0.01s) compile_output_test(user_counters_test) -add_test(NAME user_counters_test COMMAND user_counters_test --benchmark_min_time=0.01) +add_test(NAME user_counters_test COMMAND user_counters_test --benchmark_min_time=0.01s) compile_output_test(perf_counters_test) -add_test(NAME perf_counters_test COMMAND perf_counters_test --benchmark_min_time=0.01 --benchmark_perf_counters=CYCLES,BRANCHES) +add_test(NAME perf_counters_test COMMAND perf_counters_test --benchmark_min_time=0.01s --benchmark_perf_counters=CYCLES,BRANCHES) compile_output_test(internal_threading_test) -add_test(NAME internal_threading_test COMMAND internal_threading_test --benchmark_min_time=0.01) +add_test(NAME internal_threading_test COMMAND internal_threading_test --benchmark_min_time=0.01s) compile_output_test(report_aggregates_only_test) -add_test(NAME report_aggregates_only_test COMMAND report_aggregates_only_test --benchmark_min_time=0.01) +add_test(NAME report_aggregates_only_test COMMAND report_aggregates_only_test --benchmark_min_time=0.01s) compile_output_test(display_aggregates_only_test) -add_test(NAME display_aggregates_only_test COMMAND display_aggregates_only_test 
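The switch to --benchmark_min_time=0.01s in the tests above reflects the flag's new value grammar: a trailing "s" (or a bare number) means seconds, while a trailing "x" requests an explicit iteration count, as exercised by benchmark_min_time_flag_time_test and benchmark_min_time_flag_iters_test added in this patch. A rough illustration of that grammar (MinTimeSpec and ParseMinTime are hypothetical helpers, not the library's actual parser):

    #include <cstdlib>
    #include <string>

    struct MinTimeSpec {
      bool has_iters;
      long iters;      // valid when has_iters is true
      double seconds;  // valid otherwise
    };

    MinTimeSpec ParseMinTime(const std::string& value) {
      MinTimeSpec spec{false, 0, 0.0};
      if (!value.empty() && (value.back() == 'x' || value.back() == 'X')) {
        spec.has_iters = true;  // e.g. "4x" -> run exactly 4 iterations
        spec.iters = std::strtol(value.c_str(), nullptr, 10);
      } else {
        std::string v = value;
        if (!v.empty() && (v.back() == 's' || v.back() == 'S')) v.pop_back();
        spec.seconds = std::strtod(v.c_str(), nullptr);  // "0.01s" or "0.01"
      }
      return spec;
    }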
--benchmark_min_time=0.01) +add_test(NAME display_aggregates_only_test COMMAND display_aggregates_only_test --benchmark_min_time=0.01s) compile_output_test(user_counters_tabular_test) -add_test(NAME user_counters_tabular_test COMMAND user_counters_tabular_test --benchmark_counters_tabular=true --benchmark_min_time=0.01) +add_test(NAME user_counters_tabular_test COMMAND user_counters_tabular_test --benchmark_counters_tabular=true --benchmark_min_time=0.01s) compile_output_test(user_counters_thousands_test) -add_test(NAME user_counters_thousands_test COMMAND user_counters_thousands_test --benchmark_min_time=0.01) +add_test(NAME user_counters_thousands_test COMMAND user_counters_thousands_test --benchmark_min_time=0.01s) compile_output_test(memory_manager_test) -add_test(NAME memory_manager_test COMMAND memory_manager_test --benchmark_min_time=0.01) +add_test(NAME memory_manager_test COMMAND memory_manager_test --benchmark_min_time=0.01s) -check_cxx_compiler_flag(-std=c++03 BENCHMARK_HAS_CXX03_FLAG) -if (BENCHMARK_HAS_CXX03_FLAG) +# MSVC does not allow to set the language standard to C++98/03. +if(NOT CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") compile_benchmark_test(cxx03_test) set_target_properties(cxx03_test PROPERTIES @@ -170,19 +194,25 @@ if (BENCHMARK_HAS_CXX03_FLAG) # causing the test to fail to compile. To prevent this we explicitly disable # the warning. check_cxx_compiler_flag(-Wno-odr BENCHMARK_HAS_WNO_ODR) - if (BENCHMARK_ENABLE_LTO AND BENCHMARK_HAS_WNO_ODR) - set_target_properties(cxx03_test - PROPERTIES - LINK_FLAGS "-Wno-odr") + check_cxx_compiler_flag(-Wno-lto-type-mismatch BENCHMARK_HAS_WNO_LTO_TYPE_MISMATCH) + # Cannot set_target_properties multiple times here because the warnings will + # be overwritten on each call + set (DISABLE_LTO_WARNINGS "") + if (BENCHMARK_HAS_WNO_ODR) + set(DISABLE_LTO_WARNINGS "${DISABLE_LTO_WARNINGS} -Wno-odr") endif() - add_test(NAME cxx03 COMMAND cxx03_test --benchmark_min_time=0.01) + if (BENCHMARK_HAS_WNO_LTO_TYPE_MISMATCH) + set(DISABLE_LTO_WARNINGS "${DISABLE_LTO_WARNINGS} -Wno-lto-type-mismatch") + endif() + set_target_properties(cxx03_test PROPERTIES LINK_FLAGS "${DISABLE_LTO_WARNINGS}") + add_test(NAME cxx03 COMMAND cxx03_test --benchmark_min_time=0.01s) endif() # Attempt to work around flaky test failures when running on Appveyor servers. 
if (DEFINED ENV{APPVEYOR}) - set(COMPLEXITY_MIN_TIME "0.5") + set(COMPLEXITY_MIN_TIME "0.5s") else() - set(COMPLEXITY_MIN_TIME "0.01") + set(COMPLEXITY_MIN_TIME "0.01s") endif() compile_output_test(complexity_test) add_test(NAME complexity_benchmark COMMAND complexity_test --benchmark_min_time=${COMPLEXITY_MIN_TIME}) @@ -210,6 +240,8 @@ if (BENCHMARK_ENABLE_GTEST_TESTS) add_gtest(statistics_gtest) add_gtest(string_util_gtest) add_gtest(perf_counters_gtest) + add_gtest(time_unit_gtest) + add_gtest(min_time_parse_gtest) endif(BENCHMARK_ENABLE_GTEST_TESTS) ############################################################################### diff --git a/test/args_product_test.cc b/test/args_product_test.cc index d44f391..63b8b71 100644 --- a/test/args_product_test.cc +++ b/test/args_product_test.cc @@ -23,7 +23,7 @@ class ArgsProductFixture : public ::benchmark::Fixture { {2, 15, 10, 9}, {4, 5, 6, 11}}) {} - void SetUp(const ::benchmark::State& state) BENCHMARK_OVERRIDE { + void SetUp(const ::benchmark::State& state) override { std::vector ranges = {state.range(0), state.range(1), state.range(2), state.range(3)}; @@ -34,7 +34,7 @@ class ArgsProductFixture : public ::benchmark::Fixture { // NOTE: This is not TearDown as we want to check after _all_ runs are // complete. - virtual ~ArgsProductFixture() { + ~ArgsProductFixture() override { if (actualValues != expectedValues) { std::cout << "EXPECTED\n"; for (const auto& v : expectedValues) { diff --git a/test/basic_test.cc b/test/basic_test.cc index 3a8fd42..cba1b0f 100644 --- a/test/basic_test.cc +++ b/test/basic_test.cc @@ -5,7 +5,8 @@ void BM_empty(benchmark::State& state) { for (auto _ : state) { - benchmark::DoNotOptimize(state.iterations()); + auto iterations = state.iterations(); + benchmark::DoNotOptimize(iterations); } } BENCHMARK(BM_empty); @@ -147,7 +148,7 @@ void BM_OneTemplateFunc(benchmark::State& state) { auto arg = state.range(0); T sum = 0; for (auto _ : state) { - sum += arg; + sum += static_cast(arg); } } BENCHMARK(BM_OneTemplateFunc)->Arg(1); @@ -159,8 +160,8 @@ void BM_TwoTemplateFunc(benchmark::State& state) { A sum = 0; B prod = 1; for (auto _ : state) { - sum += arg; - prod *= arg; + sum += static_cast(arg); + prod *= static_cast(arg); } } BENCHMARK(BM_TwoTemplateFunc)->Arg(1); diff --git a/test/benchmark_gtest.cc b/test/benchmark_gtest.cc index 14a885b..2c9e555 100644 --- a/test/benchmark_gtest.cc +++ b/test/benchmark_gtest.cc @@ -3,12 +3,12 @@ #include #include "../src/benchmark_register.h" +#include "benchmark/benchmark.h" #include "gmock/gmock.h" #include "gtest/gtest.h" namespace benchmark { namespace internal { -extern std::map* global_context; namespace { @@ -38,8 +38,9 @@ TEST(AddRangeTest, Advanced64) { TEST(AddRangeTest, FullRange8) { std::vector dst; - AddRange(&dst, int8_t{1}, std::numeric_limits::max(), 8); - EXPECT_THAT(dst, testing::ElementsAre(1, 8, 64, 127)); + AddRange(&dst, int8_t{1}, std::numeric_limits::max(), int8_t{8}); + EXPECT_THAT( + dst, testing::ElementsAre(int8_t{1}, int8_t{8}, int8_t{64}, int8_t{127})); } TEST(AddRangeTest, FullRange64) { @@ -129,11 +130,13 @@ TEST(AddRangeTest, FullNegativeRange64) { TEST(AddRangeTest, Simple8) { std::vector dst; - AddRange(&dst, 1, 8, 2); - EXPECT_THAT(dst, testing::ElementsAre(1, 2, 4, 8)); + AddRange(&dst, int8_t{1}, int8_t{8}, int8_t{2}); + EXPECT_THAT(dst, + testing::ElementsAre(int8_t{1}, int8_t{2}, int8_t{4}, int8_t{8})); } TEST(AddCustomContext, Simple) { + std::map *&global_context = GetGlobalContext(); EXPECT_THAT(global_context, nullptr); 
AddCustomContext("foo", "bar"); @@ -148,6 +151,7 @@ TEST(AddCustomContext, Simple) { } TEST(AddCustomContext, DuplicateKey) { + std::map *&global_context = GetGlobalContext(); EXPECT_THAT(global_context, nullptr); AddCustomContext("foo", "bar"); diff --git a/test/benchmark_min_time_flag_iters_test.cc b/test/benchmark_min_time_flag_iters_test.cc new file mode 100644 index 0000000..3de93a7 --- /dev/null +++ b/test/benchmark_min_time_flag_iters_test.cc @@ -0,0 +1,66 @@ +#include +#include +#include +#include +#include +#include + +#include "benchmark/benchmark.h" + +// Tests that we can specify the number of iterations with +// --benchmark_min_time=x. +namespace { + +class TestReporter : public benchmark::ConsoleReporter { + public: + virtual bool ReportContext(const Context& context) BENCHMARK_OVERRIDE { + return ConsoleReporter::ReportContext(context); + }; + + virtual void ReportRuns(const std::vector& report) BENCHMARK_OVERRIDE { + assert(report.size() == 1); + iter_nums_.push_back(report[0].iterations); + ConsoleReporter::ReportRuns(report); + }; + + TestReporter() {} + + virtual ~TestReporter() {} + + const std::vector& GetIters() const { + return iter_nums_; + } + + private: + std::vector iter_nums_; +}; + +} // end namespace + +static void BM_MyBench(benchmark::State& state) { + for (auto s : state) { + } +} +BENCHMARK(BM_MyBench); + +int main(int argc, char** argv) { + // Make a fake argv and append the new --benchmark_min_time= to it. + int fake_argc = argc + 1; + const char** fake_argv = new const char*[static_cast(fake_argc)]; + for (int i = 0; i < argc; ++i) fake_argv[i] = argv[i]; + fake_argv[argc] = "--benchmark_min_time=4x"; + + benchmark::Initialize(&fake_argc, const_cast(fake_argv)); + + TestReporter test_reporter; + const size_t returned_count = + benchmark::RunSpecifiedBenchmarks(&test_reporter, "BM_MyBench"); + assert(returned_count == 1); + + // Check the executed iters. 
+ const std::vector iters = test_reporter.GetIters(); + assert(!iters.empty() && iters[0] == 4); + + delete[] fake_argv; + return 0; +} diff --git a/test/benchmark_min_time_flag_time_test.cc b/test/benchmark_min_time_flag_time_test.cc new file mode 100644 index 0000000..04a82eb --- /dev/null +++ b/test/benchmark_min_time_flag_time_test.cc @@ -0,0 +1,90 @@ +#include +#include +#include +#include +#include +#include +#include +#include + +#include "benchmark/benchmark.h" + +// Tests that we can specify the min time with +// --benchmark_min_time= (no suffix needed) OR +// --benchmark_min_time=s +namespace { + +// This is from benchmark.h +typedef int64_t IterationCount; + +class TestReporter : public benchmark::ConsoleReporter { + public: + virtual bool ReportContext(const Context& context) BENCHMARK_OVERRIDE { + return ConsoleReporter::ReportContext(context); + }; + + virtual void ReportRuns(const std::vector& report) BENCHMARK_OVERRIDE { + assert(report.size() == 1); + ConsoleReporter::ReportRuns(report); + }; + + virtual void ReportRunsConfig(double min_time, bool /* has_explicit_iters */, + IterationCount /* iters */) BENCHMARK_OVERRIDE { + min_times_.push_back(min_time); + } + + TestReporter() {} + + virtual ~TestReporter() {} + + const std::vector& GetMinTimes() const { return min_times_; } + + private: + std::vector min_times_; +}; + +bool AlmostEqual(double a, double b) { + return std::fabs(a - b) < std::numeric_limits::epsilon(); +} + +void DoTestHelper(int* argc, const char** argv, double expected) { + benchmark::Initialize(argc, const_cast(argv)); + + TestReporter test_reporter; + const size_t returned_count = + benchmark::RunSpecifiedBenchmarks(&test_reporter, "BM_MyBench"); + assert(returned_count == 1); + + // Check the min_time + const std::vector& min_times = test_reporter.GetMinTimes(); + assert(!min_times.empty() && AlmostEqual(min_times[0], expected)); +} + +} // end namespace + +static void BM_MyBench(benchmark::State& state) { + for (auto s : state) { + } +} +BENCHMARK(BM_MyBench); + +int main(int argc, char** argv) { + // Make a fake argv and append the new --benchmark_min_time= to it. 
+ int fake_argc = argc + 1; + const char** fake_argv = new const char*[static_cast(fake_argc)]; + + for (int i = 0; i < argc; ++i) fake_argv[i] = argv[i]; + + const char* no_suffix = "--benchmark_min_time=4"; + const char* with_suffix = "--benchmark_min_time=4.0s"; + double expected = 4.0; + + fake_argv[argc] = no_suffix; + DoTestHelper(&fake_argc, fake_argv, expected); + + fake_argv[argc] = with_suffix; + DoTestHelper(&fake_argc, fake_argv, expected); + + delete[] fake_argv; + return 0; +} diff --git a/test/benchmark_name_gtest.cc b/test/benchmark_name_gtest.cc index afb401c..0a6746d 100644 --- a/test/benchmark_name_gtest.cc +++ b/test/benchmark_name_gtest.cc @@ -32,6 +32,14 @@ TEST(BenchmarkNameTest, MinTime) { EXPECT_EQ(name.str(), "function_name/some_args:3/4/min_time:3.4s"); } +TEST(BenchmarkNameTest, MinWarmUpTime) { + auto name = BenchmarkName(); + name.function_name = "function_name"; + name.args = "some_args:3/4"; + name.min_warmup_time = "min_warmup_time:3.5s"; + EXPECT_EQ(name.str(), "function_name/some_args:3/4/min_warmup_time:3.5s"); +} + TEST(BenchmarkNameTest, Iterations) { auto name = BenchmarkName(); name.function_name = "function_name"; diff --git a/test/benchmark_random_interleaving_gtest.cc b/test/benchmark_random_interleaving_gtest.cc index d04befa..7f20867 100644 --- a/test/benchmark_random_interleaving_gtest.cc +++ b/test/benchmark_random_interleaving_gtest.cc @@ -51,10 +51,9 @@ class BenchmarkTest : public testing::Test { void Execute(const std::string& pattern) { queue->Clear(); - BenchmarkReporter* reporter = new NullReporter; + std::unique_ptr reporter(new NullReporter()); FLAGS_benchmark_filter = pattern; - RunSpecifiedBenchmarks(reporter); - delete reporter; + RunSpecifiedBenchmarks(reporter.get()); queue->Put("DONE"); // End marker } diff --git a/test/benchmark_setup_teardown_test.cc b/test/benchmark_setup_teardown_test.cc index efa34e1..6c3cc2e 100644 --- a/test/benchmark_setup_teardown_test.cc +++ b/test/benchmark_setup_teardown_test.cc @@ -10,19 +10,19 @@ // Test that Setup() and Teardown() are called exactly once // for each benchmark run (single-threaded). -namespace single { +namespace singlethreaded { static int setup_call = 0; static int teardown_call = 0; -} // namespace single +} // namespace singlethreaded static void DoSetup1(const benchmark::State& state) { - ++single::setup_call; + ++singlethreaded::setup_call; // Setup/Teardown should never be called with any thread_idx != 0. assert(state.thread_index() == 0); } static void DoTeardown1(const benchmark::State& state) { - ++single::teardown_call; + ++singlethreaded::teardown_call; assert(state.thread_index() == 0); } @@ -80,11 +80,11 @@ int fixture_setup = 0; class FIXTURE_BECHMARK_NAME : public ::benchmark::Fixture { public: - void SetUp(const ::benchmark::State&) BENCHMARK_OVERRIDE { + void SetUp(const ::benchmark::State&) override { fixture_interaction::fixture_setup++; } - ~FIXTURE_BECHMARK_NAME() {} + ~FIXTURE_BECHMARK_NAME() override {} }; BENCHMARK_F(FIXTURE_BECHMARK_NAME, BM_WithFixture)(benchmark::State& st) { @@ -134,8 +134,8 @@ int main(int argc, char** argv) { assert(ret > 0); // Setup/Teardown is called once for each arg group (1,3,5,7). - assert(single::setup_call == 4); - assert(single::teardown_call == 4); + assert(singlethreaded::setup_call == 4); + assert(singlethreaded::teardown_call == 4); // 3 group of threads calling this function (3,5,10). 
assert(concurrent::setup_call.load(std::memory_order_relaxed) == 3); @@ -145,7 +145,7 @@ int main(int argc, char** argv) { // Setup is called 4 times, once for each arg group (1,3,5,7) assert(fixture_interaction::setup == 4); - // Fixture::Setup is called everytime the bm routine is run. + // Fixture::Setup is called every time the bm routine is run. // The exact number is indeterministic, so we just assert that // it's more than setup. assert(fixture_interaction::fixture_setup > fixture_interaction::setup); diff --git a/test/benchmark_test.cc b/test/benchmark_test.cc index 2906cdc..94590d5 100644 --- a/test/benchmark_test.cc +++ b/test/benchmark_test.cc @@ -5,6 +5,7 @@ #include #include +#include #include #include #include @@ -26,7 +27,7 @@ namespace { -int BENCHMARK_NOINLINE Factorial(uint32_t n) { +int BENCHMARK_NOINLINE Factorial(int n) { return (n == 1) ? 1 : n * Factorial(n - 1); } @@ -74,7 +75,8 @@ BENCHMARK_RANGE(BM_CalculatePiRange, 1, 1024 * 1024); static void BM_CalculatePi(benchmark::State& state) { static const int depth = 1024; for (auto _ : state) { - benchmark::DoNotOptimize(CalculatePi(static_cast(depth))); + double pi = CalculatePi(static_cast(depth)); + benchmark::DoNotOptimize(pi); } } BENCHMARK(BM_CalculatePi)->Threads(8); @@ -90,7 +92,8 @@ static void BM_SetInsert(benchmark::State& state) { for (int j = 0; j < state.range(1); ++j) data.insert(rand()); } state.SetItemsProcessed(state.iterations() * state.range(1)); - state.SetBytesProcessed(state.iterations() * state.range(1) * sizeof(int)); + state.SetBytesProcessed(state.iterations() * state.range(1) * + static_cast(sizeof(int))); } // Test many inserts at once to reduce the total iterations needed. Otherwise, @@ -108,7 +111,7 @@ static void BM_Sequential(benchmark::State& state) { } const int64_t items_processed = state.iterations() * state.range(0); state.SetItemsProcessed(items_processed); - state.SetBytesProcessed(items_processed * sizeof(v)); + state.SetBytesProcessed(items_processed * static_cast(sizeof(v))); } BENCHMARK_TEMPLATE2(BM_Sequential, std::vector, int) ->Range(1 << 0, 1 << 10); @@ -122,7 +125,10 @@ static void BM_StringCompare(benchmark::State& state) { size_t len = static_cast(state.range(0)); std::string s1(len, '-'); std::string s2(len, '-'); - for (auto _ : state) benchmark::DoNotOptimize(s1.compare(s2)); + for (auto _ : state) { + auto comp = s1.compare(s2); + benchmark::DoNotOptimize(comp); + } } BENCHMARK(BM_StringCompare)->Range(1, 1 << 20); @@ -169,7 +175,7 @@ static void BM_ParallelMemset(benchmark::State& state) { for (int i = from; i < to; i++) { // No need to lock test_vector_mu as ranges // do not overlap between threads. - benchmark::DoNotOptimize(test_vector->at(i) = 1); + benchmark::DoNotOptimize(test_vector->at(static_cast(i)) = 1); } } @@ -244,4 +250,25 @@ BENCHMARK(BM_DenseThreadRanges)->Arg(1)->DenseThreadRange(1, 3); BENCHMARK(BM_DenseThreadRanges)->Arg(2)->DenseThreadRange(1, 4, 2); BENCHMARK(BM_DenseThreadRanges)->Arg(3)->DenseThreadRange(5, 14, 3); +static void BM_BenchmarkName(benchmark::State& state) { + for (auto _ : state) { + } + + // Check that the benchmark name is passed correctly to `state`. 
+ assert("BM_BenchmarkName" == state.name()); +} +BENCHMARK(BM_BenchmarkName); + +// regression test for #1446 +template +static void BM_templated_test(benchmark::State& state) { + for (auto _ : state) { + type created_string; + benchmark::DoNotOptimize(created_string); + } +} + +static auto BM_templated_test_double = BM_templated_test>; +BENCHMARK(BM_templated_test_double); + BENCHMARK_MAIN(); diff --git a/test/clobber_memory_assembly_test.cc b/test/clobber_memory_assembly_test.cc index ab26913..54e26cc 100644 --- a/test/clobber_memory_assembly_test.cc +++ b/test/clobber_memory_assembly_test.cc @@ -3,6 +3,7 @@ #ifdef __clang__ #pragma clang diagnostic ignored "-Wreturn-type" #endif +BENCHMARK_DISABLE_DEPRECATED_WARNING extern "C" { diff --git a/test/complexity_test.cc b/test/complexity_test.cc index 1251cd4..76891e0 100644 --- a/test/complexity_test.cc +++ b/test/complexity_test.cc @@ -70,7 +70,7 @@ int AddComplexityTest(const std::string &test_name, void BM_Complexity_O1(benchmark::State &state) { for (auto _ : state) { for (int i = 0; i < 1024; ++i) { - benchmark::DoNotOptimize(&i); + benchmark::DoNotOptimize(i); } } state.SetComplexityN(state.range(0)); @@ -109,7 +109,7 @@ ADD_COMPLEXITY_CASES(one_test_name, big_o_1_test_name, rms_o_1_test_name, std::vector ConstructRandomVector(int64_t size) { std::vector v; - v.reserve(static_cast(size)); + v.reserve(static_cast(size)); for (int i = 0; i < size; ++i) { v.push_back(static_cast(std::rand() % size)); } @@ -121,7 +121,8 @@ void BM_Complexity_O_N(benchmark::State &state) { // Test worst case scenario (item not in vector) const int64_t item_not_in_vector = state.range(0) * 2; for (auto _ : state) { - benchmark::DoNotOptimize(std::find(v.begin(), v.end(), item_not_in_vector)); + auto it = std::find(v.begin(), v.end(), item_not_in_vector); + benchmark::DoNotOptimize(it); } state.SetComplexityN(state.range(0)); } @@ -174,7 +175,7 @@ BENCHMARK(BM_Complexity_O_N_log_N) ->RangeMultiplier(2) ->Range(1 << 10, 1 << 16) ->Complexity([](benchmark::IterationCount n) { - return kLog2E * n * log(static_cast(n)); + return kLog2E * static_cast(n) * log(static_cast(n)); }); BENCHMARK(BM_Complexity_O_N_log_N) ->RangeMultiplier(2) @@ -204,7 +205,8 @@ ADD_COMPLEXITY_CASES(n_lg_n_test_name, big_o_n_lg_n_test_name, void BM_ComplexityCaptureArgs(benchmark::State &state, int n) { for (auto _ : state) { // This test requires a non-zero CPU time to avoid divide-by-zero - benchmark::DoNotOptimize(state.iterations()); + auto iterations = state.iterations(); + benchmark::DoNotOptimize(iterations); } state.SetComplexityN(n); } diff --git a/test/diagnostics_test.cc b/test/diagnostics_test.cc index c54d5b0..0cd3edb 100644 --- a/test/diagnostics_test.cc +++ b/test/diagnostics_test.cc @@ -49,7 +49,8 @@ void BM_diagnostic_test(benchmark::State& state) { if (called_once == false) try_invalid_pause_resume(state); for (auto _ : state) { - benchmark::DoNotOptimize(state.iterations()); + auto iterations = state.iterations(); + benchmark::DoNotOptimize(iterations); } if (called_once == false) try_invalid_pause_resume(state); @@ -64,7 +65,8 @@ void BM_diagnostic_test_keep_running(benchmark::State& state) { if (called_once == false) try_invalid_pause_resume(state); while (state.KeepRunning()) { - benchmark::DoNotOptimize(state.iterations()); + auto iterations = state.iterations(); + benchmark::DoNotOptimize(iterations); } if (called_once == false) try_invalid_pause_resume(state); @@ -74,7 +76,16 @@ void BM_diagnostic_test_keep_running(benchmark::State& state) { 
BENCHMARK(BM_diagnostic_test_keep_running); int main(int argc, char* argv[]) { +#ifdef NDEBUG + // This test is exercising functionality for debug builds, which are not + // available in release builds. Skip the test if we are in that environment + // to avoid a test failure. + std::cout << "Diagnostic test disabled in release build" << std::endl; + (void)argc; + (void)argv; +#else benchmark::internal::GetAbortHandler() = &TestHandler; benchmark::Initialize(&argc, argv); benchmark::RunSpecifiedBenchmarks(); +#endif } diff --git a/test/donotoptimize_assembly_test.cc b/test/donotoptimize_assembly_test.cc index 2e86a51..dc286f5 100644 --- a/test/donotoptimize_assembly_test.cc +++ b/test/donotoptimize_assembly_test.cc @@ -3,12 +3,16 @@ #ifdef __clang__ #pragma clang diagnostic ignored "-Wreturn-type" #endif +BENCHMARK_DISABLE_DEPRECATED_WARNING extern "C" { extern int ExternInt; extern int ExternInt2; extern int ExternInt3; +extern int BigArray[2049]; + +const int ConstBigArray[2049]{}; inline int Add42(int x) { return x + 42; } @@ -23,7 +27,15 @@ struct Large { int value; int data[2]; }; + +struct ExtraLarge { + int arr[2049]; +}; } + +extern ExtraLarge ExtraLargeObj; +const ExtraLarge ConstExtraLargeObj{}; + // CHECK-LABEL: test_with_rvalue: extern "C" void test_with_rvalue() { benchmark::DoNotOptimize(Add42(0)); @@ -68,6 +80,22 @@ extern "C" void test_with_large_lvalue() { // CHECK: ret } +// CHECK-LABEL: test_with_extra_large_lvalue_with_op: +extern "C" void test_with_extra_large_lvalue_with_op() { + ExtraLargeObj.arr[16] = 42; + benchmark::DoNotOptimize(ExtraLargeObj); + // CHECK: movl $42, ExtraLargeObj+64(%rip) + // CHECK: ret +} + +// CHECK-LABEL: test_with_big_array_with_op +extern "C" void test_with_big_array_with_op() { + BigArray[16] = 42; + benchmark::DoNotOptimize(BigArray); + // CHECK: movl $42, BigArray+64(%rip) + // CHECK: ret +} + // CHECK-LABEL: test_with_non_trivial_lvalue: extern "C" void test_with_non_trivial_lvalue() { NotTriviallyCopyable NTC(ExternInt); @@ -96,6 +124,18 @@ extern "C" void test_with_large_const_lvalue() { // CHECK: ret } +// CHECK-LABEL: test_with_const_extra_large_obj: +extern "C" void test_with_const_extra_large_obj() { + benchmark::DoNotOptimize(ConstExtraLargeObj); + // CHECK: ret +} + +// CHECK-LABEL: test_with_const_big_array +extern "C" void test_with_const_big_array() { + benchmark::DoNotOptimize(ConstBigArray); + // CHECK: ret +} + // CHECK-LABEL: test_with_non_trivial_const_lvalue: extern "C" void test_with_non_trivial_const_lvalue() { const NotTriviallyCopyable Obj(ExternInt); diff --git a/test/donotoptimize_test.cc b/test/donotoptimize_test.cc index c321f15..04ec938 100644 --- a/test/donotoptimize_test.cc +++ b/test/donotoptimize_test.cc @@ -4,9 +4,9 @@ namespace { #if defined(__GNUC__) -std::uint64_t double_up(const std::uint64_t x) __attribute__((const)); +std::int64_t double_up(const std::int64_t x) __attribute__((const)); #endif -std::uint64_t double_up(const std::uint64_t x) { return x * 2; } +std::int64_t double_up(const std::int64_t x) { return x * 2; } } // namespace // Using DoNotOptimize on types like BitRef seem to cause a lot of problems @@ -29,6 +29,15 @@ struct BitRef { int main(int, char*[]) { // this test verifies compilation of DoNotOptimize() for some types + char buffer1[1] = ""; + benchmark::DoNotOptimize(buffer1); + + char buffer2[2] = ""; + benchmark::DoNotOptimize(buffer2); + + char buffer3[3] = ""; + benchmark::DoNotOptimize(buffer3); + char buffer8[8] = ""; benchmark::DoNotOptimize(buffer8); @@ -37,17 +46,24 @@ int 
main(int, char*[]) { char buffer1024[1024] = ""; benchmark::DoNotOptimize(buffer1024); - benchmark::DoNotOptimize(&buffer1024[0]); + char* bptr = &buffer1024[0]; + benchmark::DoNotOptimize(bptr); int x = 123; benchmark::DoNotOptimize(x); - benchmark::DoNotOptimize(&x); + int* xp = &x; + benchmark::DoNotOptimize(xp); benchmark::DoNotOptimize(x += 42); - benchmark::DoNotOptimize(double_up(x)); + std::int64_t y = double_up(x); + benchmark::DoNotOptimize(y); // These tests are to e - benchmark::DoNotOptimize(BitRef::Make()); BitRef lval = BitRef::Make(); benchmark::DoNotOptimize(lval); + +#ifdef BENCHMARK_HAS_CXX11 + // Check that accept rvalue. + benchmark::DoNotOptimize(BitRef::Make()); +#endif } diff --git a/test/filter_test.cc b/test/filter_test.cc index a567de2..4c8b8ea 100644 --- a/test/filter_test.cc +++ b/test/filter_test.cc @@ -14,28 +14,27 @@ namespace { class TestReporter : public benchmark::ConsoleReporter { public: - virtual bool ReportContext(const Context& context) BENCHMARK_OVERRIDE { + bool ReportContext(const Context& context) override { return ConsoleReporter::ReportContext(context); }; - virtual void ReportRuns(const std::vector& report) BENCHMARK_OVERRIDE { + void ReportRuns(const std::vector& report) override { ++count_; - max_family_index_ = - std::max(max_family_index_, report[0].family_index); + max_family_index_ = std::max(max_family_index_, report[0].family_index); ConsoleReporter::ReportRuns(report); }; TestReporter() : count_(0), max_family_index_(0) {} - virtual ~TestReporter() {} + ~TestReporter() override {} - size_t GetCount() const { return count_; } + int GetCount() const { return count_; } - size_t GetMaxFamilyIndex() const { return max_family_index_; } + int64_t GetMaxFamilyIndex() const { return max_family_index_; } private: - mutable size_t count_; - mutable size_t max_family_index_; + mutable int count_; + mutable int64_t max_family_index_; }; } // end namespace @@ -79,13 +78,13 @@ int main(int argc, char** argv) { benchmark::Initialize(&argc, argv); TestReporter test_reporter; - const size_t returned_count = - benchmark::RunSpecifiedBenchmarks(&test_reporter); + const int64_t returned_count = + static_cast(benchmark::RunSpecifiedBenchmarks(&test_reporter)); if (argc == 2) { // Make sure we ran all of the tests std::stringstream ss(argv[1]); - size_t expected_return; + int64_t expected_return; ss >> expected_return; if (returned_count != expected_return) { @@ -95,8 +94,8 @@ int main(int argc, char** argv) { return -1; } - const size_t expected_reports = list_only ? 0 : expected_return; - const size_t reports_count = test_reporter.GetCount(); + const int64_t expected_reports = list_only ? 0 : expected_return; + const int64_t reports_count = test_reporter.GetCount(); if (reports_count != expected_reports) { std::cerr << "ERROR: Expected " << expected_reports << " tests to be run but reported_count = " << reports_count @@ -104,8 +103,8 @@ int main(int argc, char** argv) { return -1; } - const size_t max_family_index = test_reporter.GetMaxFamilyIndex(); - const size_t num_families = reports_count == 0 ? 0 : 1 + max_family_index; + const int64_t max_family_index = test_reporter.GetMaxFamilyIndex(); + const int64_t num_families = reports_count == 0 ? 
0 : 1 + max_family_index; if (num_families != expected_reports) { std::cerr << "ERROR: Expected " << expected_reports << " test families to be run but num_families = " diff --git a/test/fixture_test.cc b/test/fixture_test.cc index af650db..d1093eb 100644 --- a/test/fixture_test.cc +++ b/test/fixture_test.cc @@ -8,21 +8,21 @@ class FIXTURE_BECHMARK_NAME : public ::benchmark::Fixture { public: - void SetUp(const ::benchmark::State& state) BENCHMARK_OVERRIDE { + void SetUp(const ::benchmark::State& state) override { if (state.thread_index() == 0) { assert(data.get() == nullptr); data.reset(new int(42)); } } - void TearDown(const ::benchmark::State& state) BENCHMARK_OVERRIDE { + void TearDown(const ::benchmark::State& state) override { if (state.thread_index() == 0) { assert(data.get() != nullptr); data.reset(); } } - ~FIXTURE_BECHMARK_NAME() { assert(data == nullptr); } + ~FIXTURE_BECHMARK_NAME() override { assert(data == nullptr); } std::unique_ptr data; }; diff --git a/test/link_main_test.cc b/test/link_main_test.cc index 241ad5c..e806500 100644 --- a/test/link_main_test.cc +++ b/test/link_main_test.cc @@ -2,7 +2,8 @@ void BM_empty(benchmark::State& state) { for (auto _ : state) { - benchmark::DoNotOptimize(state.iterations()); + auto iterations = state.iterations(); + benchmark::DoNotOptimize(iterations); } } BENCHMARK(BM_empty); diff --git a/test/map_test.cc b/test/map_test.cc index 5096134..0fdba7c 100644 --- a/test/map_test.cc +++ b/test/map_test.cc @@ -24,7 +24,8 @@ static void BM_MapLookup(benchmark::State& state) { m = ConstructRandomMap(size); state.ResumeTiming(); for (int i = 0; i < size; ++i) { - benchmark::DoNotOptimize(m.find(std::rand() % size)); + auto it = m.find(std::rand() % size); + benchmark::DoNotOptimize(it); } } state.SetItemsProcessed(state.iterations() * size); @@ -34,11 +35,11 @@ BENCHMARK(BM_MapLookup)->Range(1 << 3, 1 << 12); // Using fixtures. 
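Before the fixture-based variant of this lookup benchmark below, a minimal sketch of the PauseTiming()/ResumeTiming() pattern that BM_MapLookup relies on above: anything done between the two calls is excluded from the measured time. The name and sizes here are invented for illustration:

#include <map>
#include <benchmark/benchmark.h>

static void BM_LookupSketch(benchmark::State& state) {
  std::map<int, int> m;
  for (auto _ : state) {
    state.PauseTiming();                      // stop the clock
    m.clear();
    for (int i = 0; i < 1000; ++i) m[i] = i;  // untimed setup
    state.ResumeTiming();                     // restart the clock
    auto it = m.find(500);
    benchmark::DoNotOptimize(it);
  }
}
BENCHMARK(BM_LookupSketch);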
class MapFixture : public ::benchmark::Fixture { public: - void SetUp(const ::benchmark::State& st) BENCHMARK_OVERRIDE { + void SetUp(const ::benchmark::State& st) override { m = ConstructRandomMap(static_cast(st.range(0))); } - void TearDown(const ::benchmark::State&) BENCHMARK_OVERRIDE { m.clear(); } + void TearDown(const ::benchmark::State&) override { m.clear(); } std::map m; }; @@ -47,7 +48,8 @@ BENCHMARK_DEFINE_F(MapFixture, Lookup)(benchmark::State& state) { const int size = static_cast(state.range(0)); for (auto _ : state) { for (int i = 0; i < size; ++i) { - benchmark::DoNotOptimize(m.find(std::rand() % size)); + auto it = m.find(std::rand() % size); + benchmark::DoNotOptimize(it); } } state.SetItemsProcessed(state.iterations() * size); diff --git a/test/memory_manager_test.cc b/test/memory_manager_test.cc index f0c192f..d94bd51 100644 --- a/test/memory_manager_test.cc +++ b/test/memory_manager_test.cc @@ -5,16 +5,17 @@ #include "output_test.h" class TestMemoryManager : public benchmark::MemoryManager { - void Start() BENCHMARK_OVERRIDE {} - void Stop(Result* result) BENCHMARK_OVERRIDE { - result->num_allocs = 42; - result->max_bytes_used = 42000; + void Start() override {} + void Stop(Result& result) override { + result.num_allocs = 42; + result.max_bytes_used = 42000; } }; void BM_empty(benchmark::State& state) { for (auto _ : state) { - benchmark::DoNotOptimize(state.iterations()); + auto iterations = state.iterations(); + benchmark::DoNotOptimize(iterations); } } BENCHMARK(BM_empty); diff --git a/test/min_time_parse_gtest.cc b/test/min_time_parse_gtest.cc new file mode 100644 index 0000000..e2bdf67 --- /dev/null +++ b/test/min_time_parse_gtest.cc @@ -0,0 +1,30 @@ +#include "../src/benchmark_runner.h" +#include "gtest/gtest.h" + +namespace { + +TEST(ParseMinTimeTest, InvalidInput) { +#if GTEST_HAS_DEATH_TEST + // Tests only runnable in debug mode (when BM_CHECK is enabled). +#ifndef NDEBUG +#ifndef TEST_BENCHMARK_LIBRARY_HAS_NO_ASSERTIONS + ASSERT_DEATH_IF_SUPPORTED( + { benchmark::internal::ParseBenchMinTime("abc"); }, + "Malformed seconds value passed to --benchmark_min_time: `abc`"); + + ASSERT_DEATH_IF_SUPPORTED( + { benchmark::internal::ParseBenchMinTime("123ms"); }, + "Malformed seconds value passed to --benchmark_min_time: `123ms`"); + + ASSERT_DEATH_IF_SUPPORTED( + { benchmark::internal::ParseBenchMinTime("1z"); }, + "Malformed seconds value passed to --benchmark_min_time: `1z`"); + + ASSERT_DEATH_IF_SUPPORTED( + { benchmark::internal::ParseBenchMinTime("1hs"); }, + "Malformed seconds value passed to --benchmark_min_time: `1hs`"); +#endif +#endif +#endif +} +} // namespace diff --git a/test/multiple_ranges_test.cc b/test/multiple_ranges_test.cc index 7618c4d..5300a96 100644 --- a/test/multiple_ranges_test.cc +++ b/test/multiple_ranges_test.cc @@ -28,7 +28,7 @@ class MultipleRangesFixture : public ::benchmark::Fixture { {2, 7, 15}, {7, 6, 3}}) {} - void SetUp(const ::benchmark::State& state) BENCHMARK_OVERRIDE { + void SetUp(const ::benchmark::State& state) override { std::vector ranges = {state.range(0), state.range(1), state.range(2)}; @@ -39,7 +39,7 @@ class MultipleRangesFixture : public ::benchmark::Fixture { // NOTE: This is not TearDown as we want to check after _all_ runs are // complete. 
- virtual ~MultipleRangesFixture() { + ~MultipleRangesFixture() override { if (actualValues != expectedValues) { std::cout << "EXPECTED\n"; for (const auto& v : expectedValues) { diff --git a/test/options_test.cc b/test/options_test.cc index d424d40..a1b209f 100644 --- a/test/options_test.cc +++ b/test/options_test.cc @@ -33,6 +33,8 @@ BENCHMARK(BM_basic)->DenseRange(10, 15); BENCHMARK(BM_basic)->Args({42, 42}); BENCHMARK(BM_basic)->Ranges({{64, 512}, {64, 512}}); BENCHMARK(BM_basic)->MinTime(0.7); +BENCHMARK(BM_basic)->MinWarmUpTime(0.8); +BENCHMARK(BM_basic)->MinTime(0.1)->MinWarmUpTime(0.2); BENCHMARK(BM_basic)->UseRealTime(); BENCHMARK(BM_basic)->ThreadRange(2, 4); BENCHMARK(BM_basic)->ThreadPerCpu(); @@ -65,8 +67,8 @@ void BM_explicit_iteration_count(benchmark::State& state) { // Test that the requested iteration count is respected. assert(state.max_iterations == 42); - size_t actual_iterations = 0; - for (auto _ : state) ++actual_iterations; + for (auto _ : state) { + } assert(state.iterations() == state.max_iterations); assert(state.iterations() == 42); } diff --git a/test/output_test.h b/test/output_test.h index c6ff8ef..c08fe1d 100644 --- a/test/output_test.h +++ b/test/output_test.h @@ -85,7 +85,7 @@ std::string GetFileReporterOutput(int argc, char* argv[]); struct Results; typedef std::function ResultsCheckFn; -size_t AddChecker(const char* bm_name_pattern, const ResultsCheckFn& fn); +size_t AddChecker(const std::string& bm_name_pattern, const ResultsCheckFn& fn); // Class holding the results of a benchmark. // It is passed in calls to checker functions. @@ -117,7 +117,7 @@ struct Results { // get the string for a result by name, or nullptr if the name // is not found - const std::string* Get(const char* entry_name) const { + const std::string* Get(const std::string& entry_name) const { auto it = values.find(entry_name); if (it == values.end()) return nullptr; return &it->second; @@ -126,12 +126,12 @@ struct Results { // get a result by name, parsed as a specific type. // NOTE: for counters, use GetCounterAs instead. template - T GetAs(const char* entry_name) const; + T GetAs(const std::string& entry_name) const; // counters are written as doubles, so they have to be read first // as a double, and only then converted to the asked type. 
template - T GetCounterAs(const char* entry_name) const { + T GetCounterAs(const std::string& entry_name) const { double dval = GetAs(entry_name); T tval = static_cast(dval); return tval; @@ -139,7 +139,7 @@ struct Results { }; template -T Results::GetAs(const char* entry_name) const { +T Results::GetAs(const std::string& entry_name) const { auto* sv = Get(entry_name); BM_CHECK(sv != nullptr && !sv->empty()); std::stringstream ss; diff --git a/test/output_test_helper.cc b/test/output_test_helper.cc index 81584cb..2567370 100644 --- a/test/output_test_helper.cc +++ b/test/output_test_helper.cc @@ -45,7 +45,7 @@ SubMap& GetSubstitutions() { static SubMap map = { {"%float", "[0-9]*[.]?[0-9]+([eE][-+][0-9]+)?"}, // human-readable float - {"%hrfloat", "[0-9]*[.]?[0-9]+([eE][-+][0-9]+)?[kMGTPEZYmunpfazy]?"}, + {"%hrfloat", "[0-9]*[.]?[0-9]+([eE][-+][0-9]+)?[kKMGTPEZYmunpfazy]?i?"}, {"%percentage", percentage_re}, {"%int", "[ ]*[0-9]+"}, {" %s ", "[ ]+"}, @@ -143,7 +143,7 @@ class TestReporter : public benchmark::BenchmarkReporter { TestReporter(std::vector reps) : reporters_(std::move(reps)) {} - virtual bool ReportContext(const Context& context) BENCHMARK_OVERRIDE { + bool ReportContext(const Context& context) override { bool last_ret = false; bool first = true; for (auto rep : reporters_) { @@ -157,10 +157,10 @@ class TestReporter : public benchmark::BenchmarkReporter { return last_ret; } - void ReportRuns(const std::vector& report) BENCHMARK_OVERRIDE { + void ReportRuns(const std::vector& report) override { for (auto rep : reporters_) rep->ReportRuns(report); } - void Finalize() BENCHMARK_OVERRIDE { + void Finalize() override { for (auto rep : reporters_) rep->Finalize(); } @@ -248,9 +248,8 @@ void ResultsChecker::CheckResults(std::stringstream& output) { if (!p.regex->Match(r.name)) { BM_VLOG(2) << p.regex_str << " is not matched by " << r.name << "\n"; continue; - } else { - BM_VLOG(2) << p.regex_str << " is matched by " << r.name << "\n"; } + BM_VLOG(2) << p.regex_str << " is matched by " << r.name << "\n"; BM_VLOG(1) << "Checking results of " << r.name << ": ... 
\n"; p.fn(r); BM_VLOG(1) << "Checking results of " << r.name << ": OK.\n"; @@ -300,7 +299,7 @@ std::vector ResultsChecker::SplitCsv_(const std::string& line) { } // end namespace internal -size_t AddChecker(const char* bm_name, const ResultsCheckFn& fn) { +size_t AddChecker(const std::string& bm_name, const ResultsCheckFn& fn) { auto& rc = internal::GetResultsChecker(); rc.Add(bm_name, fn); return rc.results.size(); @@ -328,16 +327,18 @@ double Results::GetTime(BenchmarkTime which) const { BM_CHECK(unit); if (*unit == "ns") { return val * 1.e-9; - } else if (*unit == "us") { - return val * 1.e-6; - } else if (*unit == "ms") { - return val * 1.e-3; - } else if (*unit == "s") { - return val; - } else { - BM_CHECK(1 == 0) << "unknown time unit: " << *unit; - return 0; } + if (*unit == "us") { + return val * 1.e-6; + } + if (*unit == "ms") { + return val * 1.e-3; + } + if (*unit == "s") { + return val; + } + BM_CHECK(1 == 0) << "unknown time unit: " << *unit; + return 0; } // ========================================================================= // @@ -393,14 +394,14 @@ void RunOutputTests(int argc, char* argv[]) { benchmark::JSONReporter JR; benchmark::CSVReporter CSVR; struct ReporterTest { - const char* name; + std::string name; std::vector& output_cases; std::vector& error_cases; benchmark::BenchmarkReporter& reporter; std::stringstream out_stream; std::stringstream err_stream; - ReporterTest(const char* n, std::vector& out_tc, + ReporterTest(const std::string& n, std::vector& out_tc, std::vector& err_tc, benchmark::BenchmarkReporter& br) : name(n), output_cases(out_tc), error_cases(err_tc), reporter(br) { @@ -408,12 +409,12 @@ void RunOutputTests(int argc, char* argv[]) { reporter.SetErrorStream(&err_stream); } } TestCases[] = { - {"ConsoleReporter", GetTestCaseList(TC_ConsoleOut), + {std::string("ConsoleReporter"), GetTestCaseList(TC_ConsoleOut), GetTestCaseList(TC_ConsoleErr), CR}, - {"JSONReporter", GetTestCaseList(TC_JSONOut), GetTestCaseList(TC_JSONErr), - JR}, - {"CSVReporter", GetTestCaseList(TC_CSVOut), GetTestCaseList(TC_CSVErr), - CSVR}, + {std::string("JSONReporter"), GetTestCaseList(TC_JSONOut), + GetTestCaseList(TC_JSONErr), JR}, + {std::string("CSVReporter"), GetTestCaseList(TC_CSVOut), + GetTestCaseList(TC_CSVErr), CSVR}, }; // Create the test reporter and run the benchmarks. @@ -422,7 +423,8 @@ void RunOutputTests(int argc, char* argv[]) { benchmark::RunSpecifiedBenchmarks(&test_rep); for (auto& rep_test : TestCases) { - std::string msg = std::string("\nTesting ") + rep_test.name + " Output\n"; + std::string msg = + std::string("\nTesting ") + rep_test.name + std::string(" Output\n"); std::string banner(msg.size() - 1, '-'); std::cout << banner << msg << banner << "\n"; @@ -439,7 +441,7 @@ void RunOutputTests(int argc, char* argv[]) { // the checks to subscribees. 
auto& csv = TestCases[2]; // would use == but gcc spits a warning - BM_CHECK(std::strcmp(csv.name, "CSVReporter") == 0); + BM_CHECK(csv.name == std::string("CSVReporter")); internal::GetResultsChecker().CheckResults(csv.out_stream); } diff --git a/test/perf_counters_gtest.cc b/test/perf_counters_gtest.cc index 3eac624..54c7863 100644 --- a/test/perf_counters_gtest.cc +++ b/test/perf_counters_gtest.cc @@ -1,6 +1,8 @@ +#include #include #include "../src/perf_counters.h" +#include "gmock/gmock.h" #include "gtest/gtest.h" #ifndef GTEST_SKIP @@ -11,12 +13,15 @@ struct MsgHandler { #endif using benchmark::internal::PerfCounters; +using benchmark::internal::PerfCountersMeasurement; using benchmark::internal::PerfCounterValues; +using ::testing::AllOf; +using ::testing::Gt; +using ::testing::Lt; namespace { const char kGenericPerfEvent1[] = "CYCLES"; -const char kGenericPerfEvent2[] = "BRANCHES"; -const char kGenericPerfEvent3[] = "INSTRUCTIONS"; +const char kGenericPerfEvent2[] = "INSTRUCTIONS"; TEST(PerfCountersTest, Init) { EXPECT_EQ(PerfCounters::Initialize(), PerfCounters::kSupported); @@ -27,7 +32,7 @@ TEST(PerfCountersTest, OneCounter) { GTEST_SKIP() << "Performance counters not supported.\n"; } EXPECT_TRUE(PerfCounters::Initialize()); - EXPECT_TRUE(PerfCounters::Create({kGenericPerfEvent1}).IsValid()); + EXPECT_EQ(PerfCounters::Create({kGenericPerfEvent1}).num_counters(), 1); } TEST(PerfCountersTest, NegativeTest) { @@ -36,29 +41,44 @@ TEST(PerfCountersTest, NegativeTest) { return; } EXPECT_TRUE(PerfCounters::Initialize()); - EXPECT_FALSE(PerfCounters::Create({}).IsValid()); - EXPECT_FALSE(PerfCounters::Create({""}).IsValid()); - EXPECT_FALSE(PerfCounters::Create({"not a counter name"}).IsValid()); + // Sanity checks + // Create() will always create a valid object, even if passed no or + // wrong arguments as the new behavior is to warn and drop unsupported + // counters + EXPECT_EQ(PerfCounters::Create({}).num_counters(), 0); + EXPECT_EQ(PerfCounters::Create({""}).num_counters(), 0); + EXPECT_EQ(PerfCounters::Create({"not a counter name"}).num_counters(), 0); { - EXPECT_TRUE(PerfCounters::Create({kGenericPerfEvent1, kGenericPerfEvent2, - kGenericPerfEvent3}) - .IsValid()); + // Try sneaking in a bad egg to see if it is filtered out. 
The + // number of counters has to be two, not zero + auto counter = + PerfCounters::Create({kGenericPerfEvent2, "", kGenericPerfEvent1}); + EXPECT_EQ(counter.num_counters(), 2); + EXPECT_EQ(counter.names(), std::vector( + {kGenericPerfEvent2, kGenericPerfEvent1})); } - EXPECT_FALSE( - PerfCounters::Create({kGenericPerfEvent2, "", kGenericPerfEvent1}) - .IsValid()); - EXPECT_FALSE(PerfCounters::Create({kGenericPerfEvent3, "not a counter name", - kGenericPerfEvent1}) - .IsValid()); { - EXPECT_TRUE(PerfCounters::Create({kGenericPerfEvent1, kGenericPerfEvent2, - kGenericPerfEvent3}) - .IsValid()); + // Try sneaking in an outrageous counter, like a fat finger mistake + auto counter = PerfCounters::Create( + {kGenericPerfEvent2, "not a counter name", kGenericPerfEvent1}); + EXPECT_EQ(counter.num_counters(), 2); + EXPECT_EQ(counter.names(), std::vector( + {kGenericPerfEvent2, kGenericPerfEvent1})); + } + { + // Finally try a golden input - it should like both of them + EXPECT_EQ(PerfCounters::Create({kGenericPerfEvent1, kGenericPerfEvent2}) + .num_counters(), + 2); + } + { + // Add a bad apple in the end of the chain to check the edges + auto counter = PerfCounters::Create( + {kGenericPerfEvent1, kGenericPerfEvent2, "bad event name"}); + EXPECT_EQ(counter.num_counters(), 2); + EXPECT_EQ(counter.names(), std::vector( + {kGenericPerfEvent1, kGenericPerfEvent2})); } - EXPECT_FALSE( - PerfCounters::Create({kGenericPerfEvent1, kGenericPerfEvent2, - kGenericPerfEvent3, "MISPREDICTED_BRANCH_RETIRED"}) - .IsValid()); } TEST(PerfCountersTest, Read1Counter) { @@ -67,7 +87,7 @@ TEST(PerfCountersTest, Read1Counter) { } EXPECT_TRUE(PerfCounters::Initialize()); auto counters = PerfCounters::Create({kGenericPerfEvent1}); - EXPECT_TRUE(counters.IsValid()); + EXPECT_EQ(counters.num_counters(), 1); PerfCounterValues values1(1); EXPECT_TRUE(counters.Snapshot(&values1)); EXPECT_GT(values1[0], 0); @@ -84,7 +104,7 @@ TEST(PerfCountersTest, Read2Counters) { EXPECT_TRUE(PerfCounters::Initialize()); auto counters = PerfCounters::Create({kGenericPerfEvent1, kGenericPerfEvent2}); - EXPECT_TRUE(counters.IsValid()); + EXPECT_EQ(counters.num_counters(), 2); PerfCounterValues values1(2); EXPECT_TRUE(counters.Snapshot(&values1)); EXPECT_GT(values1[0], 0); @@ -95,30 +115,121 @@ TEST(PerfCountersTest, Read2Counters) { EXPECT_GT(values2[1], 0); } -size_t do_work() { - size_t res = 0; - for (size_t i = 0; i < 100000000; ++i) res += i * i; - return res; +TEST(PerfCountersTest, ReopenExistingCounters) { + // This test works in recent and old Intel hardware, Pixel 3, and Pixel 6. + // However we cannot make assumptions beyond 2 HW counters due to Pixel 6. + if (!PerfCounters::kSupported) { + GTEST_SKIP() << "Test skipped because libpfm is not supported.\n"; + } + EXPECT_TRUE(PerfCounters::Initialize()); + std::vector kMetrics({kGenericPerfEvent1}); + std::vector counters(2); + for (auto& counter : counters) { + counter = PerfCounters::Create(kMetrics); + } + PerfCounterValues values(1); + EXPECT_TRUE(counters[0].Snapshot(&values)); + EXPECT_TRUE(counters[1].Snapshot(&values)); } -void measure(size_t threadcount, PerfCounterValues* values1, - PerfCounterValues* values2) { - BM_CHECK_NE(values1, nullptr); - BM_CHECK_NE(values2, nullptr); +TEST(PerfCountersTest, CreateExistingMeasurements) { + // The test works (i.e. causes read to fail) for the assumptions + // about hardware capabilities (i.e. small number (2) hardware + // counters) at this date, + // the same as previous test ReopenExistingCounters. 
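The behaviour these assertions encode, pulled out into a standalone sketch: PerfCounters::Create() now always returns an object and drops names it cannot resolve (logging a warning), so callers gate on num_counters() rather than the old IsValid(). This uses the library-internal header exactly as the test does; the free function name is invented:

#include "../src/perf_counters.h"

void ReadCyclesSketch() {
  using benchmark::internal::PerfCounters;
  using benchmark::internal::PerfCounterValues;
  if (!PerfCounters::kSupported) return;  // built without libpfm
  PerfCounters::Initialize();
  auto counters = PerfCounters::Create({"CYCLES", "not a counter name"});
  // The bogus name is dropped; only CYCLES survives.
  if (counters.num_counters() == 1) {
    PerfCounterValues values(1);
    counters.Snapshot(&values);  // values[0] holds the cycle count
  }
}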
+ if (!PerfCounters::kSupported) { + GTEST_SKIP() << "Test skipped because libpfm is not supported.\n"; + } + EXPECT_TRUE(PerfCounters::Initialize()); + + // This means we will try 10 counters but we can only guarantee + // for sure at this time that only 3 will work. Perhaps in the future + // we could use libpfm to query for the hardware limits on this + // particular platform. + const int kMaxCounters = 10; + const int kMinValidCounters = 2; + + // Let's use a ubiquitous counter that is guaranteed to work + // on all platforms + const std::vector kMetrics{"cycles"}; + + // Cannot create a vector of actual objects because the + // copy constructor of PerfCounters is deleted - and so is + // implicitly deleted on PerfCountersMeasurement too + std::vector> + perf_counter_measurements; + + perf_counter_measurements.reserve(kMaxCounters); + for (int j = 0; j < kMaxCounters; ++j) { + perf_counter_measurements.emplace_back( + new PerfCountersMeasurement(kMetrics)); + } + + std::vector> measurements; + + // Start all counters together to see if they hold + size_t max_counters = kMaxCounters; + for (size_t i = 0; i < kMaxCounters; ++i) { + auto& counter(*perf_counter_measurements[i]); + EXPECT_EQ(counter.num_counters(), 1); + if (!counter.Start()) { + max_counters = i; + break; + }; + } + + ASSERT_GE(max_counters, kMinValidCounters); + + // Start all together + for (size_t i = 0; i < max_counters; ++i) { + auto& counter(*perf_counter_measurements[i]); + EXPECT_TRUE(counter.Stop(measurements) || (i >= kMinValidCounters)); + } + + // Start/stop individually + for (size_t i = 0; i < max_counters; ++i) { + auto& counter(*perf_counter_measurements[i]); + measurements.clear(); + counter.Start(); + EXPECT_TRUE(counter.Stop(measurements) || (i >= kMinValidCounters)); + } +} + +// We try to do some meaningful work here but the compiler +// insists in optimizing away our loop so we had to add a +// no-optimize macro. In case it fails, we added some entropy +// to this pool as well. + +BENCHMARK_DONT_OPTIMIZE size_t do_work() { + static std::mt19937 rd{std::random_device{}()}; + static std::uniform_int_distribution mrand(0, 10); + const size_t kNumLoops = 1000000; + size_t sum = 0; + for (size_t j = 0; j < kNumLoops; ++j) { + sum += mrand(rd); + } + benchmark::DoNotOptimize(sum); + return sum; +} + +void measure(size_t threadcount, PerfCounterValues* before, + PerfCounterValues* after) { + BM_CHECK_NE(before, nullptr); + BM_CHECK_NE(after, nullptr); std::vector threads(threadcount); auto work = [&]() { BM_CHECK(do_work() > 1000); }; // We need to first set up the counters, then start the threads, so the - // threads would inherit the counters. But later, we need to first destroy the - // thread pool (so all the work finishes), then measure the counters. So the - // scopes overlap, and we need to explicitly control the scope of the + // threads would inherit the counters. But later, we need to first destroy + // the thread pool (so all the work finishes), then measure the counters. So + // the scopes overlap, and we need to explicitly control the scope of the // threadpool. 
auto counters = - PerfCounters::Create({kGenericPerfEvent1, kGenericPerfEvent3}); + PerfCounters::Create({kGenericPerfEvent1, kGenericPerfEvent2}); for (auto& t : threads) t = std::thread(work); - counters.Snapshot(values1); + counters.Snapshot(before); for (auto& t : threads) t.join(); - counters.Snapshot(values2); + counters.Snapshot(after); } TEST(PerfCountersTest, MultiThreaded) { @@ -126,20 +237,71 @@ TEST(PerfCountersTest, MultiThreaded) { GTEST_SKIP() << "Test skipped because libpfm is not supported."; } EXPECT_TRUE(PerfCounters::Initialize()); - PerfCounterValues values1(2); - PerfCounterValues values2(2); + PerfCounterValues before(2); + PerfCounterValues after(2); - measure(2, &values1, &values2); - std::vector D1{static_cast(values2[0] - values1[0]), - static_cast(values2[1] - values1[1])}; + // Notice that this test will work even if we taskset it to a single CPU + // In this case the threads will run sequentially + // Start two threads and measure the number of combined cycles and + // instructions + measure(2, &before, &after); + std::vector Elapsed2Threads{ + static_cast(after[0] - before[0]), + static_cast(after[1] - before[1])}; - measure(4, &values1, &values2); - std::vector D2{static_cast(values2[0] - values1[0]), - static_cast(values2[1] - values1[1])}; + // Start four threads and measure the number of combined cycles and + // instructions + measure(4, &before, &after); + std::vector Elapsed4Threads{ + static_cast(after[0] - before[0]), + static_cast(after[1] - before[1])}; - // Some extra work will happen on the main thread - like joining the threads - // - so the ratio won't be quite 2.0, but very close. - EXPECT_GE(D2[0], 1.9 * D1[0]); - EXPECT_GE(D2[1], 1.9 * D1[1]); + // The following expectations fail (at least on a beefy workstation with lots + // of cpus) - it seems that in some circumstances the runtime of 4 threads + // can even be better than with 2. + // So instead of expecting 4 threads to be slower, let's just make sure they + // do not differ too much in general (one is not more than 10x than the + // other). + EXPECT_THAT(Elapsed4Threads[0] / Elapsed2Threads[0], AllOf(Gt(0.1), Lt(10))); + EXPECT_THAT(Elapsed4Threads[1] / Elapsed2Threads[1], AllOf(Gt(0.1), Lt(10))); } + +TEST(PerfCountersTest, HardwareLimits) { + // The test works (i.e. causes read to fail) for the assumptions + // about hardware capabilities (i.e. small number (3-4) hardware + // counters) at this date, + // the same as previous test ReopenExistingCounters. + if (!PerfCounters::kSupported) { + GTEST_SKIP() << "Test skipped because libpfm is not supported.\n"; + } + EXPECT_TRUE(PerfCounters::Initialize()); + + // Taken from `perf list`, but focusses only on those HW events that actually + // were reported when running `sudo perf stat -a sleep 10`, intersected over + // several platforms. All HW events listed in the first command not reported + // in the second seem to not work. This is sad as we don't really get to test + // the grouping here (groups can contain up to 6 members)... 
+ std::vector counter_names{ + "cycles", // leader + "instructions", // + "branch-misses", // + }; + + // In the off-chance that some of these values are not supported, + // we filter them out so the test will complete without failure + // albeit it might not actually test the grouping on that platform + std::vector valid_names; + for (const std::string& name : counter_names) { + if (PerfCounters::IsCounterSupported(name)) { + valid_names.push_back(name); + } + } + PerfCountersMeasurement counter(valid_names); + + std::vector> measurements; + + counter.Start(); + EXPECT_TRUE(counter.Stop(measurements)); +} + } // namespace diff --git a/test/perf_counters_test.cc b/test/perf_counters_test.cc index 3017a45..b0a3ab0 100644 --- a/test/perf_counters_test.cc +++ b/test/perf_counters_test.cc @@ -1,27 +1,92 @@ +#include #undef NDEBUG +#include "../src/commandlineflags.h" #include "../src/perf_counters.h" - #include "benchmark/benchmark.h" #include "output_test.h" +namespace benchmark { + +BM_DECLARE_string(benchmark_perf_counters); + +} // namespace benchmark + static void BM_Simple(benchmark::State& state) { for (auto _ : state) { - benchmark::DoNotOptimize(state.iterations()); + auto iterations = state.iterations(); + benchmark::DoNotOptimize(iterations); } } BENCHMARK(BM_Simple); ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Simple\",$"}}); +const int kIters = 1000000; + +void BM_WithoutPauseResume(benchmark::State& state) { + int n = 0; + + for (auto _ : state) { + for (auto i = 0; i < kIters; ++i) { + n = 1 - n; + benchmark::DoNotOptimize(n); + } + } +} + +BENCHMARK(BM_WithoutPauseResume); +ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_WithoutPauseResume\",$"}}); + +void BM_WithPauseResume(benchmark::State& state) { + int m = 0, n = 0; + + for (auto _ : state) { + for (auto i = 0; i < kIters; ++i) { + n = 1 - n; + benchmark::DoNotOptimize(n); + } + + state.PauseTiming(); + for (auto j = 0; j < kIters; ++j) { + m = 1 - m; + benchmark::DoNotOptimize(m); + } + state.ResumeTiming(); + } +} + +BENCHMARK(BM_WithPauseResume); + +ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_WithPauseResume\",$"}}); + static void CheckSimple(Results const& e) { CHECK_COUNTER_VALUE(e, double, "CYCLES", GT, 0); - CHECK_COUNTER_VALUE(e, double, "BRANCHES", GT, 0.0); } + +double withoutPauseResumeInstrCount = 0.0; +double withPauseResumeInstrCount = 0.0; + +static void SaveInstrCountWithoutResume(Results const& e) { + withoutPauseResumeInstrCount = e.GetAs("INSTRUCTIONS"); +} + +static void SaveInstrCountWithResume(Results const& e) { + withPauseResumeInstrCount = e.GetAs("INSTRUCTIONS"); +} + CHECK_BENCHMARK_RESULTS("BM_Simple", &CheckSimple); +CHECK_BENCHMARK_RESULTS("BM_WithoutPauseResume", &SaveInstrCountWithoutResume); +CHECK_BENCHMARK_RESULTS("BM_WithPauseResume", &SaveInstrCountWithResume); int main(int argc, char* argv[]) { if (!benchmark::internal::PerfCounters::kSupported) { return 0; } + benchmark::FLAGS_benchmark_perf_counters = "CYCLES,INSTRUCTIONS"; + benchmark::internal::PerfCounters::Initialize(); RunOutputTests(argc, argv); + + BM_CHECK_GT(withPauseResumeInstrCount, kIters); + BM_CHECK_GT(withoutPauseResumeInstrCount, kIters); + BM_CHECK_LT(withPauseResumeInstrCount, 1.5 * withoutPauseResumeInstrCount); } diff --git a/test/register_benchmark_test.cc b/test/register_benchmark_test.cc index 602405b..d69d144 100644 --- a/test/register_benchmark_test.cc +++ b/test/register_benchmark_test.cc @@ -10,7 +10,7 @@ namespace { class TestReporter : public benchmark::ConsoleReporter { public: - virtual void ReportRuns(const 
std::vector& report) BENCHMARK_OVERRIDE { + void ReportRuns(const std::vector& report) override { all_runs_.insert(all_runs_.end(), begin(report), end(report)); ConsoleReporter::ReportRuns(report); } @@ -19,11 +19,11 @@ class TestReporter : public benchmark::ConsoleReporter { }; struct TestCase { - std::string name; - const char* label; + const std::string name; + const std::string label; // Note: not explicit as we rely on it being converted through ADD_CASES. - TestCase(const char* xname) : TestCase(xname, nullptr) {} - TestCase(const char* xname, const char* xlabel) + TestCase(const std::string& xname) : TestCase(xname, "") {} + TestCase(const std::string& xname, const std::string& xlabel) : name(xname), label(xlabel) {} typedef benchmark::BenchmarkReporter::Run Run; @@ -32,7 +32,7 @@ struct TestCase { // clang-format off BM_CHECK(name == run.benchmark_name()) << "expected " << name << " got " << run.benchmark_name(); - if (label) { + if (!label.empty()) { BM_CHECK(run.report_label == label) << "expected " << label << " got " << run.report_label; } else { @@ -95,6 +95,18 @@ ADD_CASES({"test1", "One"}, {"test2", "Two"}, {"test3", "Three"}); #endif // BENCHMARK_HAS_NO_VARIADIC_REGISTER_BENCHMARK +//----------------------------------------------------------------------------// +// Test RegisterBenchmark with DISABLED_ benchmark +//----------------------------------------------------------------------------// +void DISABLED_BM_function(benchmark::State& state) { + for (auto _ : state) { + } +} +BENCHMARK(DISABLED_BM_function); +ReturnVal dummy3 = benchmark::RegisterBenchmark("DISABLED_BM_function_manual", + DISABLED_BM_function); +// No need to add cases because we don't expect them to run. + //----------------------------------------------------------------------------// // Test RegisterBenchmark with different callable types //----------------------------------------------------------------------------// @@ -111,7 +123,7 @@ void TestRegistrationAtRuntime() { { CustomFixture fx; benchmark::RegisterBenchmark("custom_fixture", fx); - AddCases({"custom_fixture"}); + AddCases({std::string("custom_fixture")}); } #endif #ifndef BENCHMARK_HAS_NO_VARIADIC_REGISTER_BENCHMARK diff --git a/test/reporter_output_test.cc b/test/reporter_output_test.cc index 2b6e654..2eb545a 100644 --- a/test/reporter_output_test.cc +++ b/test/reporter_output_test.cc @@ -17,7 +17,7 @@ static int AddContextCases() { AddCases(TC_ConsoleErr, { {"^%int-%int-%intT%int:%int:%int[-+]%int:%int$", MR_Default}, - {"Running .*/reporter_output_test(\\.exe)?$", MR_Next}, + {"Running .*(/|\\\\)reporter_output_test(\\.exe)?$", MR_Next}, {"Run on \\(%int X %float MHz CPU s?\\)", MR_Next}, }); AddCases(TC_JSONOut, @@ -93,7 +93,8 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_basic\",%csv_report$"}}); void BM_bytes_per_second(benchmark::State& state) { for (auto _ : state) { // This test requires a non-zero CPU time to avoid divide-by-zero - benchmark::DoNotOptimize(state.iterations()); + auto iterations = state.iterations(); + benchmark::DoNotOptimize(iterations); } state.SetBytesProcessed(1); } @@ -124,7 +125,8 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_bytes_per_second\",%csv_bytes_report$"}}); void BM_items_per_second(benchmark::State& state) { for (auto _ : state) { // This test requires a non-zero CPU time to avoid divide-by-zero - benchmark::DoNotOptimize(state.iterations()); + auto iterations = state.iterations(); + benchmark::DoNotOptimize(iterations); } state.SetItemsProcessed(1); } @@ -318,7 +320,7 @@ ADD_CASES(TC_JSONOut, {{"\"name\": 
\"BM_no_arg_name/3\",$"}, ADD_CASES(TC_CSVOut, {{"^\"BM_no_arg_name/3\",%csv_report$"}}); // ========================================================================= // -// ------------------------ Testing Arg Name Output ----------------------- // +// ------------------------ Testing Arg Name Output ------------------------ // // ========================================================================= // void BM_arg_name(benchmark::State& state) { @@ -404,7 +406,8 @@ ADD_CASES(TC_ConsoleOut, {{"^BM_BigArgs/1073741824 %console_report$"}, void BM_Complexity_O1(benchmark::State& state) { for (auto _ : state) { // This test requires a non-zero CPU time to avoid divide-by-zero - benchmark::DoNotOptimize(state.iterations()); + auto iterations = state.iterations(); + benchmark::DoNotOptimize(iterations); } state.SetComplexityN(state.range(0)); } diff --git a/test/skip_with_error_test.cc b/test/skip_with_error_test.cc index 026d479..b4c5e15 100644 --- a/test/skip_with_error_test.cc +++ b/test/skip_with_error_test.cc @@ -10,17 +10,17 @@ namespace { class TestReporter : public benchmark::ConsoleReporter { public: - virtual bool ReportContext(const Context& context) BENCHMARK_OVERRIDE { + bool ReportContext(const Context& context) override { return ConsoleReporter::ReportContext(context); }; - virtual void ReportRuns(const std::vector& report) BENCHMARK_OVERRIDE { + void ReportRuns(const std::vector& report) override { all_runs_.insert(all_runs_.end(), begin(report), end(report)); ConsoleReporter::ReportRuns(report); } TestReporter() {} - virtual ~TestReporter() {} + ~TestReporter() override {} mutable std::vector all_runs_; }; @@ -35,8 +35,9 @@ struct TestCase { void CheckRun(Run const& run) const { BM_CHECK(name == run.benchmark_name()) << "expected " << name << " got " << run.benchmark_name(); - BM_CHECK(error_occurred == run.error_occurred); - BM_CHECK(error_message == run.error_message); + BM_CHECK_EQ(error_occurred, + benchmark::internal::SkippedWithError == run.skipped); + BM_CHECK(error_message == run.skip_message); if (error_occurred) { // BM_CHECK(run.iterations == 0); } else { @@ -47,7 +48,8 @@ struct TestCase { std::vector ExpectedResults; -int AddCases(const char* base_name, std::initializer_list const& v) { +int AddCases(const std::string& base_name, + std::initializer_list const& v) { for (auto TC : v) { TC.name = base_name + TC.name; ExpectedResults.push_back(std::move(TC)); @@ -141,7 +143,8 @@ ADD_CASES("BM_error_during_running_ranged_for", void BM_error_after_running(benchmark::State& state) { for (auto _ : state) { - benchmark::DoNotOptimize(state.iterations()); + auto iterations = state.iterations(); + benchmark::DoNotOptimize(iterations); } if (state.thread_index() <= (state.threads() / 2)) state.SkipWithError("error message"); diff --git a/test/spec_arg_test.cc b/test/spec_arg_test.cc index 043db1b..06aafbe 100644 --- a/test/spec_arg_test.cc +++ b/test/spec_arg_test.cc @@ -17,11 +17,11 @@ namespace { class TestReporter : public benchmark::ConsoleReporter { public: - virtual bool ReportContext(const Context& context) BENCHMARK_OVERRIDE { + bool ReportContext(const Context& context) override { return ConsoleReporter::ReportContext(context); }; - virtual void ReportRuns(const std::vector& report) BENCHMARK_OVERRIDE { + void ReportRuns(const std::vector& report) override { assert(report.size() == 1); matched_functions.push_back(report[0].run_name.function_name); ConsoleReporter::ReportRuns(report); @@ -29,7 +29,7 @@ class TestReporter : public benchmark::ConsoleReporter { 
TestReporter() {} - virtual ~TestReporter() {} + ~TestReporter() override {} const std::vector& GetMatchedFunctions() const { return matched_functions; @@ -91,5 +91,15 @@ int main(int argc, char** argv) { << matched_functions.front() << "]\n"; return 2; } + + // Test that SetBenchmarkFilter works. + const std::string golden_value = "golden_value"; + benchmark::SetBenchmarkFilter(golden_value); + std::string current_value = benchmark::GetBenchmarkFilter(); + if (golden_value != current_value) { + std::cerr << "Expected [" << golden_value + << "] for --benchmark_filter but got [" << current_value << "]\n"; + return 3; + } return 0; } diff --git a/test/spec_arg_verbosity_test.cc b/test/spec_arg_verbosity_test.cc new file mode 100644 index 0000000..8f8eb6d --- /dev/null +++ b/test/spec_arg_verbosity_test.cc @@ -0,0 +1,43 @@ +#include + +#include + +#include "benchmark/benchmark.h" + +// Tests that the user specified verbosity level can be get. +static void BM_Verbosity(benchmark::State& state) { + for (auto _ : state) { + } +} +BENCHMARK(BM_Verbosity); + +int main(int argc, char** argv) { + const int32_t flagv = 42; + + // Verify that argv specify --v=42. + bool found = false; + for (int i = 0; i < argc; ++i) { + if (strcmp("--v=42", argv[i]) == 0) { + found = true; + break; + } + } + if (!found) { + std::cerr << "This test requires '--v=42' to be passed as a command-line " + << "argument.\n"; + return 1; + } + + benchmark::Initialize(&argc, argv); + + // Check that the current flag value is reported accurately via the + // GetBenchmarkVerbosity() function. + if (flagv != benchmark::GetBenchmarkVerbosity()) { + std::cerr + << "Seeing different value for flags. GetBenchmarkVerbosity() returns [" + << benchmark::GetBenchmarkVerbosity() << "] expected flag=[" << flagv + << "]\n"; + return 1; + } + return 0; +} diff --git a/test/string_util_gtest.cc b/test/string_util_gtest.cc index 698f2d4..67b4bc0 100644 --- a/test/string_util_gtest.cc +++ b/test/string_util_gtest.cc @@ -1,9 +1,12 @@ //===---------------------------------------------------------------------===// -// statistics_test - Unit tests for src/statistics.cc +// string_util_test - Unit tests for src/string_util.cc //===---------------------------------------------------------------------===// +#include + #include "../src/internal_macros.h" #include "../src/string_util.h" +#include "gmock/gmock.h" #include "gtest/gtest.h" namespace { @@ -63,7 +66,10 @@ TEST(StringUtilTest, stoul) { EXPECT_EQ(4ul, pos); } #ifndef BENCHMARK_HAS_NO_EXCEPTIONS - { ASSERT_THROW(benchmark::stoul("this is a test"), std::invalid_argument); } + { + ASSERT_THROW(std::ignore = benchmark::stoul("this is a test"), + std::invalid_argument); + } #endif } @@ -107,7 +113,10 @@ EXPECT_EQ(1ul, pos); EXPECT_EQ(4ul, pos); } #ifndef BENCHMARK_HAS_NO_EXCEPTIONS -{ ASSERT_THROW(benchmark::stoi("this is a test"), std::invalid_argument); } +{ + ASSERT_THROW(std::ignore = benchmark::stoi("this is a test"), + std::invalid_argument); +} #endif } @@ -137,7 +146,10 @@ EXPECT_EQ(1ul, pos); EXPECT_EQ(8ul, pos); } #ifndef BENCHMARK_HAS_NO_EXCEPTIONS -{ ASSERT_THROW(benchmark::stod("this is a test"), std::invalid_argument); } +{ + ASSERT_THROW(std::ignore = benchmark::stod("this is a test"), + std::invalid_argument); +} #endif } @@ -149,4 +161,39 @@ TEST(StringUtilTest, StrSplit) { std::vector({"hello", "there", "is", "more"})); } +using HumanReadableFixture = ::testing::TestWithParam< + std::tuple>; + +INSTANTIATE_TEST_SUITE_P( + HumanReadableTests, HumanReadableFixture, + 
::testing::Values( + std::make_tuple(0.0, benchmark::Counter::kIs1024, "0"), + std::make_tuple(999.0, benchmark::Counter::kIs1024, "999"), + std::make_tuple(1000.0, benchmark::Counter::kIs1024, "1000"), + std::make_tuple(1024.0, benchmark::Counter::kIs1024, "1Ki"), + std::make_tuple(1000 * 1000.0, benchmark::Counter::kIs1024, + "976\\.56.Ki"), + std::make_tuple(1024 * 1024.0, benchmark::Counter::kIs1024, "1Mi"), + std::make_tuple(1000 * 1000 * 1000.0, benchmark::Counter::kIs1024, + "953\\.674Mi"), + std::make_tuple(1024 * 1024 * 1024.0, benchmark::Counter::kIs1024, + "1Gi"), + std::make_tuple(0.0, benchmark::Counter::kIs1000, "0"), + std::make_tuple(999.0, benchmark::Counter::kIs1000, "999"), + std::make_tuple(1000.0, benchmark::Counter::kIs1000, "1k"), + std::make_tuple(1024.0, benchmark::Counter::kIs1000, "1.024k"), + std::make_tuple(1000 * 1000.0, benchmark::Counter::kIs1000, "1M"), + std::make_tuple(1024 * 1024.0, benchmark::Counter::kIs1000, + "1\\.04858M"), + std::make_tuple(1000 * 1000 * 1000.0, benchmark::Counter::kIs1000, + "1G"), + std::make_tuple(1024 * 1024 * 1024.0, benchmark::Counter::kIs1000, + "1\\.07374G"))); + +TEST_P(HumanReadableFixture, HumanReadableNumber) { + std::string str = benchmark::HumanReadableNumber(std::get<0>(GetParam()), + std::get<1>(GetParam())); + ASSERT_THAT(str, ::testing::MatchesRegex(std::get<2>(GetParam()))); +} + } // end namespace diff --git a/test/time_unit_gtest.cc b/test/time_unit_gtest.cc new file mode 100644 index 0000000..484ecbc --- /dev/null +++ b/test/time_unit_gtest.cc @@ -0,0 +1,37 @@ +#include "../include/benchmark/benchmark.h" +#include "gtest/gtest.h" + +namespace benchmark { +namespace internal { + +namespace { + +class DummyBenchmark : public Benchmark { + public: + DummyBenchmark() : Benchmark("dummy") {} + void Run(State&) override {} +}; + +TEST(DefaultTimeUnitTest, TimeUnitIsNotSet) { + DummyBenchmark benchmark; + EXPECT_EQ(benchmark.GetTimeUnit(), kNanosecond); +} + +TEST(DefaultTimeUnitTest, DefaultIsSet) { + DummyBenchmark benchmark; + EXPECT_EQ(benchmark.GetTimeUnit(), kNanosecond); + SetDefaultTimeUnit(kMillisecond); + EXPECT_EQ(benchmark.GetTimeUnit(), kMillisecond); +} + +TEST(DefaultTimeUnitTest, DefaultAndExplicitUnitIsSet) { + DummyBenchmark benchmark; + benchmark.Unit(kMillisecond); + SetDefaultTimeUnit(kMicrosecond); + + EXPECT_EQ(benchmark.GetTimeUnit(), kMillisecond); +} + +} // namespace +} // namespace internal +} // namespace benchmark diff --git a/test/user_counters_tabular_test.cc b/test/user_counters_tabular_test.cc index 45ac043..c98b769 100644 --- a/test/user_counters_tabular_test.cc +++ b/test/user_counters_tabular_test.cc @@ -372,7 +372,8 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_Tabular/repeats:2/threads:2$", void BM_CounterRates_Tabular(benchmark::State& state) { for (auto _ : state) { // This test requires a non-zero CPU time to avoid divide-by-zero - benchmark::DoNotOptimize(state.iterations()); + auto iterations = state.iterations(); + benchmark::DoNotOptimize(iterations); } namespace bm = benchmark; state.counters.insert({ diff --git a/test/user_counters_test.cc b/test/user_counters_test.cc index 1cc7455..4cd8ee3 100644 --- a/test/user_counters_test.cc +++ b/test/user_counters_test.cc @@ -67,7 +67,8 @@ int num_calls1 = 0; void BM_Counters_WithBytesAndItemsPSec(benchmark::State& state) { for (auto _ : state) { // This test requires a non-zero CPU time to avoid divide-by-zero - benchmark::DoNotOptimize(state.iterations()); + auto iterations = state.iterations(); + benchmark::DoNotOptimize(iterations); 
} state.counters["foo"] = 1; state.counters["bar"] = ++num_calls1; @@ -118,7 +119,8 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_WithBytesAndItemsPSec", void BM_Counters_Rate(benchmark::State& state) { for (auto _ : state) { // This test requires a non-zero CPU time to avoid divide-by-zero - benchmark::DoNotOptimize(state.iterations()); + auto iterations = state.iterations(); + benchmark::DoNotOptimize(iterations); } namespace bm = benchmark; state.counters["foo"] = bm::Counter{1, bm::Counter::kIsRate}; @@ -161,7 +163,8 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_Rate", &CheckRate); void BM_Invert(benchmark::State& state) { for (auto _ : state) { // This test requires a non-zero CPU time to avoid divide-by-zero - benchmark::DoNotOptimize(state.iterations()); + auto iterations = state.iterations(); + benchmark::DoNotOptimize(iterations); } namespace bm = benchmark; state.counters["foo"] = bm::Counter{0.0001, bm::Counter::kInvert}; @@ -195,14 +198,14 @@ void CheckInvert(Results const& e) { CHECK_BENCHMARK_RESULTS("BM_Invert", &CheckInvert); // ========================================================================= // -// ------------------------- InvertedRate Counters Output -// -------------------------- // +// --------------------- InvertedRate Counters Output ---------------------- // // ========================================================================= // void BM_Counters_InvertedRate(benchmark::State& state) { for (auto _ : state) { // This test requires a non-zero CPU time to avoid divide-by-zero - benchmark::DoNotOptimize(state.iterations()); + auto iterations = state.iterations(); + benchmark::DoNotOptimize(iterations); } namespace bm = benchmark; state.counters["foo"] = @@ -330,7 +333,8 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_AvgThreads/threads:%int", void BM_Counters_AvgThreadsRate(benchmark::State& state) { for (auto _ : state) { // This test requires a non-zero CPU time to avoid divide-by-zero - benchmark::DoNotOptimize(state.iterations()); + auto iterations = state.iterations(); + benchmark::DoNotOptimize(iterations); } namespace bm = benchmark; state.counters["foo"] = bm::Counter{1, bm::Counter::kAvgThreadsRate}; @@ -417,7 +421,8 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_IterationInvariant", void BM_Counters_kIsIterationInvariantRate(benchmark::State& state) { for (auto _ : state) { // This test requires a non-zero CPU time to avoid divide-by-zero - benchmark::DoNotOptimize(state.iterations()); + auto iterations = state.iterations(); + benchmark::DoNotOptimize(iterations); } namespace bm = benchmark; state.counters["foo"] = @@ -460,7 +465,7 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_kIsIterationInvariantRate", &CheckIsIterationInvariantRate); // ========================================================================= // -// ------------------- AvgIterations Counters Output ------------------ // +// --------------------- AvgIterations Counters Output --------------------- // // ========================================================================= // void BM_Counters_AvgIterations(benchmark::State& state) { @@ -502,13 +507,14 @@ void CheckAvgIterations(Results const& e) { CHECK_BENCHMARK_RESULTS("BM_Counters_AvgIterations", &CheckAvgIterations); // ========================================================================= // -// ----------------- AvgIterationsRate Counters Output ---------------- // +// ------------------- AvgIterationsRate Counters Output ------------------- // // ========================================================================= // void 
BM_Counters_kAvgIterationsRate(benchmark::State& state) { for (auto _ : state) { // This test requires a non-zero CPU time to avoid divide-by-zero - benchmark::DoNotOptimize(state.iterations()); + auto iterations = state.iterations(); + benchmark::DoNotOptimize(iterations); } namespace bm = benchmark; state.counters["foo"] = bm::Counter{1, bm::Counter::kAvgIterationsRate}; diff --git a/test/user_counters_thousands_test.cc b/test/user_counters_thousands_test.cc index a42683b..fc15383 100644 --- a/test/user_counters_thousands_test.cc +++ b/test/user_counters_thousands_test.cc @@ -16,13 +16,13 @@ void BM_Counters_Thousands(benchmark::State& state) { {"t0_1000000DefaultBase", bm::Counter(1000 * 1000, bm::Counter::kDefaults)}, {"t1_1000000Base1000", bm::Counter(1000 * 1000, bm::Counter::kDefaults, - benchmark::Counter::OneK::kIs1000)}, + bm::Counter::OneK::kIs1000)}, {"t2_1000000Base1024", bm::Counter(1000 * 1000, bm::Counter::kDefaults, - benchmark::Counter::OneK::kIs1024)}, + bm::Counter::OneK::kIs1024)}, {"t3_1048576Base1000", bm::Counter(1024 * 1024, bm::Counter::kDefaults, - benchmark::Counter::OneK::kIs1000)}, + bm::Counter::OneK::kIs1000)}, {"t4_1048576Base1024", bm::Counter(1024 * 1024, bm::Counter::kDefaults, - benchmark::Counter::OneK::kIs1024)}, + bm::Counter::OneK::kIs1024)}, }); } BENCHMARK(BM_Counters_Thousands)->Repetitions(2); @@ -30,21 +30,21 @@ ADD_CASES( TC_ConsoleOut, { {"^BM_Counters_Thousands/repeats:2 %console_report " - "t0_1000000DefaultBase=1000k " - "t1_1000000Base1000=1000k t2_1000000Base1024=976.56[23]k " - "t3_1048576Base1000=1048.58k t4_1048576Base1024=1024k$"}, + "t0_1000000DefaultBase=1M " + "t1_1000000Base1000=1M t2_1000000Base1024=976.56[23]Ki " + "t3_1048576Base1000=1.04858M t4_1048576Base1024=1Mi$"}, {"^BM_Counters_Thousands/repeats:2 %console_report " - "t0_1000000DefaultBase=1000k " - "t1_1000000Base1000=1000k t2_1000000Base1024=976.56[23]k " - "t3_1048576Base1000=1048.58k t4_1048576Base1024=1024k$"}, + "t0_1000000DefaultBase=1M " + "t1_1000000Base1000=1M t2_1000000Base1024=976.56[23]Ki " + "t3_1048576Base1000=1.04858M t4_1048576Base1024=1Mi$"}, {"^BM_Counters_Thousands/repeats:2_mean %console_report " - "t0_1000000DefaultBase=1000k t1_1000000Base1000=1000k " - "t2_1000000Base1024=976.56[23]k t3_1048576Base1000=1048.58k " - "t4_1048576Base1024=1024k$"}, + "t0_1000000DefaultBase=1M t1_1000000Base1000=1M " + "t2_1000000Base1024=976.56[23]Ki t3_1048576Base1000=1.04858M " + "t4_1048576Base1024=1Mi$"}, {"^BM_Counters_Thousands/repeats:2_median %console_report " - "t0_1000000DefaultBase=1000k t1_1000000Base1000=1000k " - "t2_1000000Base1024=976.56[23]k t3_1048576Base1000=1048.58k " - "t4_1048576Base1024=1024k$"}, + "t0_1000000DefaultBase=1M t1_1000000Base1000=1M " + "t2_1000000Base1024=976.56[23]Ki t3_1048576Base1000=1.04858M " + "t4_1048576Base1024=1Mi$"}, {"^BM_Counters_Thousands/repeats:2_stddev %console_time_only_report [ " "]*2 t0_1000000DefaultBase=0 t1_1000000Base1000=0 " "t2_1000000Base1024=0 t3_1048576Base1000=0 t4_1048576Base1024=0$"}, diff --git a/tools/BUILD.bazel b/tools/BUILD.bazel index 5895883..d25caa7 100644 --- a/tools/BUILD.bazel +++ b/tools/BUILD.bazel @@ -1,4 +1,4 @@ -load("@py_deps//:requirements.bzl", "requirement") +load("@tools_pip_deps//:requirements.bzl", "requirement") py_library( name = "gbench", @@ -12,7 +12,7 @@ py_library( py_binary( name = "compare", srcs = ["compare.py"], - python_version = "PY2", + python_version = "PY3", deps = [ ":gbench", ], diff --git a/tools/compare.py b/tools/compare.py index 01d2c89..e5eeb24 100755 --- 
a/tools/compare.py +++ b/tools/compare.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 import unittest """ @@ -9,25 +9,28 @@ import argparse from argparse import ArgumentParser import json import sys +import os import gbench from gbench import util, report -from gbench.util import * def check_inputs(in1, in2, flags): """ Perform checking on the user provided inputs and diagnose any abnormalities """ - in1_kind, in1_err = classify_input_file(in1) - in2_kind, in2_err = classify_input_file(in2) - output_file = find_benchmark_flag('--benchmark_out=', flags) - output_type = find_benchmark_flag('--benchmark_out_format=', flags) - if in1_kind == IT_Executable and in2_kind == IT_Executable and output_file: + in1_kind, in1_err = util.classify_input_file(in1) + in2_kind, in2_err = util.classify_input_file(in2) + output_file = util.find_benchmark_flag('--benchmark_out=', flags) + output_type = util.find_benchmark_flag('--benchmark_out_format=', flags) + if in1_kind == util.IT_Executable and in2_kind == util.IT_Executable and output_file: print(("WARNING: '--benchmark_out=%s' will be passed to both " "benchmarks causing it to be overwritten") % output_file) - if in1_kind == IT_JSON and in2_kind == IT_JSON and len(flags) > 0: - print("WARNING: passing optional flags has no effect since both " - "inputs are JSON") + if in1_kind == util.IT_JSON and in2_kind == util.IT_JSON: + # When both sides are JSON the only supported flag is + # --benchmark_filter= + for flag in util.remove_benchmark_flags('--benchmark_filter=', flags): + print("WARNING: passing %s has no effect since both " + "inputs are JSON" % flag) if output_type is not None and output_type != 'json': print(("ERROR: passing '--benchmark_out_format=%s' to 'compare.py`" " is not supported.") % output_type) diff --git a/tools/gbench/Inputs/test1_run1.json b/tools/gbench/Inputs/test1_run1.json index 601e327..9daed0b 100644 --- a/tools/gbench/Inputs/test1_run1.json +++ b/tools/gbench/Inputs/test1_run1.json @@ -114,6 +114,14 @@ "real_time": 1, "cpu_time": 1, "time_unit": "s" + }, + { + "name": "BM_hasLabel", + "label": "a label", + "iterations": 1, + "real_time": 1, + "cpu_time": 1, + "time_unit": "s" } ] } diff --git a/tools/gbench/Inputs/test1_run2.json b/tools/gbench/Inputs/test1_run2.json index 3cbcf39..dc52970 100644 --- a/tools/gbench/Inputs/test1_run2.json +++ b/tools/gbench/Inputs/test1_run2.json @@ -114,6 +114,14 @@ "real_time": 1, "cpu_time": 1, "time_unit": "ns" + }, + { + "name": "BM_hasLabel", + "label": "a label", + "iterations": 1, + "real_time": 1, + "cpu_time": 1, + "time_unit": "s" } ] } diff --git a/tools/gbench/report.py b/tools/gbench/report.py index 4c798ba..b2bbfb9 100644 --- a/tools/gbench/report.py +++ b/tools/gbench/report.py @@ -9,7 +9,6 @@ import random from scipy.stats import mannwhitneyu, gmean from numpy import array -from pandas import Timedelta class BenchmarkColor(object): @@ -43,6 +42,13 @@ UTEST_MIN_REPETITIONS = 2 UTEST_OPTIMAL_REPETITIONS = 9 # Lowest reasonable number, More is better. UTEST_COL_NAME = "_pvalue" +_TIME_UNIT_TO_SECONDS_MULTIPLIER = { + "s": 1.0, + "ms": 1e-3, + "us": 1e-6, + "ns": 1e-9, +} + def color_format(use_color, fmt_str, *args, **kwargs): """ @@ -157,9 +163,9 @@ def get_timedelta_field_as_seconds(benchmark, field_name): Get value of field_name field of benchmark, which is time with time unit time_unit, as time in seconds. 
""" - time_unit = benchmark['time_unit'] if 'time_unit' in benchmark else 's' - dt = Timedelta(benchmark[field_name], time_unit) - return dt / Timedelta(1, 's') + timedelta = benchmark[field_name] + time_unit = benchmark.get('time_unit', 's') + return timedelta * _TIME_UNIT_TO_SECONDS_MULTIPLIER.get(time_unit) def calculate_geomean(json): @@ -249,6 +255,7 @@ def get_difference_report( partitions = partition_benchmarks(json1, json2) for partition in partitions: benchmark_name = partition[0][0]['name'] + label = partition[0][0]['label'] if 'label' in partition[0][0] else '' time_unit = partition[0][0]['time_unit'] measurements = [] utest_results = {} @@ -289,6 +296,7 @@ def get_difference_report( aggregate_name = partition[0][0]['aggregate_name'] if run_type == 'aggregate' and 'aggregate_name' in partition[0][0] else '' diff_report.append({ 'name': benchmark_name, + 'label': label, 'measurements': measurements, 'time_unit': time_unit, 'run_type': run_type, @@ -301,6 +309,7 @@ def get_difference_report( if lhs_gmean.any() and rhs_gmean.any(): diff_report.append({ 'name': 'OVERALL_GEOMEAN', + 'label': '', 'measurements': [{ 'real_time': lhs_gmean[0], 'cpu_time': lhs_gmean[1], @@ -450,7 +459,8 @@ class TestReportDifference(unittest.TestCase): '-0.1000', '100', '110', '100', '90'], ['BM_ThirdFaster', '-0.3333', '-0.3334', '100', '67', '100', '67'], ['BM_NotBadTimeUnit', '-0.9000', '+0.2000', '0', '0', '0', '1'], - ['OVERALL_GEOMEAN', '-0.8344', '-0.8026', '0', '0', '0', '0'] + ['BM_hasLabel', '+0.0000', '+0.0000', '1', '1', '1', '1'], + ['OVERALL_GEOMEAN', '-0.8113', '-0.7779', '0', '0', '0', '0'] ] output_lines_with_header = print_difference_report( self.json_diff_report, use_color=False) @@ -467,81 +477,127 @@ class TestReportDifference(unittest.TestCase): expected_output = [ { 'name': 'BM_SameTimes', - 'measurements': [{'time': 0.0000, 'cpu': 0.0000, 'real_time': 10, 'real_time_other': 10, 'cpu_time': 10, 'cpu_time_other': 10}], + 'label': '', + 'measurements': [{'time': 0.0000, 'cpu': 0.0000, + 'real_time': 10, 'real_time_other': 10, + 'cpu_time': 10, 'cpu_time_other': 10}], 'time_unit': 'ns', 'utest': {} }, { 'name': 'BM_2xFaster', - 'measurements': [{'time': -0.5000, 'cpu': -0.5000, 'real_time': 50, 'real_time_other': 25, 'cpu_time': 50, 'cpu_time_other': 25}], + 'label': '', + 'measurements': [{'time': -0.5000, 'cpu': -0.5000, + 'real_time': 50, 'real_time_other': 25, + 'cpu_time': 50, 'cpu_time_other': 25}], 'time_unit': 'ns', 'utest': {} }, { 'name': 'BM_2xSlower', - 'measurements': [{'time': 1.0000, 'cpu': 1.0000, 'real_time': 50, 'real_time_other': 100, 'cpu_time': 50, 'cpu_time_other': 100}], + 'label': '', + 'measurements': [{'time': 1.0000, 'cpu': 1.0000, + 'real_time': 50, 'real_time_other': 100, + 'cpu_time': 50, 'cpu_time_other': 100}], 'time_unit': 'ns', 'utest': {} }, { 'name': 'BM_1PercentFaster', - 'measurements': [{'time': -0.0100, 'cpu': -0.0100, 'real_time': 100, 'real_time_other': 98.9999999, 'cpu_time': 100, 'cpu_time_other': 98.9999999}], + 'label': '', + 'measurements': [{'time': -0.0100, 'cpu': -0.0100, + 'real_time': 100, 'real_time_other': 98.9999999, + 'cpu_time': 100, 'cpu_time_other': 98.9999999}], 'time_unit': 'ns', 'utest': {} }, { 'name': 'BM_1PercentSlower', - 'measurements': [{'time': 0.0100, 'cpu': 0.0100, 'real_time': 100, 'real_time_other': 101, 'cpu_time': 100, 'cpu_time_other': 101}], + 'label': '', + 'measurements': [{'time': 0.0100, 'cpu': 0.0100, + 'real_time': 100, 'real_time_other': 101, + 'cpu_time': 100, 'cpu_time_other': 101}], 'time_unit': 
'ns', 'utest': {} }, { 'name': 'BM_10PercentFaster', - 'measurements': [{'time': -0.1000, 'cpu': -0.1000, 'real_time': 100, 'real_time_other': 90, 'cpu_time': 100, 'cpu_time_other': 90}], + 'label': '', + 'measurements': [{'time': -0.1000, 'cpu': -0.1000, + 'real_time': 100, 'real_time_other': 90, + 'cpu_time': 100, 'cpu_time_other': 90}], 'time_unit': 'ns', 'utest': {} }, { 'name': 'BM_10PercentSlower', - 'measurements': [{'time': 0.1000, 'cpu': 0.1000, 'real_time': 100, 'real_time_other': 110, 'cpu_time': 100, 'cpu_time_other': 110}], + 'label': '', + 'measurements': [{'time': 0.1000, 'cpu': 0.1000, + 'real_time': 100, 'real_time_other': 110, + 'cpu_time': 100, 'cpu_time_other': 110}], 'time_unit': 'ns', 'utest': {} }, { 'name': 'BM_100xSlower', - 'measurements': [{'time': 99.0000, 'cpu': 99.0000, 'real_time': 100, 'real_time_other': 10000, 'cpu_time': 100, 'cpu_time_other': 10000}], + 'label': '', + 'measurements': [{'time': 99.0000, 'cpu': 99.0000, + 'real_time': 100, 'real_time_other': 10000, + 'cpu_time': 100, 'cpu_time_other': 10000}], 'time_unit': 'ns', 'utest': {} }, { 'name': 'BM_100xFaster', - 'measurements': [{'time': -0.9900, 'cpu': -0.9900, 'real_time': 10000, 'real_time_other': 100, 'cpu_time': 10000, 'cpu_time_other': 100}], + 'label': '', + 'measurements': [{'time': -0.9900, 'cpu': -0.9900, + 'real_time': 10000, 'real_time_other': 100, + 'cpu_time': 10000, 'cpu_time_other': 100}], 'time_unit': 'ns', 'utest': {} }, { 'name': 'BM_10PercentCPUToTime', - 'measurements': [{'time': 0.1000, 'cpu': -0.1000, 'real_time': 100, 'real_time_other': 110, 'cpu_time': 100, 'cpu_time_other': 90}], + 'label': '', + 'measurements': [{'time': 0.1000, 'cpu': -0.1000, + 'real_time': 100, 'real_time_other': 110, + 'cpu_time': 100, 'cpu_time_other': 90}], 'time_unit': 'ns', 'utest': {} }, { 'name': 'BM_ThirdFaster', - 'measurements': [{'time': -0.3333, 'cpu': -0.3334, 'real_time': 100, 'real_time_other': 67, 'cpu_time': 100, 'cpu_time_other': 67}], + 'label': '', + 'measurements': [{'time': -0.3333, 'cpu': -0.3334, + 'real_time': 100, 'real_time_other': 67, + 'cpu_time': 100, 'cpu_time_other': 67}], 'time_unit': 'ns', 'utest': {} }, { 'name': 'BM_NotBadTimeUnit', - 'measurements': [{'time': -0.9000, 'cpu': 0.2000, 'real_time': 0.4, 'real_time_other': 0.04, 'cpu_time': 0.5, 'cpu_time_other': 0.6}], + 'label': '', + 'measurements': [{'time': -0.9000, 'cpu': 0.2000, + 'real_time': 0.4, 'real_time_other': 0.04, + 'cpu_time': 0.5, 'cpu_time_other': 0.6}], + 'time_unit': 's', + 'utest': {} + }, + { + 'name': 'BM_hasLabel', + 'label': 'a label', + 'measurements': [{'time': 0.0000, 'cpu': 0.0000, + 'real_time': 1, 'real_time_other': 1, + 'cpu_time': 1, 'cpu_time_other': 1}], 'time_unit': 's', 'utest': {} }, { 'name': 'OVERALL_GEOMEAN', - 'measurements': [{'real_time': 1.193776641714438e-06, 'cpu_time': 1.2144445585302297e-06, + 'label': '', + 'measurements': [{'real_time': 3.1622776601683826e-06, 'cpu_time': 3.2130844755623912e-06, 'real_time_other': 1.9768988699420897e-07, 'cpu_time_other': 2.397447755209533e-07, - 'time': -0.834399601997324, 'cpu': -0.8025889499549471}], + 'time': -0.8112976497120911, 'cpu': -0.7778551721181174}], 'time_unit': 's', 'run_type': 'aggregate', 'aggregate_name': 'geomean', 'utest': {} @@ -551,6 +607,7 @@ class TestReportDifference(unittest.TestCase): for out, expected in zip( self.json_diff_report, expected_output): self.assertEqual(out['name'], expected['name']) + self.assertEqual(out['label'], expected['label']) self.assertEqual(out['time_unit'], expected['time_unit']) 
assert_utest(self, out, expected) assert_measurements(self, out, expected) diff --git a/tools/gbench/util.py b/tools/gbench/util.py index 5d0012c..5e79da8 100644 --- a/tools/gbench/util.py +++ b/tools/gbench/util.py @@ -2,10 +2,11 @@ """ import json import os -import tempfile +import re import subprocess import sys -import functools +import tempfile + # Input file type enumeration IT_Invalid = 0 @@ -58,7 +59,7 @@ def classify_input_file(filename): """ Return a tuple (type, msg) where 'type' specifies the classified type of 'filename'. If 'type' is 'IT_Invalid' then 'msg' is a human readable - string represeting the error. + string representing the error. """ ftype = IT_Invalid err_msg = None @@ -111,13 +112,32 @@ def remove_benchmark_flags(prefix, benchmark_flags): return [f for f in benchmark_flags if not f.startswith(prefix)] -def load_benchmark_results(fname): +def load_benchmark_results(fname, benchmark_filter): """ Read benchmark output from a file and return the JSON object. + + Apply benchmark_filter, a regular expression, with nearly the same + semantics of the --benchmark_filter argument. May be None. + Note: the Python regular expression engine is used instead of the + one used by the C++ code, which may produce different results + in complex cases. + REQUIRES: 'fname' names a file containing JSON benchmark output. """ + def benchmark_wanted(benchmark): + if benchmark_filter is None: + return True + name = benchmark.get('run_name', None) or benchmark['name'] + if re.search(benchmark_filter, name): + return True + return False + with open(fname, 'r') as f: - return json.load(f) + results = json.load(f) + if 'benchmarks' in results: + results['benchmarks'] = list(filter(benchmark_wanted, + results['benchmarks'])) + return results def sort_benchmark_results(result): @@ -160,7 +180,7 @@ def run_benchmark(exe_name, benchmark_flags): if exitCode != 0: print('TEST FAILED...') sys.exit(exitCode) - json_res = load_benchmark_results(output_name) + json_res = load_benchmark_results(output_name, None) if is_temp_output: os.unlink(output_name) return json_res @@ -175,7 +195,9 @@ def run_or_load_benchmark(filename, benchmark_flags): """ ftype = check_input_file(filename) if ftype == IT_JSON: - return load_benchmark_results(filename) + benchmark_filter = find_benchmark_flag('--benchmark_filter=', + benchmark_flags) + return load_benchmark_results(filename, benchmark_filter) if ftype == IT_Executable: return run_benchmark(filename, benchmark_flags) raise ValueError('Unknown file type %s' % ftype) diff --git a/tools/libpfm.BUILD.bazel b/tools/libpfm.BUILD.bazel new file mode 100644 index 0000000..6269534 --- /dev/null +++ b/tools/libpfm.BUILD.bazel @@ -0,0 +1,22 @@ +# Build rule for libpfm, which is required to collect performance counters for +# BENCHMARK_ENABLE_LIBPFM builds. 
+ +load("@rules_foreign_cc//foreign_cc:defs.bzl", "make") + +filegroup( + name = "pfm_srcs", + srcs = glob(["**"]), +) + +make( + name = "libpfm", + lib_source = ":pfm_srcs", + lib_name = "libpfm", + copts = [ + "-Wno-format-truncation", + "-Wno-use-after-free", + ], + visibility = [ + "//visibility:public", + ], +) diff --git a/tools/requirements.txt b/tools/requirements.txt index 3b3331b..f32f35b 100644 --- a/tools/requirements.txt +++ b/tools/requirements.txt @@ -1 +1,2 @@ -scipy>=1.5.0 \ No newline at end of file +numpy == 1.25 +scipy == 1.10.0 diff --git a/tools/strip_asm.py b/tools/strip_asm.py index 9030550..d131dc7 100755 --- a/tools/strip_asm.py +++ b/tools/strip_asm.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 """ strip_asm.py - Cleanup ASM output for the specified file From 0359b467adedf95e685c5b734ec68ac87b500de9 Mon Sep 17 00:00:00 2001 From: CrazyRong Date: Sat, 20 Apr 2024 17:00:55 +0800 Subject: [PATCH 2/4] =?UTF-8?q?=E5=8D=87=E7=BA=A7benchmark=E7=89=88?= =?UTF-8?q?=E6=9C=AC=E5=88=B0=E7=89=88=E6=9C=AC=E7=81=AB=E8=BD=A6=E8=A6=81?= =?UTF-8?q?=E6=B1=82=E7=9A=841.8.3=E7=89=88=E6=9C=AC?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: CrazyRong --- bundle.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bundle.json b/bundle.json index 2992320..3738263 100644 --- a/bundle.json +++ b/bundle.json @@ -32,7 +32,7 @@ "header":{ "header_files":[ "benchmark.h", - "export.h" + "export.h" ], "header_base":"//third_party/benchmark/include" } From 16ef5830aeb7f5eef29488d2a59ffa055a8e3c74 Mon Sep 17 00:00:00 2001 From: CrazyRong Date: Sat, 20 Apr 2024 18:07:05 +0800 Subject: [PATCH 3/4] =?UTF-8?q?=E5=8D=87=E7=BA=A7benchmark=E7=89=88?= =?UTF-8?q?=E6=9C=AC=E5=88=B0=E7=89=88=E6=9C=AC=E7=81=AB=E8=BD=A6=E8=A6=81?= =?UTF-8?q?=E6=B1=82=E7=9A=841.8.3=E7=89=88=E6=9C=AC?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: CrazyRong --- bundle.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bundle.json b/bundle.json index 3738263..2992320 100644 --- a/bundle.json +++ b/bundle.json @@ -32,7 +32,7 @@ "header":{ "header_files":[ "benchmark.h", - "export.h" + "export.h" ], "header_base":"//third_party/benchmark/include" } From 94c9a67cd9aecb97dd90d7439915cc6f25718092 Mon Sep 17 00:00:00 2001 From: CrazyRong Date: Sat, 20 Apr 2024 18:07:49 +0800 Subject: [PATCH 4/4] =?UTF-8?q?=E5=8D=87=E7=BA=A7benchmark=E7=89=88?= =?UTF-8?q?=E6=9C=AC=E5=88=B0=E7=89=88=E6=9C=AC=E7=81=AB=E8=BD=A6=E8=A6=81?= =?UTF-8?q?=E6=B1=82=E7=9A=841.8.3=E7=89=88=E6=9C=AC?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: CrazyRong --- bundle.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bundle.json b/bundle.json index 2992320..d2d19ee 100644 --- a/bundle.json +++ b/bundle.json @@ -32,7 +32,7 @@ "header":{ "header_files":[ "benchmark.h", - "export.h" + "export.h" ], "header_base":"//third_party/benchmark/include" }
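Usage sketch (illustrative, not part of the patch series): the reworked tools/gbench helpers in patch 1 can be exercised directly. The snippet below is a minimal example under stated assumptions only: it assumes it is run from the tools/ directory with the pinned numpy/scipy from tools/requirements.txt installed, and that "results.json" is a hypothetical file produced earlier with --benchmark_out=results.json. It uses the new benchmark_filter parameter of gbench.util.load_benchmark_results() and the pandas-free unit conversion in gbench.report.get_timedelta_field_as_seconds().

    # Illustrative sketch only. Loads a hypothetical results.json, keeps the
    # benchmarks whose name matches a Python regular expression (the filtering
    # added to load_benchmark_results), then prints each real_time converted
    # to seconds via the new multiplier table in report.py.
    from gbench import util
    from gbench.report import get_timedelta_field_as_seconds

    results = util.load_benchmark_results("results.json", "BM_hasLabel")
    for bm in results["benchmarks"]:
        secs = get_timedelta_field_as_seconds(bm, "real_time")
        print(bm["name"], secs, "s")

As the docstring added in patch 1 notes, this filtering uses Python's regex engine rather than the C++ engine behind --benchmark_filter, so results may differ in complex cases.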