[libc++] Unify the benchmarks with the test suite (#101399)

Instead of building the benchmarks separately via CMake and running them
separately from the test suite, this patch merges the benchmarks into
the test suite and handles both uniformly.

As a result:
- It is now possible to run individual benchmarks like we run tests
  (e.g. using libcxx-lit), which is a huge quality-of-life improvement.

- The benchmarks will be run under exactly the same configuration as
  the rest of the tests, which is a nice simplification. This does
  mean that one has to be careful to enable the desired optimization
  flags when running benchmarks, but that is easy with e.g.
  `libcxx-lit <...> --param optimization=speed`.

- Benchmarks can use the same annotations as the rest of the test
  suite, such as `// UNSUPPORTED` & friends.
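
  For illustration, a benchmark under this scheme is an ordinary test
  source that links against GoogleBenchmark (a minimal sketch; the names
  below are made up):

      // UNSUPPORTED: c++03, c++11, c++14, c++17
      #include <string>
      #include "benchmark/benchmark.h"

      // Measure std::string::find on a fixed haystack.
      static void bm_string_find(benchmark::State& state) {
        std::string haystack(1000, 'a');
        for (auto _ : state)
          benchmark::DoNotOptimize(haystack.find('z'));
      }
      BENCHMARK(bm_string_find);

      BENCHMARK_MAIN();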

When running the tests via `check-cxx`, we only compile the benchmarks
because running them would be too time consuming. This introduces a bit
of complexity in the testing setup, and instead it would be better to
allow passing a --dry-run flag to GoogleBenchmark executables, which is
the topic of https://github.com/google/benchmark/issues/1827.
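
Concretely, the intended workflow is something like this (illustrative
commands; `check-cxx` compiles the benchmarks without running them):

    $ ninja -C build check-cxx    # benchmarks are built, not run
    $ libcxx/utils/libcxx-lit build -sv libcxx/test/benchmarks --param optimization=speed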

I am not really satisfied with the layering violation of adding the
%{benchmark_flags} substitution to cmake-bridge; however, I believe
this can be improved in the future.
Louis Dionne 2024-11-07 09:07:50 -05:00 committed by GitHub
parent 4f24d0355a
commit e236a52a88
21 changed files with 105 additions and 413 deletions

@@ -159,9 +159,6 @@ jobs:
'generic-no-rtti',
'generic-optimized-speed',
'generic-static',
# TODO Find a better place for the benchmark and bootstrapping builds to live. They're either very expensive
# or don't provide much value since the benchmark run results are too noisy on the bots.
'benchmarks',
'bootstrapping-build'
]
machine: [ 'libcxx-runners-set' ]

@@ -150,12 +150,19 @@ message(STATUS "Using libc++ testing configuration: ${LIBCXX_TEST_CONFIG}")
set(LIBCXX_TEST_PARAMS "" CACHE STRING
"A list of parameters to run the Lit test suite with.")
# Benchmark options -----------------------------------------------------------
option(LIBCXX_INCLUDE_BENCHMARKS "Build the libc++ benchmarks and their dependencies" ON)
set(LIBCXX_BENCHMARK_TEST_ARGS_DEFAULT --benchmark_min_time=0.01)
set(LIBCXX_BENCHMARK_TEST_ARGS "${LIBCXX_BENCHMARK_TEST_ARGS_DEFAULT}" CACHE STRING
"Arguments to pass when running the benchmarks using check-cxx-benchmarks")
# TODO: Figure out how to build GoogleBenchmark on those platforms, and how to build when exceptions or RTTI is disabled
if (WIN32 OR MINGW OR ANDROID OR ${CMAKE_SYSTEM_NAME} MATCHES "AIX"
OR NOT LIBCXX_ENABLE_LOCALIZATION
OR NOT LIBCXX_ENABLE_THREADS
OR NOT LIBCXX_ENABLE_FILESYSTEM
OR NOT LIBCXX_ENABLE_RANDOM_DEVICE
OR NOT LIBCXX_ENABLE_EXCEPTIONS
OR NOT LIBCXX_ENABLE_RTTI)
set(_include_benchmarks OFF)
else()
set(_include_benchmarks ON)
endif()
option(LIBCXX_INCLUDE_BENCHMARKS "Build the libc++ benchmarks and their dependencies" ${_include_benchmarks})
option(LIBCXX_INCLUDE_DOCS "Build the libc++ documentation." ${LLVM_INCLUDE_DOCS})
set(LIBCXX_LIBDIR_SUFFIX "${LLVM_LIBDIR_SUFFIX}" CACHE STRING

@@ -392,6 +392,10 @@ Test Filenames`_ when determining the names for new test files.
of Lit test to be executed. This can be used to generate multiple Lit tests from a single source file, which is useful for testing repetitive properties
in the library. Be careful not to abuse this since this is not a replacement for usual code reuse techniques.
* - ``FOO.bench.cpp``
- A benchmark test. These tests are linked against the GoogleBenchmark library and generally consist of micro-benchmarks of individual
components of the library.
libc++-Specific Lit Features
----------------------------
@@ -438,44 +442,30 @@ Libc++ contains benchmark tests separately from the rest of the test suite.
The benchmarks are written using the `Google Benchmark`_ library, a copy of which
is stored in the libc++ repository.
For more information about using the Google Benchmark library see the
For more information about using the Google Benchmark library, see the
`official documentation <https://github.com/google/benchmark>`_.
The benchmarks are located under ``libcxx/test/benchmarks``. Running a benchmark
works in the same way as running a test. Both the benchmarks and the tests share
the same configuration, so make sure to enable the relevant optimization level
when running the benchmarks. For example,
.. code-block:: bash
$ libcxx/utils/libcxx-lit <build> -sv libcxx/test/benchmarks/string.bench.cpp --param optimization=speed
If you want to see where a benchmark is located (e.g. you want to store the executable
for subsequent analysis), you can print that information by passing ``--show-all`` to
``lit``. That will print the command lines being executed, which include the location
of the executable created for that benchmark.
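For example (a sketch; combine ``--show-all`` with whatever other parameters you use):
.. code-block:: bash
$ libcxx/utils/libcxx-lit <build> -sv libcxx/test/benchmarks/string.bench.cpp --param optimization=speed --show-all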
Note that benchmarks are only dry-run when run via the ``check-cxx`` target, since
we only want to make sure they don't rot. Do not rely on the results of benchmarks
run through ``check-cxx`` for anything; instead, run the benchmarks manually using
the instructions for running individual tests.
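The dry-run behaviour used by ``check-cxx`` can also be requested explicitly through the ``enable_benchmarks`` Lit parameter added in this patch (illustrative invocation):
.. code-block:: bash
$ libcxx/utils/libcxx-lit <build> -sv libcxx/test/benchmarks --param enable_benchmarks=dry-run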
.. _`Google Benchmark`: https://github.com/google/benchmark
Building Benchmarks
-------------------
The benchmark tests are not built by default. The benchmarks can be built using
the ``cxx-benchmarks`` target.
An example build would look like:
.. code-block:: bash
$ ninja -C build cxx-benchmarks
This will build all of the benchmarks under ``<libcxx>/test/benchmarks`` against
the just-built libc++. The compiled tests are output into
``build/libcxx/test/benchmarks``.
Running Benchmarks
------------------
The benchmarks must be run manually by the user. Currently there is no way
to run them as part of the build.
For example:
.. code-block:: bash
$ cd build/libcxx/test/benchmarks
$ ./find.bench.out # Runs all the benchmarks
$ ./find.bench.out --benchmark_filter="bm_ranges_find<std::vector<char>>" # Only runs that specific benchmark
For more information about running benchmarks see `Google Benchmark`_.
.. _testing-hardening-assertions:
Testing hardening assertions
@@ -518,4 +508,3 @@ A toy example:
Note that error messages are only tested (matched) if the ``debug``
hardening mode is used.

@@ -244,7 +244,8 @@ General purpose options
**Default**: ``ON`` (or value of ``LLVM_INCLUDE_TESTS``)
Build the libc++ tests.
Build the libc++ test suite, which includes various types of tests like conformance
tests, vendor-specific tests and benchmarks.
.. option:: LIBCXX_INCLUDE_BENCHMARKS:BOOL
@@ -253,15 +254,6 @@ General purpose options
Build the libc++ benchmark tests and the Google Benchmark library needed
to support them.
.. option:: LIBCXX_BENCHMARK_TEST_ARGS:STRING
**Default**: ``--benchmark_min_time=0.01``
A semicolon-separated list of arguments to pass when running the libc++ benchmarks using the
``check-cxx-benchmarks`` rule. By default we run the benchmarks for a very short amount of time,
since the primary use of ``check-cxx-benchmarks`` is to get test and sanitizer coverage, not to
get accurate measurements.
.. option:: LIBCXX_ASSERTION_HANDLER_FILE:PATH
**Default**:: ``"${CMAKE_CURRENT_SOURCE_DIR}/vendor/llvm/default_assertion_handler.in"``

@@ -1,10 +1,6 @@
include(HandleLitArguments)
add_subdirectory(tools)
if (LIBCXX_INCLUDE_BENCHMARKS)
add_subdirectory(benchmarks)
endif()
# Install the library at a fake location so we can run the test suite against it.
# This ensures that we run the test suite against a setup that matches what we ship
# in production as closely as possible (in terms of file paths, rpaths, etc).
@@ -66,6 +62,14 @@ set(SERIALIZED_LIT_PARAMS "# Lit parameters serialized here for llvm-lit to pick
serialize_lit_string_param(SERIALIZED_LIT_PARAMS compiler "${CMAKE_CXX_COMPILER}")
if (LIBCXX_INCLUDE_BENCHMARKS)
add_subdirectory(benchmarks)
set(_libcxx_benchmark_mode "dry-run")
else()
serialize_lit_string_param(SERIALIZED_LIT_PARAMS enable_benchmarks "no")
set(_libcxx_benchmark_mode "no")
endif()
if (NOT LIBCXX_ENABLE_EXCEPTIONS)
serialize_lit_param(SERIALIZED_LIT_PARAMS enable_exceptions False)
endif()
@@ -102,4 +106,5 @@ configure_lit_site_cfg(
add_lit_testsuite(check-cxx
"Running libcxx tests"
${CMAKE_CURRENT_BINARY_DIR}
PARAMS enable_benchmarks="${_libcxx_benchmark_mode}"
DEPENDS cxx-test-depends)

@@ -1,10 +1,8 @@
include(ExternalProject)
include(CheckCXXCompilerFlag)
#==============================================================================
# Build Google Benchmark
#==============================================================================
include(ExternalProject)
set(BENCHMARK_COMPILE_FLAGS
-Wno-unused-command-line-argument
-nostdinc++
@@ -24,6 +22,12 @@ if (DEFINED LIBCXX_CXX_ABI_LIBRARY_PATH)
endif()
split_list(BENCHMARK_COMPILE_FLAGS)
set(BENCHMARK_CXX_LIBRARIES)
list(APPEND BENCHMARK_CXX_LIBRARIES c++)
if (NOT LIBCXX_ENABLE_SHARED)
list(APPEND BENCHMARK_CXX_LIBRARIES c++abi)
endif()
ExternalProject_Add(google-benchmark
EXCLUDE_FROM_ALL ON
DEPENDS cxx cxx-headers
@@ -37,176 +41,7 @@ ExternalProject_Add(google-benchmark
-DCMAKE_INSTALL_PREFIX:PATH=<INSTALL_DIR>
-DCMAKE_CXX_FLAGS:STRING=${BENCHMARK_COMPILE_FLAGS}
-DBENCHMARK_USE_LIBCXX:BOOL=ON
-DBENCHMARK_ENABLE_TESTING:BOOL=OFF)
-DBENCHMARK_ENABLE_TESTING:BOOL=OFF
-DBENCHMARK_CXX_LIBRARIES:STRING="${BENCHMARK_CXX_LIBRARIES}")
#==============================================================================
# Benchmark tests configuration
#==============================================================================
add_custom_target(cxx-benchmarks)
set(BENCHMARK_OUTPUT_DIR ${CMAKE_CURRENT_BINARY_DIR})
set(BENCHMARK_INSTALL_DIR ${CMAKE_CURRENT_BINARY_DIR}/google-benchmark)
add_library( cxx-benchmarks-flags INTERFACE)
# TODO(cmake): remove. This is a workaround to prevent older versions of GCC
# from failing the configure step because they don't support C++23.
if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU" AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS "13.0")
return()
endif()
#TODO(cmake): remove the `add_compile_options`. Currently we have to explicitly
# pass the `std:c++latest` flag on Windows to work around an issue where
# requesting `cxx_std_23` results in an error -- somehow CMake fails to
# translate the `c++23` flag into `c++latest`, and the highest numbered C++
# version that MSVC flags support is C++20.
if (MSVC)
add_compile_options(/std:c++latest)
# ibm-clang does not recognize the cxx_std_23 flag, so use this as a temporary
# workaround on AIX as well.
elseif (${CMAKE_SYSTEM_NAME} MATCHES "AIX")
add_compile_options(-std=c++23)
else()
target_compile_features( cxx-benchmarks-flags INTERFACE cxx_std_23)
endif()
target_compile_options(cxx-benchmarks-flags INTERFACE -fsized-deallocation -nostdinc++
${SANITIZER_FLAGS} -Wno-user-defined-literals -Wno-suggest-override)
target_include_directories(cxx-benchmarks-flags INTERFACE "${LIBCXX_GENERATED_INCLUDE_DIR}"
INTERFACE "${BENCHMARK_INSTALL_DIR}/include"
INTERFACE "${LIBCXX_SOURCE_DIR}/test/support")
target_link_options(cxx-benchmarks-flags INTERFACE -lm -nostdlib++
"-L${BENCHMARK_INSTALL_DIR}/lib" "-L${BENCHMARK_INSTALL_DIR}/lib64"
${SANITIZER_FLAGS})
set(libcxx_benchmark_targets)
function(add_benchmark_test name source_file)
set(libcxx_target ${name}_libcxx)
list(APPEND libcxx_benchmark_targets ${libcxx_target})
add_executable(${libcxx_target} EXCLUDE_FROM_ALL ${source_file})
target_link_libraries(${libcxx_target} PRIVATE cxx-benchmarks-flags)
add_dependencies(${libcxx_target} cxx google-benchmark)
add_dependencies(cxx-benchmarks ${libcxx_target})
if (LIBCXX_ENABLE_SHARED)
target_link_libraries(${libcxx_target} PRIVATE cxx_shared)
else()
target_link_libraries(${libcxx_target} PRIVATE cxx_static)
endif()
target_link_libraries(${libcxx_target} PRIVATE cxx_experimental benchmark)
if (LLVM_USE_SANITIZER)
target_link_libraries(${libcxx_target} PRIVATE -ldl)
endif()
set_target_properties(${libcxx_target}
PROPERTIES
OUTPUT_NAME "${name}.bench.out"
RUNTIME_OUTPUT_DIRECTORY "${BENCHMARK_OUTPUT_DIR}"
CXX_EXTENSIONS NO)
cxx_link_system_libraries(${libcxx_target})
endfunction()
#==============================================================================
# Register Benchmark tests
#==============================================================================
set(BENCHMARK_TESTS
algorithms.partition_point.bench.cpp
algorithms/count.bench.cpp
algorithms/equal.bench.cpp
algorithms/find.bench.cpp
algorithms/fill.bench.cpp
algorithms/for_each.bench.cpp
algorithms/lexicographical_compare.bench.cpp
algorithms/lower_bound.bench.cpp
algorithms/make_heap.bench.cpp
algorithms/make_heap_then_sort_heap.bench.cpp
algorithms/min.bench.cpp
algorithms/minmax.bench.cpp
algorithms/min_max_element.bench.cpp
algorithms/mismatch.bench.cpp
algorithms/pop_heap.bench.cpp
algorithms/pstl.stable_sort.bench.cpp
algorithms/push_heap.bench.cpp
algorithms/ranges_contains.bench.cpp
algorithms/ranges_ends_with.bench.cpp
algorithms/ranges_make_heap.bench.cpp
algorithms/ranges_make_heap_then_sort_heap.bench.cpp
algorithms/ranges_pop_heap.bench.cpp
algorithms/ranges_push_heap.bench.cpp
algorithms/ranges_sort.bench.cpp
algorithms/ranges_sort_heap.bench.cpp
algorithms/ranges_stable_sort.bench.cpp
algorithms/set_intersection.bench.cpp
algorithms/sort.bench.cpp
algorithms/sort_heap.bench.cpp
algorithms/stable_sort.bench.cpp
atomic_wait.bench.cpp
atomic_wait_vs_mutex_lock.bench.cpp
libcxxabi/dynamic_cast.bench.cpp
libcxxabi/dynamic_cast_old_stress.bench.cpp
allocation.bench.cpp
deque.bench.cpp
deque_iterator.bench.cpp
exception_ptr.bench.cpp
filesystem.bench.cpp
format/write_double_comparison.bench.cpp
format/write_int_comparison.bench.cpp
format/write_string_comparison.bench.cpp
format_to_n.bench.cpp
format_to.bench.cpp
format.bench.cpp
formatted_size.bench.cpp
formatter_float.bench.cpp
formatter_int.bench.cpp
function.bench.cpp
hash.bench.cpp
join_view.bench.cpp
lexicographical_compare_three_way.bench.cpp
map.bench.cpp
monotonic_buffer.bench.cpp
numeric/gcd.bench.cpp
ordered_set.bench.cpp
shared_mutex_vs_mutex.bench.cpp
stop_token.bench.cpp
std_format_spec_string_unicode.bench.cpp
std_format_spec_string_unicode_escape.bench.cpp
string.bench.cpp
stringstream.bench.cpp
system_error.bench.cpp
to_chars.bench.cpp
unordered_set_operations.bench.cpp
util_smartptr.bench.cpp
variant_visit_1.bench.cpp
variant_visit_2.bench.cpp
variant_visit_3.bench.cpp
vector_operations.bench.cpp
)
foreach(test_path ${BENCHMARK_TESTS})
get_filename_component(test_file "${test_path}" NAME)
string(REPLACE ".bench.cpp" "" test_name "${test_file}")
if (NOT DEFINED ${test_name}_REPORTED)
message(STATUS "Adding Benchmark: ${test_file}")
# Only report the adding of the benchmark once.
set(${test_name}_REPORTED ON CACHE INTERNAL "")
endif()
add_benchmark_test(${test_name} ${test_path})
endforeach()
if (LIBCXX_INCLUDE_TESTS)
include(AddLLVM)
configure_lit_site_cfg(
${CMAKE_CURRENT_SOURCE_DIR}/lit.cfg.py.in
${CMAKE_CURRENT_BINARY_DIR}/lit.cfg.py)
configure_lit_site_cfg(
${CMAKE_CURRENT_SOURCE_DIR}/lit.site.cfg.py.in
${CMAKE_CURRENT_BINARY_DIR}/lit.site.cfg.py)
set(BENCHMARK_LIT_ARGS "--show-all --show-xfail --show-unsupported ${LIT_ARGS_DEFAULT}")
add_lit_target(check-cxx-benchmarks
"Running libcxx benchmarks tests"
${CMAKE_CURRENT_BINARY_DIR}
DEPENDS cxx-benchmarks cxx-test-depends
ARGS ${BENCHMARK_LIT_ARGS})
endif()
add_dependencies(cxx-test-depends google-benchmark)

@@ -78,6 +78,8 @@ BENCHMARK(BM_std_min<unsigned char>)->Apply(run_sizes);
BENCHMARK(BM_std_min<unsigned short>)->Apply(run_sizes);
BENCHMARK(BM_std_min<unsigned int>)->Apply(run_sizes);
BENCHMARK(BM_std_min<unsigned long long>)->Apply(run_sizes);
#ifndef TEST_HAS_NO_INT128
BENCHMARK(BM_std_min<unsigned __int128>)->Apply(run_sizes);
#endif
BENCHMARK_MAIN();

@@ -8,10 +8,6 @@
// UNSUPPORTED: c++03, c++11, c++14, c++17
// To run this test, build libcxx and cxx-benchmarks targets
// cd third-party/benchmark/tools
// ./compare.py filters ../../../build/libcxx/benchmarks/atomic_wait_vs_mutex_lock.libcxx.out BM_atomic_wait BM_mutex
#include <atomic>
#include <mutex>
#include <numeric>

@@ -41,6 +41,7 @@ BENCHMARK(BM_Basic<uint64_t>);
BENCHMARK(BM_Basic<int64_t>);
// Ideally the low values of a 128-bit value are all dispatched to a 64-bit routine.
#ifndef TEST_HAS_NO_INT128
template <class T>
static void BM_BasicLow(benchmark::State& state) {
using U = std::conditional_t<std::is_signed_v<T>, int64_t, uint64_t>;
@@ -52,7 +53,6 @@ static void BM_BasicLow(benchmark::State& state) {
for (auto value : data)
benchmark::DoNotOptimize(std::format_to(output.begin(), "{}", value));
}
#ifndef TEST_HAS_NO_INT128
BENCHMARK(BM_BasicLow<__uint128_t>);
BENCHMARK(BM_BasicLow<__int128_t>);

@@ -1,23 +0,0 @@
# -*- Python -*- vim: set ft=python ts=4 sw=4 expandtab tw=79:
# Configuration file for the 'lit' test runner.
import os
import site
site.addsitedir(os.path.join("@LIBCXX_SOURCE_DIR@", "utils"))
from libcxx.test.googlebenchmark import GoogleBenchmark
# Tell pylint that we know config and lit_config exist somewhere.
if "PYLINT_IMPORT" in os.environ:
config = object()
lit_config = object()
# name: The name of this test suite.
config.name = "libc++ benchmarks"
config.suffixes = []
config.test_exec_root = "@CMAKE_CURRENT_BINARY_DIR@"
config.test_source_root = "@CMAKE_CURRENT_BINARY_DIR@"
config.test_format = GoogleBenchmark(
test_sub_dirs=".", test_suffix=".bench.out", benchmark_args=config.benchmark_args
)

@@ -1,10 +0,0 @@
@LIT_SITE_CFG_IN_HEADER@
import sys
config.libcxx_src_root = "@LIBCXX_SOURCE_DIR@"
config.libcxx_obj_root = "@LIBCXX_BINARY_DIR@"
config.benchmark_args = "@LIBCXX_BENCHMARK_TEST_ARGS@".split(';')
# Let the main config do the real work.
lit_config.load_config(config, "@CMAKE_CURRENT_BINARY_DIR@/lit.cfg.py")

@@ -6,6 +6,8 @@
//
//===----------------------------------------------------------------------===//
// UNSUPPORTED: c++03
#include <memory>
#include "benchmark/benchmark.h"

@@ -30,3 +30,4 @@ config.substitutions.append(('%{target-include-dir}', '@LIBCXX_TESTING_INSTALL_P
config.substitutions.append(('%{lib-dir}', '@LIBCXX_TESTING_INSTALL_PREFIX@/@LIBCXX_INSTALL_LIBRARY_DIR@'))
config.substitutions.append(('%{module-dir}', '@LIBCXX_TESTING_INSTALL_PREFIX@/@LIBCXX_INSTALL_MODULES_DIR@'))
config.substitutions.append(('%{test-tools-dir}', '@LIBCXX_TEST_TOOLS_PATH@'))
config.substitutions.append(('%{benchmark_flags}', '-I @LIBCXX_BINARY_DIR@/test/benchmarks/google-benchmark/include -L @LIBCXX_BINARY_DIR@/test/benchmarks/google-benchmark/lib -l benchmark'))

@@ -35,6 +35,9 @@ definitions:
- "**/test-results.xml"
- "**/*.abilist"
- "**/crash_diagnostics/*"
- "**/CMakeConfigureLog.yaml"
- "**/CMakeError.log"
- "**/CMakeOutput.log"
steps:
- group: ARM

@@ -187,11 +187,6 @@ function check-abi-list() {
)
}
function check-cxx-benchmarks() {
step "Running the benchmarks"
${NINJA} -vC "${BUILD_DIR}" check-cxx-benchmarks
}
function test-armv7m-picolibc() {
clean
@@ -640,11 +635,6 @@ apple-system)
step "Running the libunwind tests"
${NINJA} -vC "${BUILD_DIR}/unwind" check-unwind
;;
benchmarks)
clean
generate-cmake
check-cxx-benchmarks
;;
aarch64)
clean
generate-cmake -C "${MONOREPO_ROOT}/libcxx/cmake/caches/AArch64.cmake"

@@ -52,7 +52,7 @@ def configure(parameters, features, config, lit_config):
)
# Print the basic substitutions
for sub in ("%{cxx}", "%{flags}", "%{compile_flags}", "%{link_flags}", "%{exec}"):
for sub in ("%{cxx}", "%{flags}", "%{compile_flags}", "%{link_flags}", "%{benchmark_flags}", "%{exec}"):
note("Using {} substitution: '{}'".format(sub, _getSubstitution(sub, config)))
# Print all available features

@@ -29,7 +29,7 @@ def _getTempPaths(test):
def _checkBaseSubstitutions(substitutions):
substitutions = [s for (s, _) in substitutions]
for s in ["%{cxx}", "%{compile_flags}", "%{link_flags}", "%{flags}", "%{exec}"]:
for s in ["%{cxx}", "%{compile_flags}", "%{link_flags}", "%{benchmark_flags}", "%{flags}", "%{exec}"]:
assert s in substitutions, "Required substitution {} was not provided".format(s)
def _executeScriptInternal(test, litConfig, commands):
@@ -220,11 +220,15 @@ class CxxStandardLibraryTest(lit.formats.FileBasedTest):
The test format operates by assuming that each test's configuration provides
the following substitutions, which it will reuse in the shell scripts it
constructs:
%{cxx} - A command that can be used to invoke the compiler
%{compile_flags} - Flags to use when compiling a test case
%{link_flags} - Flags to use when linking a test case
%{flags} - Flags to use either when compiling or linking a test case
%{exec} - A command to prefix the execution of executables
%{cxx} - A command that can be used to invoke the compiler
%{compile_flags} - Flags to use when compiling a test case
%{link_flags} - Flags to use when linking a test case
%{flags} - Flags to use either when compiling or linking a test case
%{benchmark_flags} - Flags to use when compiling benchmarks. These flags should provide access to
GoogleBenchmark but shouldn't hardcode any optimization level or other settings,
since the benchmarks should be run under the same configuration as the rest of
the test suite.
%{exec} - A command to prefix the execution of executables
Note that when building an executable (as opposed to only compiling a source
file), all three of %{flags}, %{compile_flags} and %{link_flags} will be used
@@ -254,6 +258,7 @@ class CxxStandardLibraryTest(lit.formats.FileBasedTest):
def getTestsForPath(self, testSuite, pathInSuite, litConfig, localConfig):
SUPPORTED_SUFFIXES = [
"[.]bench[.]cpp$",
"[.]pass[.]cpp$",
"[.]pass[.]mm$",
"[.]compile[.]pass[.]cpp$",
@@ -331,6 +336,20 @@ class CxxStandardLibraryTest(lit.formats.FileBasedTest):
"%dbg(EXECUTED AS) %{exec} %t.exe",
]
return self._executeShTest(test, litConfig, steps)
elif filename.endswith(".bench.cpp"):
if "enable-benchmarks=no" in test.config.available_features:
return lit.Test.Result(
lit.Test.UNSUPPORTED,
"Test {} requires support for benchmarks, which isn't supported by this configuration".format(
test.getFullName()
),
)
steps = [
"%dbg(COMPILED WITH) %{cxx} %s %{flags} %{compile_flags} %{link_flags} %{benchmark_flags} -o %t.exe",
]
if "enable-benchmarks=run" in test.config.available_features:
steps += ["%dbg(EXECUTED AS) %{exec} %t.exe"]
return self._executeShTest(test, litConfig, steps)
else:
return lit.Test.Result(
lit.Test.UNRESOLVED, "Unknown test suffix for '{}'".format(filename)

@@ -1,125 +0,0 @@
from __future__ import absolute_import
import os
import subprocess
import sys
import lit.Test
import lit.TestRunner
import lit.util
from lit.formats.base import TestFormat
kIsWindows = sys.platform in ["win32", "cygwin"]
class GoogleBenchmark(TestFormat):
def __init__(self, test_sub_dirs, test_suffix, benchmark_args=[]):
self.benchmark_args = list(benchmark_args)
self.test_sub_dirs = os.path.normcase(str(test_sub_dirs)).split(";")
# On Windows, assume tests will also end in '.exe'.
exe_suffix = str(test_suffix)
if kIsWindows:
exe_suffix += ".exe"
# Also check for .py files for testing purposes.
self.test_suffixes = {exe_suffix, test_suffix + ".py"}
def getBenchmarkTests(self, path, litConfig, localConfig):
"""getBenchmarkTests(path) - [name]
Return the tests available in gtest executable.
Args:
path: String path to a gtest executable
litConfig: LitConfig instance
localConfig: TestingConfig instance"""
# TODO: allow splitting tests according to the "benchmark family" so
# the output for a single family of tests all belongs to the same test
# target.
list_test_cmd = [path, "--benchmark_list_tests"]
try:
output = subprocess.check_output(list_test_cmd, env=localConfig.environment)
except subprocess.CalledProcessError as exc:
litConfig.warning(
"unable to discover google-benchmarks in %r: %s. Process output: %s"
% (path, sys.exc_info()[1], exc.output)
)
raise StopIteration
nested_tests = []
for ln in output.splitlines(False): # Don't keep newlines.
ln = lit.util.to_string(ln)
if not ln.strip():
continue
index = 0
while ln[index * 2 : index * 2 + 2] == "  ":
index += 1
while len(nested_tests) > index:
nested_tests.pop()
ln = ln[index * 2 :]
if ln.endswith("."):
nested_tests.append(ln)
elif any([name.startswith("DISABLED_") for name in nested_tests + [ln]]):
# Gtest will internally skip these tests. No need to launch a
# child process for it.
continue
else:
yield "".join(nested_tests) + ln
def getTestsInDirectory(self, testSuite, path_in_suite, litConfig, localConfig):
source_path = testSuite.getSourcePath(path_in_suite)
for subdir in self.test_sub_dirs:
dir_path = os.path.join(source_path, subdir)
if not os.path.isdir(dir_path):
continue
for fn in lit.util.listdir_files(dir_path, suffixes=self.test_suffixes):
# Discover the tests in this executable.
execpath = os.path.join(source_path, subdir, fn)
testnames = self.getBenchmarkTests(execpath, litConfig, localConfig)
for testname in testnames:
testPath = path_in_suite + (subdir, fn, testname)
yield lit.Test.Test(
testSuite, testPath, localConfig, file_path=execpath
)
def execute(self, test, litConfig):
testPath, testName = os.path.split(test.getSourcePath())
while not os.path.exists(testPath):
# Handle GTest parametrized and typed tests, whose name includes
# some '/'s.
testPath, namePrefix = os.path.split(testPath)
testName = namePrefix + "/" + testName
cmd = [testPath, "--benchmark_filter=%s$" % testName] + self.benchmark_args
if litConfig.noExecute:
return lit.Test.PASS, ""
try:
out, err, exitCode = lit.util.executeCommand(
cmd,
env=test.config.environment,
timeout=litConfig.maxIndividualTestTime,
)
except lit.util.ExecuteCommandTimeoutException:
return (
lit.Test.TIMEOUT,
"Reached timeout of {} seconds".format(litConfig.maxIndividualTestTime),
)
if exitCode:
return lit.Test.FAIL, ("exit code: %d\n" % exitCode) + out + err
passing_test_line = testName
if passing_test_line not in out:
msg = "Unable to find %r in google benchmark output:\n\n%s%s" % (
passing_test_line,
out,
err,
)
return lit.Test.UNRESOLVED, msg
return lit.Test.PASS, err + out

@@ -364,6 +364,16 @@ DEFAULT_PARAMETERS = [
AddFeature("libcpp-has-no-experimental-syncstream"),
],
),
# TODO: This can be improved once we use a version of GoogleBenchmark that supports the dry-run mode.
# See https://github.com/google/benchmark/issues/1827.
Parameter(
name="enable_benchmarks",
choices=["no", "run", "dry-run"],
type=str,
default="run",
help="Whether to run the benchmarks in the test suite, to only dry-run them or to disable them entirely.",
actions=lambda mode: [AddFeature(f"enable-benchmarks={mode}")],
),
Parameter(
name="long_tests",
choices=[True, False],

@@ -32,6 +32,7 @@ config.substitutions.append(('%{include}', '@LIBCXXABI_TESTING_INSTALL_PREFIX@/i
config.substitutions.append(('%{cxx-include}', '@LIBCXXABI_TESTING_INSTALL_PREFIX@/@LIBCXXABI_INSTALL_INCLUDE_DIR@'))
config.substitutions.append(('%{cxx-target-include}', '@LIBCXXABI_TESTING_INSTALL_PREFIX@/include/%{triple}/c++/v1'))
config.substitutions.append(('%{lib}', '@LIBCXXABI_TESTING_INSTALL_PREFIX@/@LIBCXXABI_INSTALL_LIBRARY_DIR@'))
config.substitutions.append(('%{benchmark_flags}', ''))
if @LIBCXXABI_USE_LLVM_UNWINDER@:
config.substitutions.append(('%{maybe-include-libunwind}', '-I "@LIBCXXABI_LIBUNWIND_INCLUDES_INTERNAL@"'))

@@ -32,3 +32,4 @@ if not @LIBUNWIND_ENABLE_THREADS@:
config.substitutions.append(('%{install-prefix}', '@LIBUNWIND_TESTING_INSTALL_PREFIX@'))
config.substitutions.append(('%{include}', '@LIBUNWIND_TESTING_INSTALL_PREFIX@/include'))
config.substitutions.append(('%{lib}', '@LIBUNWIND_TESTING_INSTALL_PREFIX@/@LIBUNWIND_INSTALL_LIBRARY_DIR@'))
config.substitutions.append(('%{benchmark_flags}', ''))