
Instead of building the benchmarks separately via CMake and running them separately from the test suite, this patch merges the benchmarks into the test suite and handles both uniformly. As a result:

- It is now possible to run individual benchmarks the same way we run tests (e.g. using libcxx-lit), which is a huge quality-of-life improvement.
- The benchmarks are run under exactly the same configuration as the rest of the tests, which is a nice simplification. This does mean that one has to be careful to enable the desired optimization flags when running benchmarks, but that is easy with e.g. `libcxx-lit <...> --param optimization=speed`.
- Benchmarks can use the same annotations as the rest of the test suite, such as `// UNSUPPORTED` & friends.

When running the tests via `check-cxx`, we only compile the benchmarks, because running them would be too time-consuming. This introduces a bit of complexity in the testing setup; it would be better to allow passing a --dry-run flag to GoogleBenchmark executables, which is the topic of https://github.com/google/benchmark/issues/1827.

I am not really satisfied with the layering violation of adding the %{benchmark_flags} substitution to cmake-bridge, but I believe this can be improved in the future.
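For example, running a single benchmark in optimized mode could then look roughly like this (the test path below is purely illustrative, not a specific file named by this patch):

    libcxx-lit libcxx/test/benchmarks/algorithms/min.bench.cpp --param optimization=speed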
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

// UNSUPPORTED: c++03, c++11, c++14, c++17

#include <algorithm>
#include <cassert>
#include <vector>

#include <benchmark/benchmark.h>
#include "test_macros.h"

void run_sizes(auto benchmark) {
  benchmark->Arg(1)
      ->Arg(2)
      ->Arg(3)
      ->Arg(4)
      ->Arg(5)
      ->Arg(6)
      ->Arg(7)
      ->Arg(8)
      ->Arg(9)
      ->Arg(10)
      ->Arg(11)
      ->Arg(12)
      ->Arg(13)
      ->Arg(14)
      ->Arg(15)
      ->Arg(16)
      ->Arg(17)
      ->Arg(18)
      ->Arg(19)
      ->Arg(20)
      ->Arg(21)
      ->Arg(22)
      ->Arg(23)
      ->Arg(24)
      ->Arg(25)
      ->Arg(26)
      ->Arg(27)
      ->Arg(28)
      ->Arg(29)
      ->Arg(30)
      ->Arg(31)
      ->Arg(32)
      ->Arg(64)
      ->Arg(512)
      ->Arg(1024)
      ->Arg(4000)
      ->Arg(4096)
      ->Arg(5500)
      ->Arg(64000)
      ->Arg(65536)
      ->Arg(70000);
}

// Benchmark std::ranges::min over a vector of the given size, for each element type.
template <class T>
static void BM_std_min(benchmark::State& state) {
  std::vector<T> vec(state.range(), 3); // state.range() elements, all equal to 3

  for (auto _ : state) {
    benchmark::DoNotOptimize(vec);
    benchmark::DoNotOptimize(std::ranges::min(vec));
  }
}
BENCHMARK(BM_std_min<char>)->Apply(run_sizes);
BENCHMARK(BM_std_min<short>)->Apply(run_sizes);
BENCHMARK(BM_std_min<int>)->Apply(run_sizes);
BENCHMARK(BM_std_min<long long>)->Apply(run_sizes);
#ifndef TEST_HAS_NO_INT128
BENCHMARK(BM_std_min<__int128>)->Apply(run_sizes);
#endif
BENCHMARK(BM_std_min<unsigned char>)->Apply(run_sizes);
BENCHMARK(BM_std_min<unsigned short>)->Apply(run_sizes);
BENCHMARK(BM_std_min<unsigned int>)->Apply(run_sizes);
BENCHMARK(BM_std_min<unsigned long long>)->Apply(run_sizes);
#ifndef TEST_HAS_NO_INT128
BENCHMARK(BM_std_min<unsigned __int128>)->Apply(run_sizes);
#endif

BENCHMARK_MAIN();