Rename ThreadPool::getThreadCount() to getMaxConcurrency() (NFC) (#82296)
This addresses a long-standing TODO to rename this misleading API. The old name is preserved for now but marked deprecated.
This commit is contained in:
parent 71e0623110
commit 744616b3ae
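
Since the change is NFC, call sites migrate mechanically: the returned value is unchanged, only the name is clearer about what it means. A minimal before/after sketch (hypothetical helper, assuming the llvm/Support/ThreadPool.h header at this revision; not taken verbatim from the patch):

// Illustrative helper, not part of the patch.
#include "llvm/Support/ThreadPool.h"

unsigned queryPoolSize(llvm::ThreadPool &Pool) {
  // Old spelling, still available but now marked LLVM_DEPRECATED:
  //   return Pool.getThreadCount();
  // New spelling; same value (the pool's MaxThreadCount upper bound):
  return Pool.getMaxConcurrency();
}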
@@ -317,7 +317,8 @@ void mergeLegacyProfiles(const SmallVectorImpl<std::string> &Filenames) {
   ThreadPoolStrategy S = optimal_concurrency(
       std::max(Filenames.size() / 4, static_cast<size_t>(1)));
   ThreadPool Pool(S);
-  DenseMap<llvm::thread::id, ProfileTy> ParsedProfiles(Pool.getThreadCount());
+  DenseMap<llvm::thread::id, ProfileTy> ParsedProfiles(
+      Pool.getMaxConcurrency());
   for (const auto &Filename : Filenames)
     Pool.async(ParseProfile, std::cref(Filename), std::ref(ParsedProfiles));
   Pool.wait();

@@ -870,7 +870,7 @@ int clang_scan_deps_main(int argc, char **argv, const llvm::ToolContext &) {
       EagerLoadModules);
   llvm::ThreadPool Pool(llvm::hardware_concurrency(NumThreads));
   std::vector<std::unique_ptr<DependencyScanningTool>> WorkerTools;
-  for (unsigned I = 0; I < Pool.getThreadCount(); ++I)
+  for (unsigned I = 0; I < Pool.getMaxConcurrency(); ++I)
     WorkerTools.push_back(std::make_unique<DependencyScanningTool>(Service));

   std::vector<tooling::CompileCommand> Inputs =

@@ -894,13 +894,13 @@ int clang_scan_deps_main(int argc, char **argv, const llvm::ToolContext &) {

   if (Verbose) {
     llvm::outs() << "Running clang-scan-deps on " << Inputs.size()
-                 << " files using " << Pool.getThreadCount() << " workers\n";
+                 << " files using " << Pool.getMaxConcurrency() << " workers\n";
   }

   llvm::Timer T;
   T.startTimer();

-  for (unsigned I = 0; I < Pool.getThreadCount(); ++I) {
+  for (unsigned I = 0; I < Pool.getMaxConcurrency(); ++I) {
     Pool.async([&, I]() {
       llvm::DenseSet<ModuleID> AlreadySeenModules;
       while (auto MaybeInputIndex = GetNextInputIndex()) {

@@ -104,9 +104,12 @@ public:
   /// not to waste the thread.
   void wait(ThreadPoolTaskGroup &Group);

-  // TODO: misleading legacy name warning!
   // Returns the maximum number of worker threads in the pool, not the current
   // number of threads!
-  unsigned getThreadCount() const { return MaxThreadCount; }
+  unsigned getMaxConcurrency() const { return MaxThreadCount; }
+
+  // TODO: misleading legacy name warning!
+  LLVM_DEPRECATED("Use getMaxConcurrency instead", "getMaxConcurrency")
+  unsigned getThreadCount() const { return MaxThreadCount; }

   /// Returns true if the current thread is a worker thread of this thread pool.

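The comment above spells out the semantics: the value is an upper bound on worker threads, not a live count (threads are spawned lazily). That is what makes the sizing pattern in the mergeLegacyProfiles and clang_scan_deps_main hunks safe, where exactly getMaxConcurrency() tasks are launched and each gets its own slot. A small hedged sketch of that pattern, using only the ThreadPool calls visible in this diff:

// Illustrative only: launch exactly getMaxConcurrency() tasks and give each
// task its own slot. Each task owns a distinct index, so no synchronization
// on Slots is needed while the tasks run.
#include "llvm/Support/ThreadPool.h"
#include "llvm/Support/Threading.h"
#include <vector>

std::vector<unsigned> countPerTask() {
  llvm::ThreadPool Pool(llvm::hardware_concurrency());
  std::vector<unsigned> Slots(Pool.getMaxConcurrency(), 0);
  for (unsigned I = 0; I < Pool.getMaxConcurrency(); ++I)
    Pool.async([&Slots, I] { ++Slots[I]; });
  Pool.wait();
  return Slots;
}
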
@@ -414,7 +414,7 @@ Error DebuginfodCollection::findBinaries(StringRef Path) {
   sys::fs::recursive_directory_iterator I(Twine(Path), EC), E;
   std::mutex IteratorMutex;
   ThreadPoolTaskGroup IteratorGroup(Pool);
-  for (unsigned WorkerIndex = 0; WorkerIndex < Pool.getThreadCount();
+  for (unsigned WorkerIndex = 0; WorkerIndex < Pool.getMaxConcurrency();
        WorkerIndex++) {
     IteratorGroup.async([&, this]() -> void {
       std::string FilePath;

@@ -1537,7 +1537,7 @@ public:
   }

   unsigned getThreadCount() override {
-    return BackendThreadPool.getThreadCount();
+    return BackendThreadPool.getMaxConcurrency();
   }
 };
 } // end anonymous namespace

@@ -68,7 +68,7 @@ LogicalResult failableParallelForEach(MLIRContext *context, IteratorT begin,
   // Otherwise, process the elements in parallel.
   llvm::ThreadPool &threadPool = context->getThreadPool();
   llvm::ThreadPoolTaskGroup tasksGroup(threadPool);
-  size_t numActions = std::min(numElements, threadPool.getThreadCount());
+  size_t numActions = std::min(numElements, threadPool.getMaxConcurrency());
   for (unsigned i = 0; i < numActions; ++i)
     tasksGroup.async(processFn);
   // If the current thread is a worker thread from the pool, then waiting for

@@ -437,7 +437,7 @@ extern "C" void mlirAsyncRuntimeAwaitAllInGroupAndExecute(AsyncGroup *group,
 }

 extern "C" int64_t mlirAsyncRuntimGetNumWorkerThreads() {
-  return getDefaultAsyncRuntime()->getThreadPool().getThreadCount();
+  return getDefaultAsyncRuntime()->getThreadPool().getMaxConcurrency();
 }

 //===----------------------------------------------------------------------===//

@@ -638,7 +638,7 @@ unsigned MLIRContext::getNumThreads() {
   if (isMultithreadingEnabled()) {
     assert(impl->threadPool &&
            "multi-threading is enabled but threadpool not set");
-    return impl->threadPool->getThreadCount();
+    return impl->threadPool->getMaxConcurrency();
   }
   // No multithreading or active thread pool. Return 1 thread.
   return 1;

@@ -748,7 +748,7 @@ void OpToOpPassAdaptor::runOnOperationAsyncImpl(bool verifyPasses) {
   // Create the async executors if they haven't been created, or if the main
   // pipeline has changed.
   if (asyncExecutors.empty() || hasSizeMismatch(asyncExecutors.front(), mgrs))
-    asyncExecutors.assign(context->getThreadPool().getThreadCount(), mgrs);
+    asyncExecutors.assign(context->getThreadPool().getMaxConcurrency(), mgrs);

   // This struct represents the information for a single operation to be
   // scheduled on a pass manager.