Split up Parallel and LLVM'ize naming conventions.

This is one step in preparation for raising this code up to
LLVM. It hides all of the Executor machinery in a private
implementation file, leaving only the core algorithms and
the TaskGroup class exposed. In doing so, it also fixes up
the variable names to conform to LLVM style.

Differential Revision: https://reviews.llvm.org/D32890

llvm-svn: 302288
Zachary Turner 2017-05-05 21:09:26 +00:00
parent 64b6c78e94
commit f7ca8fcd6a
5 changed files with 260 additions and 223 deletions
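Before the per-file diffs, a minimal usage sketch of the API surface this commit leaves public — parallel_sort plus TaskGroup — may help. This example is not part of the commit; it assumes the lld headers are on the include path and a C++11 toolchain.

// Illustrative usage of the public surface after this commit (sketch only;
// not part of the change itself).
#include "lld/Core/Parallel.h"
#include "lld/Core/TaskGroup.h"

#include <vector>

int main() {
  std::vector<int> Vals = {9, 4, 7, 1, 3, 8};

  // Pointer overload of parallel_sort; degrades to std::sort when
  // LLVM_ENABLE_THREADS is off.
  lld::parallel_sort(Vals.data(), Vals.data() + Vals.size());

  // TaskGroup is the only scheduling primitive still exposed; the Executor
  // that runs these closures is now hidden in TaskGroup.cpp.
  lld::TaskGroup TG;
  TG.spawn([] { /* first unit of work */ });
  TG.spawn([] { /* second unit of work */ });
  TG.sync(); // Also happens implicitly when TG is destroyed.
}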

lld/include/lld/Core/Parallel.h

@@ -10,16 +10,12 @@
 #ifndef LLD_CORE_PARALLEL_H
 #define LLD_CORE_PARALLEL_H

 #include "lld/Core/Instrumentation.h"
 #include "lld/Core/LLVM.h"
+#include "lld/Core/TaskGroup.h"
 #include "llvm/Support/MathExtras.h"
 #include "llvm/Support/thread.h"
-#include "llvm/config/llvm-config.h"

 #include <algorithm>
-#include <atomic>
-#include <condition_variable>
-#include <mutex>
-#include <stack>

 #if defined(_MSC_VER) && LLVM_ENABLE_THREADS
 #include <concrt.h>
@@ -27,249 +23,84 @@
 #endif

 namespace lld {

-/// \brief Allows one or more threads to wait on a potentially unknown number of
-/// events.
-///
-/// A latch starts at \p count. inc() increments this, and dec() decrements it.
-/// All calls to sync() will block while the count is not 0.
-///
-/// Calling dec() on a Latch with a count of 0 has undefined behavior.
-class Latch {
-  uint32_t _count;
-  mutable std::mutex _condMut;
-  mutable std::condition_variable _cond;
-
-public:
-  explicit Latch(uint32_t count = 0) : _count(count) {}
-  ~Latch() { sync(); }
-
-  void inc() {
-    std::unique_lock<std::mutex> lock(_condMut);
-    ++_count;
-  }
-
-  void dec() {
-    std::unique_lock<std::mutex> lock(_condMut);
-    if (--_count == 0)
-      _cond.notify_all();
-  }
-
-  void sync() const {
-    std::unique_lock<std::mutex> lock(_condMut);
-    _cond.wait(lock, [&] {
-      return _count == 0;
-    });
-  }
-};
-
-// Classes in this namespace are implementation details of this header.
-namespace internal {
-
-/// \brief An abstract class that takes closures and runs them asynchronously.
-class Executor {
-public:
-  virtual ~Executor() = default;
-  virtual void add(std::function<void()> func) = 0;
-};
-
-#if !defined(LLVM_ENABLE_THREADS) || LLVM_ENABLE_THREADS == 0
-class SyncExecutor : public Executor {
-public:
-  virtual void add(std::function<void()> func) {
-    func();
-  }
-};
-
-inline Executor *getDefaultExecutor() {
-  static SyncExecutor exec;
-  return &exec;
-}
-#elif defined(_MSC_VER)
-/// \brief An Executor that runs tasks via ConcRT.
-class ConcRTExecutor : public Executor {
-  struct Taskish {
-    Taskish(std::function<void()> task) : _task(task) {}
-    std::function<void()> _task;
-
-    static void run(void *p) {
-      Taskish *self = static_cast<Taskish *>(p);
-      self->_task();
-      concurrency::Free(self);
-    }
-  };
-
-public:
-  virtual void add(std::function<void()> func) {
-    Concurrency::CurrentScheduler::ScheduleTask(Taskish::run,
-        new (concurrency::Alloc(sizeof(Taskish))) Taskish(func));
-  }
-};
-
-inline Executor *getDefaultExecutor() {
-  static ConcRTExecutor exec;
-  return &exec;
-}
-#else
-/// \brief An implementation of an Executor that runs closures on a thread pool
-/// in FILO order.
-class ThreadPoolExecutor : public Executor {
-public:
-  explicit ThreadPoolExecutor(unsigned threadCount =
-                                  std::thread::hardware_concurrency())
-      : _stop(false), _done(threadCount) {
-    // Spawn all but one of the threads in another thread as spawning threads
-    // can take a while.
-    std::thread([&, threadCount] {
-      for (size_t i = 1; i < threadCount; ++i) {
-        std::thread([=] {
-          work();
-        }).detach();
-      }
-      work();
-    }).detach();
-  }
-
-  ~ThreadPoolExecutor() override {
-    std::unique_lock<std::mutex> lock(_mutex);
-    _stop = true;
-    lock.unlock();
-    _cond.notify_all();
-    // Wait for ~Latch.
-  }
-
-  void add(std::function<void()> f) override {
-    std::unique_lock<std::mutex> lock(_mutex);
-    _workStack.push(f);
-    lock.unlock();
-    _cond.notify_one();
-  }
-
-private:
-  void work() {
-    while (true) {
-      std::unique_lock<std::mutex> lock(_mutex);
-      _cond.wait(lock, [&] {
-        return _stop || !_workStack.empty();
-      });
-      if (_stop)
-        break;
-      auto task = _workStack.top();
-      _workStack.pop();
-      lock.unlock();
-      task();
-    }
-    _done.dec();
-  }
-
-  std::atomic<bool> _stop;
-  std::stack<std::function<void()>> _workStack;
-  std::mutex _mutex;
-  std::condition_variable _cond;
-  Latch _done;
-};
-
-inline Executor *getDefaultExecutor() {
-  static ThreadPoolExecutor exec;
-  return &exec;
-}
-#endif
-
-} // namespace internal
-
-/// \brief Allows launching a number of tasks and waiting for them to finish
-/// either explicitly via sync() or implicitly on destruction.
-class TaskGroup {
-  Latch _latch;
-
-public:
-  void spawn(std::function<void()> f) {
-    _latch.inc();
-    internal::getDefaultExecutor()->add([&, f] {
-      f();
-      _latch.dec();
-    });
-  }
-
-  void sync() const { _latch.sync(); }
-};
-
-#if !defined(LLVM_ENABLE_THREADS) || LLVM_ENABLE_THREADS == 0
-template <class RandomAccessIterator, class Comp>
+#if !LLVM_ENABLE_THREADS
+template <class RandomAccessIterator, class Comparator>
 void parallel_sort(
-    RandomAccessIterator start, RandomAccessIterator end,
-    const Comp &comp = std::less<
+    RandomAccessIterator Start, RandomAccessIterator End,
+    const Comparator &Comp = std::less<
         typename std::iterator_traits<RandomAccessIterator>::value_type>()) {
-  std::sort(start, end, comp);
+  std::sort(Start, End, Comp);
 }
 #elif defined(_MSC_VER)
 // Use ppl parallel_sort on Windows.
-template <class RandomAccessIterator, class Comp>
+template <class RandomAccessIterator, class Comparator>
 void parallel_sort(
-    RandomAccessIterator start, RandomAccessIterator end,
-    const Comp &comp = std::less<
+    RandomAccessIterator Start, RandomAccessIterator End,
+    const Comparator &Comp = std::less<
         typename std::iterator_traits<RandomAccessIterator>::value_type>()) {
-  concurrency::parallel_sort(start, end, comp);
+  concurrency::parallel_sort(Start, End, Comp);
 }
 #else
 namespace detail {
-const ptrdiff_t minParallelSize = 1024;
+const ptrdiff_t MinParallelSize = 1024;

 /// \brief Inclusive median.
-template <class RandomAccessIterator, class Comp>
-RandomAccessIterator medianOf3(RandomAccessIterator start,
-                               RandomAccessIterator end, const Comp &comp) {
-  RandomAccessIterator mid = start + (std::distance(start, end) / 2);
-  return comp(*start, *(end - 1))
-             ? (comp(*mid, *(end - 1)) ? (comp(*start, *mid) ? mid : start)
-                                       : end - 1)
-             : (comp(*mid, *start) ? (comp(*(end - 1), *mid) ? mid : end - 1)
-                                   : start);
+template <class RandomAccessIterator, class Comparator>
+RandomAccessIterator medianOf3(RandomAccessIterator Start,
+                               RandomAccessIterator End,
+                               const Comparator &Comp) {
+  RandomAccessIterator Mid = Start + (std::distance(Start, End) / 2);
+  return Comp(*Start, *(End - 1))
+             ? (Comp(*Mid, *(End - 1)) ? (Comp(*Start, *Mid) ? Mid : Start)
+                                       : End - 1)
+             : (Comp(*Mid, *Start) ? (Comp(*(End - 1), *Mid) ? Mid : End - 1)
+                                   : Start);
 }

-template <class RandomAccessIterator, class Comp>
-void parallel_quick_sort(RandomAccessIterator start, RandomAccessIterator end,
-                         const Comp &comp, TaskGroup &tg, size_t depth) {
+template <class RandomAccessIterator, class Comparator>
+void parallel_quick_sort(RandomAccessIterator Start, RandomAccessIterator End,
+                         const Comparator &Comp, TaskGroup &TG, size_t Depth) {
   // Do a sequential sort for small inputs.
-  if (std::distance(start, end) < detail::minParallelSize || depth == 0) {
-    std::sort(start, end, comp);
+  if (std::distance(Start, End) < detail::MinParallelSize || Depth == 0) {
+    std::sort(Start, End, Comp);
     return;
   }

   // Partition.
-  auto pivot = medianOf3(start, end, comp);
-  // Move pivot to end.
-  std::swap(*(end - 1), *pivot);
-  pivot = std::partition(start, end - 1, [&comp, end](decltype(*start) v) {
-    return comp(v, *(end - 1));
+  auto Pivot = medianOf3(Start, End, Comp);
+  // Move Pivot to End.
+  std::swap(*(End - 1), *Pivot);
+  Pivot = std::partition(Start, End - 1, [&Comp, End](decltype(*Start) V) {
+    return Comp(V, *(End - 1));
   });
-  // Move pivot to middle of partition.
-  std::swap(*pivot, *(end - 1));
+  // Move Pivot to middle of partition.
+  std::swap(*Pivot, *(End - 1));

   // Recurse.
-  tg.spawn([=, &comp, &tg] {
-    parallel_quick_sort(start, pivot, comp, tg, depth - 1);
+  TG.spawn([=, &Comp, &TG] {
+    parallel_quick_sort(Start, Pivot, Comp, TG, Depth - 1);
   });
-  parallel_quick_sort(pivot + 1, end, comp, tg, depth - 1);
+  parallel_quick_sort(Pivot + 1, End, Comp, TG, Depth - 1);
 }
 }

-template <class RandomAccessIterator, class Comp>
+template <class RandomAccessIterator, class Comparator>
 void parallel_sort(
-    RandomAccessIterator start, RandomAccessIterator end,
-    const Comp &comp = std::less<
+    RandomAccessIterator Start, RandomAccessIterator End,
+    const Comparator &Comp = std::less<
         typename std::iterator_traits<RandomAccessIterator>::value_type>()) {
-  TaskGroup tg;
-  detail::parallel_quick_sort(start, end, comp, tg,
-                              llvm::Log2_64(std::distance(start, end)) + 1);
+  TaskGroup TG;
+  detail::parallel_quick_sort(Start, End, Comp, TG,
+                              llvm::Log2_64(std::distance(Start, End)) + 1);
 }
 #endif

-template <class T> void parallel_sort(T *start, T *end) {
-  parallel_sort(start, end, std::less<T>());
+template <class T> void parallel_sort(T *Start, T *End) {
+  parallel_sort(Start, End, std::less<T>());
 }

-#if !defined(LLVM_ENABLE_THREADS) || LLVM_ENABLE_THREADS == 0
+#if !LLVM_ENABLE_THREADS
 template <class IterTy, class FuncTy>
 void parallel_for_each(IterTy Begin, IterTy End, FuncTy Fn) {
   std::for_each(Begin, End, Fn);
@@ -302,12 +133,12 @@ void parallel_for_each(IterTy Begin, IterTy End, FuncTy Fn) {
   if (TaskSize == 0)
     TaskSize = 1;

-  TaskGroup Tg;
+  TaskGroup TG;
   while (TaskSize <= std::distance(Begin, End)) {
-    Tg.spawn([=, &Fn] { std::for_each(Begin, Begin + TaskSize, Fn); });
+    TG.spawn([=, &Fn] { std::for_each(Begin, Begin + TaskSize, Fn); });
     Begin += TaskSize;
   }
-  Tg.spawn([=, &Fn] { std::for_each(Begin, End, Fn); });
+  TG.spawn([=, &Fn] { std::for_each(Begin, End, Fn); });
 }

 template <class IndexTy, class FuncTy>
@@ -316,20 +147,20 @@ void parallel_for(IndexTy Begin, IndexTy End, FuncTy Fn) {
   if (TaskSize == 0)
     TaskSize = 1;

-  TaskGroup Tg;
+  TaskGroup TG;
   IndexTy I = Begin;
   for (; I + TaskSize < End; I += TaskSize) {
-    Tg.spawn([=, &Fn] {
+    TG.spawn([=, &Fn] {
       for (IndexTy J = I, E = I + TaskSize; J != E; ++J)
         Fn(J);
     });
   }
-  Tg.spawn([=, &Fn] {
+  TG.spawn([=, &Fn] {
     for (IndexTy J = I; J < End; ++J)
       Fn(J);
   });
 }
 #endif

-} // end namespace lld
+} // End namespace lld

 #endif // LLD_CORE_PARALLEL_H
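Two details of the header above are worth spelling out. parallel_quick_sort is seeded with Depth = llvm::Log2_64(std::distance(Start, End)) + 1, so even a pathologically unbalanced partition spawns only O(log n) levels of tasks before falling back to std::sort, and ranges under MinParallelSize (1024 elements) never spawn at all. The standalone sketch below re-creates that pattern with std::async standing in for TaskGroup; the cutoff and the median-of-3 pivot mirror the header, everything else is illustrative (compiles as C++14).

// Standalone re-creation of the depth-bounded quicksort pattern (sketch
// only; std::async stands in for lld's TaskGroup).
#include <algorithm>
#include <cmath>
#include <future>
#include <vector>

template <class It, class Comp>
void quickSortBounded(It Start, It End, Comp C, size_t Depth) {
  // Sequential fallback mirrors the MinParallelSize / Depth == 0 cutoff.
  if (End - Start < 1024 || Depth == 0) {
    std::sort(Start, End, C);
    return;
  }

  // Median-of-3 pivot, moved to End - 1 so the partition predicate can
  // compare against a stable location.
  It Mid = Start + (End - Start) / 2;
  It Pivot =
      C(*Start, *(End - 1))
          ? (C(*Mid, *(End - 1)) ? (C(*Start, *Mid) ? Mid : Start) : End - 1)
          : (C(*Mid, *Start) ? (C(*(End - 1), *Mid) ? Mid : End - 1) : Start);
  std::swap(*(End - 1), *Pivot);
  Pivot = std::partition(Start, End - 1,
                         [&](const auto &V) { return C(V, *(End - 1)); });
  std::swap(*Pivot, *(End - 1));

  // One half runs asynchronously, the other on the current thread; Depth
  // shrinks by one either way, bounding the task tree at O(log n).
  auto Left = std::async(std::launch::async, [=] {
    quickSortBounded(Start, Pivot, C, Depth - 1);
  });
  quickSortBounded(Pivot + 1, End, C, Depth - 1);
  Left.wait();
}

int main() {
  std::vector<int> V(100000);
  for (size_t I = 0; I < V.size(); ++I)
    V[I] = static_cast<int>((V.size() - I) % 977);
  quickSortBounded(V.begin(), V.end(), std::less<int>(),
                   static_cast<size_t>(std::log2(V.size())) + 1);
}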

lld/include/lld/Core/TaskGroup.h (new file)

@@ -0,0 +1,65 @@
+//===- lld/Core/TaskGroup.h - Task Group ----------------------------------===//
+//
+//                             The LLVM Linker
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLD_CORE_TASKGROUP_H
+#define LLD_CORE_TASKGROUP_H
+
+#include "lld/Core/LLVM.h"
+
+#include <condition_variable>
+#include <functional>
+#include <mutex>
+
+namespace lld {
+/// \brief Allows one or more threads to wait on a potentially unknown number of
+/// events.
+///
+/// A latch starts at \p count. inc() increments this, and dec() decrements it.
+/// All calls to sync() will block while the count is not 0.
+///
+/// Calling dec() on a Latch with a count of 0 has undefined behavior.
+class Latch {
+  uint32_t _count;
+  mutable std::mutex _condMut;
+  mutable std::condition_variable _cond;
+
+public:
+  explicit Latch(uint32_t count = 0) : _count(count) {}
+  ~Latch() { sync(); }
+
+  void inc() {
+    std::unique_lock<std::mutex> lock(_condMut);
+    ++_count;
+  }
+
+  void dec() {
+    std::unique_lock<std::mutex> lock(_condMut);
+    if (--_count == 0)
+      _cond.notify_all();
+  }
+
+  void sync() const {
+    std::unique_lock<std::mutex> lock(_condMut);
+    _cond.wait(lock, [&] { return _count == 0; });
+  }
+};
+
+/// \brief Allows launching a number of tasks and waiting for them to finish
+/// either explicitly via sync() or implicitly on destruction.
+class TaskGroup {
+  Latch _latch;
+
+public:
+  void spawn(std::function<void()> f);
+
+  void sync() const { _latch.sync(); }
+};
+}
+
+#endif
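A short sketch of the Latch contract documented above, driven by plain std::thread instead of the hidden Executor (illustrative only, not part of the commit). The key ordering rule, which TaskGroup::spawn also follows, is that inc() runs before the worker can possibly call dec(), so the count never touches 0 while work is still outstanding.

// Illustrative only; assumes lld/Core/TaskGroup.h is on the include path.
#include "lld/Core/TaskGroup.h"

#include <thread>

void runThreeTasks() {
  lld::Latch Done;
  for (int I = 0; I < 3; ++I) {
    Done.inc(); // Register the event before the worker is launched.
    std::thread([&Done] {
      // ... one unit of work ...
      Done.dec(); // Wakes sync() once the count reaches 0.
    }).detach();
  }
  Done.sync(); // Blocks until all three workers have called dec().
}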

lld/lib/Core/CMakeLists.txt

@@ -12,6 +12,7 @@ add_lld_library(lldCore
   Resolver.cpp
   SymbolTable.cpp
   TargetOptionsCommandFlags.cpp
+  TaskGroup.cpp
   Writer.cpp

   ADDITIONAL_HEADER_DIRS

lld/lib/Core/TaskGroup.cpp (new file, 140 lines)

@@ -0,0 +1,140 @@
+//===- lld/Core/TaskGroup.cpp - Task Group --------------------------------===//
+//
+//                             The LLVM Linker
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "lld/Core/TaskGroup.h"
+
+#include "llvm/config/llvm-config.h"
+
+#include <atomic>
+#include <stack>
+
+#if defined(_MSC_VER) && LLVM_ENABLE_THREADS
+#include <concrt.h>
+#include <ppl.h>
+#endif
+
+using namespace lld;
+
+namespace {
+
+/// \brief An abstract class that takes closures and runs them asynchronously.
+class Executor {
+public:
+  virtual ~Executor() = default;
+  virtual void add(std::function<void()> func) = 0;
+
+  static Executor *getDefaultExecutor();
+};
+
+#if !LLVM_ENABLE_THREADS
+class SyncExecutor : public Executor {
+public:
+  virtual void add(std::function<void()> F) { F(); }
+};
+
+Executor *Executor::getDefaultExecutor() {
+  static SyncExecutor Exec;
+  return &Exec;
+}
+#elif defined(_MSC_VER)
+/// \brief An Executor that runs tasks via ConcRT.
+class ConcRTExecutor : public Executor {
+  struct Taskish {
+    Taskish(std::function<void()> Task) : Task(Task) {}
+
+    std::function<void()> Task;
+
+    static void run(void *P) {
+      Taskish *Self = static_cast<Taskish *>(P);
+      Self->Task();
+      concurrency::Free(Self);
+    }
+  };
+
+public:
+  virtual void add(std::function<void()> F) {
+    Concurrency::CurrentScheduler::ScheduleTask(
+        Taskish::run, new (concurrency::Alloc(sizeof(Taskish))) Taskish(F));
+  }
+};
+
+Executor *Executor::getDefaultExecutor() {
+  static ConcRTExecutor exec;
+  return &exec;
+}
+#else
+/// \brief An implementation of an Executor that runs closures on a thread pool
+/// in FILO order.
+class ThreadPoolExecutor : public Executor {
+public:
+  explicit ThreadPoolExecutor(
+      unsigned ThreadCount = std::thread::hardware_concurrency())
+      : Done(ThreadCount) {
+    // Spawn all but one of the threads in another thread as spawning threads
+    // can take a while.
+    std::thread([&, ThreadCount] {
+      for (size_t i = 1; i < ThreadCount; ++i) {
+        std::thread([=] { work(); }).detach();
+      }
+      work();
+    }).detach();
+  }
+
+  ~ThreadPoolExecutor() override {
+    std::unique_lock<std::mutex> Lock(Mutex);
+    Stop = true;
+    Lock.unlock();
+    Cond.notify_all();
+    // Wait for ~Latch.
+  }
+
+  void add(std::function<void()> F) override {
+    std::unique_lock<std::mutex> Lock(Mutex);
+    WorkStack.push(F);
+    Lock.unlock();
+    Cond.notify_one();
+  }
+
+private:
+  void work() {
+    while (true) {
+      std::unique_lock<std::mutex> Lock(Mutex);
+      Cond.wait(Lock, [&] { return Stop || !WorkStack.empty(); });
+      if (Stop)
+        break;
+      auto Task = WorkStack.top();
+      WorkStack.pop();
+      Lock.unlock();
+      Task();
+    }
+    Done.dec();
+  }
+
+  std::atomic<bool> Stop{false};
+  std::stack<std::function<void()>> WorkStack;
+  std::mutex Mutex;
+  std::condition_variable Cond;
+  Latch Done;
+};
+
+Executor *Executor::getDefaultExecutor() {
+  static ThreadPoolExecutor exec;
+  return &exec;
+}
+#endif
+}
+
+void TaskGroup::spawn(std::function<void()> f) {
+  _latch.inc();
+  Executor::getDefaultExecutor()->add([&, f] {
+    f();
+    _latch.dec();
+  });
+}
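The subtle line in ~ThreadPoolExecutor above is the "// Wait for ~Latch" comment: Done is the last member destroyed, and Latch::~Latch calls sync(), so the destructor only returns after every detached worker has run Done.dec(). The condensed pool below shows the same wait/notify loop but swaps the detach-plus-Latch handshake for explicit join(); it is a sketch, not lld's code.

// A condensed, standalone pool showing the same wait/notify loop as
// ThreadPoolExecutor (sketch only; join() replaces the detach + ~Latch
// handshake used above).
#include <condition_variable>
#include <functional>
#include <mutex>
#include <stack>
#include <thread>
#include <vector>

class MiniPool {
public:
  explicit MiniPool(unsigned N = 2) {
    for (unsigned I = 0; I < N; ++I)
      Workers.emplace_back([this] { work(); });
  }

  ~MiniPool() {
    {
      std::lock_guard<std::mutex> Lock(Mutex);
      Stop = true;
    }
    Cond.notify_all();
    for (std::thread &T : Workers)
      T.join(); // Plays the role of lld's "Wait for ~Latch".
  }

  void add(std::function<void()> F) {
    {
      std::lock_guard<std::mutex> Lock(Mutex);
      WorkStack.push(std::move(F)); // LIFO, like lld's WorkStack.
    }
    Cond.notify_one();
  }

private:
  void work() {
    while (true) {
      std::unique_lock<std::mutex> Lock(Mutex);
      Cond.wait(Lock, [&] { return Stop || !WorkStack.empty(); });
      // Checking Stop first means tasks still queued at shutdown are
      // dropped, just as in the original; TaskGroup::sync() is what
      // guarantees the queue is drained before destruction.
      if (Stop)
        return;
      auto Task = std::move(WorkStack.top());
      WorkStack.pop();
      Lock.unlock();
      Task(); // Run outside the lock so other workers can make progress.
    }
  }

  bool Stop = false;
  std::stack<std::function<void()>> WorkStack;
  std::mutex Mutex;
  std::condition_variable Cond;
  std::vector<std::thread> Workers;
};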

lld/unittests/CoreTests/CMakeLists.txt

@@ -3,5 +3,5 @@ add_lld_unittest(CoreTests
   )

 target_link_libraries(CoreTests
-  ${LLVM_PTHREAD_LIB}
+  lldCore ${LLVM_PTHREAD_LIB}
   )
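CoreTests now links lldCore because TaskGroup::spawn moved out of the header and into TaskGroup.cpp. A hypothetical smoke test in the style of that target might look like the following; the commit does not show the actual test sources, so the test name and body are illustrative.

// Hypothetical smoke test for the CoreTests target (sketch only).
#include "lld/Core/Parallel.h"
#include "gtest/gtest.h"

#include <algorithm>
#include <random>
#include <vector>

TEST(Parallel, Sort) {
  std::vector<uint32_t> Vals(1000);
  std::mt19937 Rng(42);
  for (uint32_t &V : Vals)
    V = Rng();
  lld::parallel_sort(Vals.data(), Vals.data() + Vals.size());
  EXPECT_TRUE(std::is_sorted(Vals.begin(), Vals.end()));
}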