Add parallel_for and use it where appropriate.

When we iterate over a range of numbers rather than over the elements of a
container, parallel_for is a better fit than parallel_for_each.

llvm-svn: 288002
Rui Ueyama 2016-11-27 19:28:32 +00:00
parent 7ad961cc70
commit 1dd86a664f
2 changed files with 50 additions and 22 deletions
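
To illustrate the distinction drawn in the commit message: parallel_for_each walks the elements of a range, while parallel_for walks a numeric index range, which is more convenient whenever the loop body also needs each element's position. A minimal sketch, assuming lld's Parallel.h (modified below) is on the include path; the header path, the example() function, and the Values/Squares vectors are illustrative and not part of this patch:

#include "lld/Core/Parallel.h"

#include <numeric>
#include <vector>

void example() {
  std::vector<int> Values(4096);
  std::iota(Values.begin(), Values.end(), 0);
  std::vector<int> Squares(Values.size());

  // Element-based iteration: enough when the body only needs the element.
  lld::parallel_for_each(Values.begin(), Values.end(), [](int &V) { V += 1; });

  // Index-based iteration: a better fit when the body also needs the
  // position, e.g. to write into a parallel output array.
  lld::parallel_for(size_t(0), Values.size(),
                    [&](size_t I) { Squares[I] = Values[I] * Values[I]; });
}

The first hunk below makes exactly this switch in BuildIdSection<ELFT>::computeHash: instead of recovering a chunk's index with pointer arithmetic (&Chunk - Chunks.data()), the hash callback now receives the index directly.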

@@ -334,16 +334,14 @@ void BuildIdSection<ELFT>::computeHash(
   std::vector<ArrayRef<uint8_t>> Chunks = split(Data, 1024 * 1024);
   std::vector<uint8_t> Hashes(Chunks.size() * HashSize);
-  auto Fn = [&](ArrayRef<uint8_t> &Chunk) {
-    size_t Idx = &Chunk - Chunks.data();
-    HashFn(Hashes.data() + Idx * HashSize, Chunk);
-  };
+  auto Fn = [&](size_t I) { HashFn(Hashes.data() + I * HashSize, Chunks[I]); };
   // Compute hash values.
   if (Config->Threads)
-    parallel_for_each(Chunks.begin(), Chunks.end(), Fn);
+    parallel_for(size_t(0), Chunks.size(), Fn);
   else
-    std::for_each(Chunks.begin(), Chunks.end(), Fn);
+    for (size_t I = 0, E = Chunks.size(); I != E; ++I)
+      Fn(I);
   // Write to the final output buffer.
   HashFn(HashBuf, Hashes);

@@ -270,33 +270,63 @@ template <class T> void parallel_sort(T *start, T *end) {
 }
 #if !defined(LLVM_ENABLE_THREADS) || LLVM_ENABLE_THREADS == 0
-template <class Iterator, class Func>
-void parallel_for_each(Iterator begin, Iterator end, Func func) {
-  std::for_each(begin, end, func);
+template <class IterTy, class FuncTy>
+void parallel_for_each(IterTy Begin, IterTy End, FuncTy Fn) {
+  std::for_each(Begin, End, Fn);
 }
+template <class IndexTy, class FuncTy>
+void parallel_for(IndexTy Begin, IndexTy End, FuncTy Fn) {
+  for (IndexTy I = Begin; I != End; ++I)
+    Fn(I);
+}
 #elif defined(_MSC_VER)
 // Use ppl parallel_for_each on Windows.
-template <class Iterator, class Func>
-void parallel_for_each(Iterator begin, Iterator end, Func func) {
-  concurrency::parallel_for_each(begin, end, func);
+template <class IterTy, class FuncTy>
+void parallel_for_each(IterTy Begin, IterTy End, FuncTy Fn) {
+  concurrency::parallel_for_each(Begin, End, Fn);
 }
+template <class IndexTy, class FuncTy>
+void parallel_for(IndexTy Begin, IndexTy End, FuncTy Fn) {
+  concurrency::parallel_for(Begin, End, Fn);
+}
 #else
-template <class Iterator, class Func>
-void parallel_for_each(Iterator begin, Iterator end, Func func) {
+template <class IterTy, class FuncTy>
+void parallel_for_each(IterTy Begin, IterTy End, FuncTy Fn) {
   // TaskGroup has a relatively high overhead, so we want to reduce
   // the number of spawn() calls. We'll create up to 1024 tasks here.
   // (Note that 1024 is an arbitrary number. This code probably needs
   // improving to take the number of available cores into account.)
-  ptrdiff_t taskSize = std::distance(begin, end) / 1024;
-  if (taskSize == 0)
-    taskSize = 1;
+  ptrdiff_t TaskSize = std::distance(Begin, End) / 1024;
+  if (TaskSize == 0)
+    TaskSize = 1;
-  TaskGroup tg;
-  while (taskSize <= std::distance(begin, end)) {
-    tg.spawn([=, &func] { std::for_each(begin, begin + taskSize, func); });
-    begin += taskSize;
+  TaskGroup Tg;
+  while (TaskSize <= std::distance(Begin, End)) {
+    Tg.spawn([=, &Fn] { std::for_each(Begin, Begin + TaskSize, Fn); });
+    Begin += TaskSize;
   }
-  std::for_each(begin, end, func);
+  std::for_each(Begin, End, Fn);
 }
+template <class IndexTy, class FuncTy>
+void parallel_for(IndexTy Begin, IndexTy End, FuncTy Fn) {
+  ptrdiff_t TaskSize = (End - Begin) / 1024;
+  if (TaskSize == 0)
+    TaskSize = 1;
+  TaskGroup Tg;
+  IndexTy I = Begin;
+  // Spawn one task per full chunk of TaskSize indices.
+  for (; I + TaskSize < End; I += TaskSize) {
+    Tg.spawn([=, &Fn] {
+      for (IndexTy J = I, E = I + TaskSize; J != E; ++J)
+        Fn(J);
+    });
+  }
+  // Run the remaining tail of indices on the calling thread.
+  for (; I < End; ++I)
+    Fn(I);
+}
 #endif
 } // end namespace lld
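
The comment in the threaded parallel_for_each above explains the scheduling strategy: because TaskGroup::spawn() has noticeable overhead, the range is split into at most roughly 1024 contiguous chunks and each chunk is handed to a single task, so the spawn cost is amortized over many calls to Fn. The same chunking arithmetic can be sketched with only standard C++; here std::async merely stands in for lld's TaskGroup (which runs tasks on a shared pool rather than potentially one thread per task), and chunked_parallel_for is a hypothetical name, so this is an illustration of the strategy, not the library's implementation:

#include <cstddef>
#include <future>
#include <vector>

template <class FuncTy>
void chunked_parallel_for(size_t Begin, size_t End, FuncTy Fn) {
  // Create at most ~1024 tasks so the per-task overhead stays small.
  size_t TaskSize = (End - Begin) / 1024;
  if (TaskSize == 0)
    TaskSize = 1;

  std::vector<std::future<void>> Tasks;
  size_t I = Begin;
  for (; I + TaskSize < End; I += TaskSize) {
    // Each task processes the half-open chunk [I, I + TaskSize).
    Tasks.push_back(std::async(std::launch::async, [=, &Fn] {
      for (size_t J = I, E = I + TaskSize; J != E; ++J)
        Fn(J);
    }));
  }
  // The tail (at most TaskSize indices) runs on the calling thread,
  // mirroring the tail handling in parallel_for above.
  for (; I < End; ++I)
    Fn(I);
  for (std::future<void> &T : Tasks)
    T.wait();
}

For example, with End - Begin == 4096 the chunk size is 4, so about a thousand tasks are spawned and the few leftover indices at the end are processed inline; for ranges shorter than 1024 the chunk size falls back to 1.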