[libc++][PSTL] Remove configuration flags

Some configuration flags are always set to the same value, so we can just remove them to make the code a bit cleaner.

Reviewed By: ldionne, #libc

Spies: pcwang-thead, libcxx-commits, miyuki

Differential Revision: https://reviews.llvm.org/D149502
Nikolas Klauser 2023-05-02 09:26:59 -07:00
parent a524f84780
commit 18a8bfa297
6 changed files with 11 additions and 95 deletions
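
To illustrate the kind of cleanup involved, here is a minimal sketch (the macro and function names below are invented for illustration and do not appear in the libc++ sources): when a configuration macro is unconditionally defined, the branch guarded by its absence is dead code, so both the macro and the dead branch can be dropped, keeping only the path that was always compiled. The same applies to a macro that always expands to nothing: its call sites can simply be deleted.

// Before: the flag is defined unconditionally, so only the first branch is ever
// compiled, and _EXAMPLE_PRAGMA_MESSAGE always expands to nothing.
#define _EXAMPLE_FEATURE_PRESENT
#define _EXAMPLE_PRAGMA_MESSAGE(x)

#if defined(_EXAMPLE_FEATURE_PRESENT)
inline int __twice_before(int __x) {
  _EXAMPLE_PRAGMA_MESSAGE("fast path"); // expands to an empty statement
  return 2 * __x;
}
#else
inline int __twice_before(int __x) { return __x + __x; } // dead branch, never compiled
#endif

// After: the flag, the dead #else branch, and the no-op macro call site are gone;
// only the code path that was always taken remains.
inline int __twice_after(int __x) { return 2 * __x; }

In the diff below this is what happens to flags such as _PSTL_PRAGMA_MESSAGE, _PSTL_PRAGMA_FORCEINLINE, the _PSTL_CPP14_*_PRESENT macros, and _PSTL_EARLYEXIT_PRESENT.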


@@ -1326,7 +1326,7 @@ __brick_unique(_RandomAccessIterator __first,
_RandomAccessIterator __last,
_BinaryPredicate __pred,
/*is_vector=*/std::true_type) noexcept {
_PSTL_PRAGMA_MESSAGE("Vectorized algorithm unimplemented, redirected to serial");
// TODO: vectorize
return std::unique(__first, __last, __pred);
}
@@ -2088,7 +2088,7 @@ _RandomAccessIterator __brick_partition(
_RandomAccessIterator __last,
_UnaryPredicate __pred,
/*is_vector=*/std::true_type) noexcept {
_PSTL_PRAGMA_MESSAGE("Vectorized algorithm unimplemented, redirected to serial");
// TODO: vectorize
return std::partition(__first, __last, __pred);
}
@@ -2194,7 +2194,7 @@ _RandomAccessIterator __brick_stable_partition(
_RandomAccessIterator __last,
_UnaryPredicate __pred,
/*__is_vector=*/std::true_type) noexcept {
_PSTL_PRAGMA_MESSAGE("Vectorized algorithm unimplemented, redirected to serial");
// TODO: vectorize
return std::stable_partition(__first, __last, __pred);
}
@@ -2893,7 +2893,7 @@ _RandomAccessIterator3 __brick_merge(
_RandomAccessIterator3 __d_first,
_Compare __comp,
/* __is_vector = */ std::true_type) noexcept {
_PSTL_PRAGMA_MESSAGE("Vectorized algorithm unimplemented, redirected to serial");
// TODO: vectorize
return std::merge(__first1, __last1, __first2, __last2, __d_first, __comp);
}
@@ -2971,7 +2971,7 @@ void __brick_inplace_merge(
_RandomAccessIterator __last,
_Compare __comp,
/* __is_vector = */ std::true_type) noexcept {
_PSTL_PRAGMA_MESSAGE("Vectorized algorithm unimplemented, redirected to serial")
// TODO: vectorize
std::inplace_merge(__first, __middle, __last, __comp);
}
@@ -3409,7 +3409,7 @@ _OutputIterator __brick_set_union(
_OutputIterator __result,
_Compare __comp,
/*__is_vector=*/std::true_type) noexcept {
_PSTL_PRAGMA_MESSAGE("Vectorized algorithm unimplemented, redirected to serial");
// TODO: vectorize
return std::set_union(__first1, __last1, __first2, __last2, __result, __comp);
}
@@ -3500,7 +3500,7 @@ _RandomAccessIterator3 __brick_set_intersection(
_RandomAccessIterator3 __result,
_Compare __comp,
/*__is_vector=*/std::true_type) noexcept {
_PSTL_PRAGMA_MESSAGE("Vectorized algorithm unimplemented, redirected to serial");
// TODO: vectorize
return std::set_intersection(__first1, __last1, __first2, __last2, __result, __comp);
}
@@ -3636,7 +3636,7 @@ _RandomAccessIterator3 __brick_set_difference(
_RandomAccessIterator3 __result,
_Compare __comp,
/*__is_vector=*/std::true_type) noexcept {
_PSTL_PRAGMA_MESSAGE("Vectorized algorithm unimplemented, redirected to serial");
// TODO: vectorize
return std::set_difference(__first1, __last1, __first2, __last2, __result, __comp);
}
@@ -3774,7 +3774,7 @@ _RandomAccessIterator3 __brick_set_symmetric_difference(
_RandomAccessIterator3 __result,
_Compare __comp,
/*__is_vector=*/std::true_type) noexcept {
_PSTL_PRAGMA_MESSAGE("Vectorized algorithm unimplemented, redirected to serial");
// TODO: vectorize
return std::set_symmetric_difference(__first1, __last1, __first2, __last2, __result, __comp);
}
@@ -4067,13 +4067,7 @@ std::pair<_ForwardIterator1, _ForwardIterator2> __mismatch_serial(
_ForwardIterator2 __first2,
_ForwardIterator2 __last2,
_BinaryPredicate __pred) {
#if defined(_PSTL_CPP14_2RANGE_MISMATCH_EQUAL_PRESENT)
return std::mismatch(__first1, __last1, __first2, __last2, __pred);
#else
for (; __first1 != __last1 && __first2 != __last2 && __pred(*__first1, *__first2); ++__first1, ++__first2) {
}
return std::make_pair(__first1, __first2);
#endif
}
template <class _ForwardIterator1, class _ForwardIterator2, class _Predicate>


@@ -48,11 +48,8 @@ struct is_execution_policy<__pstl::execution::parallel_unsequenced_policy> : std
template <>
struct is_execution_policy<__pstl::execution::unsequenced_policy> : std::true_type {};
#if defined(_PSTL_CPP14_VARIABLE_TEMPLATES_PRESENT)
template <class _Tp>
constexpr bool is_execution_policy_v = __pstl::execution::is_execution_policy<_Tp>::value;
#endif
} // namespace v1
} // namespace execution


@@ -214,7 +214,6 @@ std::pair<_OutputIterator, _Tp> __brick_transform_scan(
/*is_vector=*/std::false_type) noexcept {
for (; __first != __last; ++__first, ++__result) {
*__result = __init;
_PSTL_PRAGMA_FORCEINLINE
__init = __binary_op(__init, __unary_op(*__first));
}
return std::make_pair(__result, __init);
@@ -232,7 +231,6 @@ std::pair<_OutputIterator, _Tp> __brick_transform_scan(
/*Inclusive*/ std::true_type,
/*is_vector=*/std::false_type) noexcept {
for (; __first != __last; ++__first, ++__result) {
_PSTL_PRAGMA_FORCEINLINE
__init = __binary_op(__init, __unary_op(*__first));
*__result = __init;
}
@@ -438,7 +436,6 @@ typename std::enable_if<std::is_floating_point<_Tp>::value, _OutputIterator>::ty
__result + __i + __len,
__result + __i,
[&__initial, &__binary_op](const _Tp& __x) {
_PSTL_PRAGMA_FORCEINLINE
return __binary_op(__initial, __x);
}) -
1);


@@ -31,7 +31,7 @@ namespace __pstl
namespace __par_backend = __omp_backend;
}
#else
_PSTL_PRAGMA_MESSAGE("Parallel backend was not specified");
# error "No backend set"
#endif
#endif /* _PSTL_PARALLEL_BACKEND_H */


@@ -31,8 +31,6 @@
# define _PSTL_PRAGMA_SIMD_REDUCTION(PRM)
#endif //Enable SIMD
#define _PSTL_PRAGMA_FORCEINLINE
// TODO: find out when to enable these annotations
#if 0
# define _PSTL_PRAGMA_SIMD_SCAN(PRM) _PSTL_PRAGMA(omp simd reduction(inscan, PRM))
@@ -44,18 +42,11 @@
# define _PSTL_PRAGMA_SIMD_EXCLUSIVE_SCAN(PRM)
#endif
#define _PSTL_CPP14_2RANGE_MISMATCH_EQUAL_PRESENT
#define _PSTL_CPP14_MAKE_REVERSE_ITERATOR_PRESENT
#define _PSTL_CPP14_INTEGER_SEQUENCE_PRESENT
#define _PSTL_CPP14_VARIABLE_TEMPLATES_PRESENT
#if defined(_OPENMP) && _OPENMP >= 201307
# define _PSTL_UDR_PRESENT
#endif
#define _PSTL_PRAGMA_SIMD_EARLYEXIT
#define _PSTL_PRAGMA_SIMD_ORDERED_MONOTONIC(PRM)
#define _PSTL_PRAGMA_SIMD_ORDERED_MONOTONIC_2ARGS(PRM1, PRM2)
#define _PSTL_USE_NONTEMPORAL_STORES_IF_ALLOWED
// Declaration of reduction functor, where
// NAME - the name of the functor
@@ -67,9 +58,4 @@
#define _PSTL_PRAGMA_DECLARE_REDUCTION(NAME, OP) \
_PSTL_PRAGMA(omp declare reduction(NAME:OP : omp_out(omp_in)) initializer(omp_priv = omp_orig))
#define _PSTL_PRAGMA_VECTOR_UNALIGNED
#define _PSTL_USE_NONTEMPORAL_STORES_IF_ALLOWED
#define _PSTL_PRAGMA_MESSAGE(x)
#define _PSTL_PRAGMA_MESSAGE_POLICIES(x)
#endif /* _PSTL_CONFIG_H */


@@ -64,15 +64,6 @@ template <class _Index, class _DifferenceType, class _Pred>
_LIBCPP_HIDE_FROM_ABI bool
__simd_or(_Index __first, _DifferenceType __n, _Pred __pred) noexcept
{
#if defined(_PSTL_EARLYEXIT_PRESENT)
_DifferenceType __i;
_PSTL_PRAGMA_VECTOR_UNALIGNED
_PSTL_PRAGMA_SIMD_EARLYEXIT
for (__i = 0; __i < __n; ++__i)
if (__pred(__first[__i]))
break;
return __i < __n;
#else
_DifferenceType __block_size = 4 < __n ? 4 : __n;
const _Index __last = __first + __n;
while (__last != __first)
@@ -97,32 +88,18 @@ __simd_or(_Index __first, _DifferenceType __n, _Pred __pred) noexcept
}
}
return false;
#endif
}
template <class _Index, class _DifferenceType, class _Compare>
_LIBCPP_HIDE_FROM_ABI _Index
__simd_first(_Index __first, _DifferenceType __begin, _DifferenceType __end, _Compare __comp) noexcept
{
#if defined(_PSTL_EARLYEXIT_PRESENT)
_DifferenceType __i = __begin;
_PSTL_PRAGMA_VECTOR_UNALIGNED // Do not generate peel loop part
_PSTL_PRAGMA_SIMD_EARLYEXIT for (; __i < __end; ++__i)
{
if (__comp(__first, __i))
{
break;
}
}
return __first + __i;
#else
// Experiments show good block sizes like this
const _DifferenceType __block_size = 8;
alignas(__lane_size) _DifferenceType __lane[__block_size] = {0};
while (__end - __begin >= __block_size)
{
_DifferenceType __found = 0;
_PSTL_PRAGMA_VECTOR_UNALIGNED // Do not generate peel loop part
_PSTL_PRAGMA_SIMD_REDUCTION(|
: __found) for (_DifferenceType __i = __begin; __i < __begin + __block_size;
++__i)
@@ -157,22 +134,12 @@ __simd_first(_Index __first, _DifferenceType __begin, _DifferenceType __end, _Co
++__begin;
}
return __first + __end;
#endif //_PSTL_EARLYEXIT_PRESENT
}
template <class _Index1, class _DifferenceType, class _Index2, class _Pred>
_LIBCPP_HIDE_FROM_ABI std::pair<_Index1, _Index2>
__simd_first(_Index1 __first1, _DifferenceType __n, _Index2 __first2, _Pred __pred) noexcept
{
#if defined(_PSTL_EARLYEXIT_PRESENT)
_DifferenceType __i = 0;
_PSTL_PRAGMA_VECTOR_UNALIGNED
_PSTL_PRAGMA_SIMD_EARLYEXIT
for (; __i < __n; ++__i)
if (__pred(__first1[__i], __first2[__i]))
break;
return std::make_pair(__first1 + __i, __first2 + __i);
#else
const _Index1 __last1 = __first1 + __n;
const _Index2 __last2 = __first2 + __n;
// Experiments show good block sizes like this
@@ -182,7 +149,6 @@ __simd_first(_Index1 __first1, _DifferenceType __n, _Index2 __first2, _Pred __pr
{
_DifferenceType __found = 0;
_DifferenceType __i;
_PSTL_PRAGMA_VECTOR_UNALIGNED // Do not generate peel loop part
_PSTL_PRAGMA_SIMD_REDUCTION(|
: __found) for (__i = 0; __i < __block_size; ++__i)
{
@@ -211,7 +177,6 @@ __simd_first(_Index1 __first1, _DifferenceType __n, _Index2 __first2, _Pred __pr
return std::make_pair(__first1, __first2);
return std::make_pair(__last1, __last2);
#endif //_PSTL_EARLYEXIT_PRESENT
}
template <class _Index, class _DifferenceType, class _Pred>
@@ -241,7 +206,6 @@ __simd_unique_copy(_InputIterator __first, _DifferenceType __n, _OutputIterator
_PSTL_PRAGMA_SIMD
for (_DifferenceType __i = 1; __i < __n; ++__i)
{
_PSTL_PRAGMA_SIMD_ORDERED_MONOTONIC(__cnt : 1)
if (!__pred(__first[__i], __first[__i - 1]))
{
__result[__cnt] = __first[__i];
@@ -271,7 +235,6 @@ __simd_copy_if(_InputIterator __first, _DifferenceType __n, _OutputIterator __re
_PSTL_PRAGMA_SIMD
for (_DifferenceType __i = 0; __i < __n; ++__i)
{
_PSTL_PRAGMA_SIMD_ORDERED_MONOTONIC(__cnt : 1)
if (__pred(__first[__i]))
{
__result[__cnt] = __first[__i];
@@ -322,7 +285,6 @@ __simd_copy_by_mask(_InputIterator __first, _DifferenceType __n, _OutputIterator
{
if (__mask[__i])
{
_PSTL_PRAGMA_SIMD_ORDERED_MONOTONIC(__cnt : 1)
{
__assigner(__first + __i, __result + __cnt);
++__cnt;
@@ -340,7 +302,6 @@ __simd_partition_by_mask(_InputIterator __first, _DifferenceType __n, _OutputIte
_PSTL_PRAGMA_SIMD
for (_DifferenceType __i = 0; __i < __n; ++__i)
{
_PSTL_PRAGMA_SIMD_ORDERED_MONOTONIC_2ARGS(__cnt_true : 1, __cnt_false : 1)
if (__mask[__i])
{
__out_true[__cnt_true] = __first[__i];
@@ -386,17 +347,6 @@ __simd_adjacent_find(_Index __first, _Index __last, _BinaryPredicate __pred, boo
typedef typename std::iterator_traits<_Index>::difference_type _DifferenceType;
_DifferenceType __i = 0;
#if defined(_PSTL_EARLYEXIT_PRESENT)
//Some compiler versions fail to compile the following loop when iterators are used. Indices are used instead
const _DifferenceType __n = __last - __first - 1;
_PSTL_PRAGMA_VECTOR_UNALIGNED
_PSTL_PRAGMA_SIMD_EARLYEXIT
for (; __i < __n; ++__i)
if (__pred(__first[__i], __first[__i + 1]))
break;
return __i < __n ? __first + __i : __last;
#else
// Experiments show good block sizes like this
//TODO: to consider tuning block_size for various data types
const _DifferenceType __block_size = 8;
@@ -404,7 +354,6 @@ __simd_adjacent_find(_Index __first, _Index __last, _BinaryPredicate __pred, boo
while (__last - __first >= __block_size)
{
_DifferenceType __found = 0;
_PSTL_PRAGMA_VECTOR_UNALIGNED // Do not generate peel loop part
_PSTL_PRAGMA_SIMD_REDUCTION(|
: __found) for (__i = 0; __i < __block_size - 1; ++__i)
{
@@ -437,7 +386,6 @@ __simd_adjacent_find(_Index __first, _Index __last, _BinaryPredicate __pred, boo
return __first;
return __last;
#endif
}
// It was created to reduce the code inside std::enable_if
@@ -559,13 +507,11 @@ __simd_scan(_InputIterator __first, _Size __n, _OutputIterator __result, _UnaryO
_CombinerType __init_{__init, &__binary_op};
_PSTL_PRAGMA_DECLARE_REDUCTION(__bin_op, _CombinerType)
_PSTL_PRAGMA_SIMD_SCAN(__bin_op : __init_)
for (_Size __i = 0; __i < __n; ++__i)
{
__result[__i] = __init_.__value;
_PSTL_PRAGMA_SIMD_EXCLUSIVE_SCAN(__init_)
_PSTL_PRAGMA_FORCEINLINE
__init_.__value = __binary_op(__init_.__value, __unary_op(__first[__i]));
}
return std::make_pair(__result + __n, __init_.__value);
@@ -601,11 +547,9 @@ __simd_scan(_InputIterator __first, _Size __n, _OutputIterator __result, _UnaryO
_CombinerType __init_{__init, &__binary_op};
_PSTL_PRAGMA_DECLARE_REDUCTION(__bin_op, _CombinerType)
_PSTL_PRAGMA_SIMD_SCAN(__bin_op : __init_)
for (_Size __i = 0; __i < __n; ++__i)
{
_PSTL_PRAGMA_FORCEINLINE
__init_.__value = __binary_op(__init_.__value, __unary_op(__first[__i]));
_PSTL_PRAGMA_SIMD_INCLUSIVE_SCAN(__init_)
__result[__i] = __init_.__value;
@@ -768,7 +712,6 @@ __simd_partition_copy(_InputIterator __first, _DifferenceType __n, _OutputIterat
_PSTL_PRAGMA_SIMD
for (_DifferenceType __i = 0; __i < __n; ++__i)
{
_PSTL_PRAGMA_SIMD_ORDERED_MONOTONIC_2ARGS(__cnt_true : 1, __cnt_false : 1)
if (__pred(__first[__i]))
{
__out_true[__cnt_true] = __first[__i];
@@ -849,7 +792,6 @@ __simd_remove_if(_RandomAccessIterator __first, _DifferenceType __n, _UnaryPredi
_PSTL_PRAGMA_SIMD
for (_DifferenceType __i = 1; __i < __n; ++__i)
{
_PSTL_PRAGMA_SIMD_ORDERED_MONOTONIC(__cnt : 1)
if (!__pred(__current[__i]))
{
__current[__cnt] = std::move(__current[__i]);