Mirror of https://github.com/capstone-engine/llvm-capstone.git
Add noexcept throughout <atomic>
The CMake CheckLibcxxAtomic module was always failing to compile its test program, even when libatomic wasn't needed. This was because the check doesn't link a C++ runtime library to provide std::terminate, which is required for exception support. The check is still really broken, but <atomic> is better! llvm-svn: 364146
Parent: 08c699a110
Commit: cf92a1f6eb
@@ -926,156 +926,156 @@ struct __cxx_atomic_base_impl {
 #define __cxx_atomic_is_lock_free(__s) __c11_atomic_is_lock_free(__s)
 
 _LIBCPP_INLINE_VISIBILITY inline
-void __cxx_atomic_thread_fence(memory_order __order) {
+void __cxx_atomic_thread_fence(memory_order __order) _NOEXCEPT {
   __c11_atomic_thread_fence(static_cast<__memory_order_underlying_t>(__order));
 }
 
 _LIBCPP_INLINE_VISIBILITY inline
-void __cxx_atomic_signal_fence(memory_order __order) {
+void __cxx_atomic_signal_fence(memory_order __order) _NOEXCEPT {
   __c11_atomic_signal_fence(static_cast<__memory_order_underlying_t>(__order));
 }
 
 template<class _Tp>
 _LIBCPP_INLINE_VISIBILITY
-void __cxx_atomic_init(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __val) {
+void __cxx_atomic_init(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __val) _NOEXCEPT {
   __c11_atomic_init(&__a->__a_value, __val);
 }
 template<class _Tp>
 _LIBCPP_INLINE_VISIBILITY
-void __cxx_atomic_init(__cxx_atomic_base_impl<_Tp> * __a, _Tp __val) {
+void __cxx_atomic_init(__cxx_atomic_base_impl<_Tp> * __a, _Tp __val) _NOEXCEPT {
   __c11_atomic_init(&__a->__a_value, __val);
 }
 
 template<class _Tp>
 _LIBCPP_INLINE_VISIBILITY
-void __cxx_atomic_store(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __val, memory_order __order) {
+void __cxx_atomic_store(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __val, memory_order __order) _NOEXCEPT {
   __c11_atomic_store(&__a->__a_value, __val, static_cast<__memory_order_underlying_t>(__order));
 }
 template<class _Tp>
 _LIBCPP_INLINE_VISIBILITY
-void __cxx_atomic_store(__cxx_atomic_base_impl<_Tp> * __a, _Tp __val, memory_order __order) {
+void __cxx_atomic_store(__cxx_atomic_base_impl<_Tp> * __a, _Tp __val, memory_order __order) _NOEXCEPT {
   __c11_atomic_store(&__a->__a_value, __val, static_cast<__memory_order_underlying_t>(__order));
 }
 
 template<class _Tp>
 _LIBCPP_INLINE_VISIBILITY
-_Tp __cxx_atomic_load(__cxx_atomic_base_impl<_Tp> const volatile* __a, memory_order __order) {
+_Tp __cxx_atomic_load(__cxx_atomic_base_impl<_Tp> const volatile* __a, memory_order __order) _NOEXCEPT {
   using __ptr_type = typename remove_const<decltype(__a->__a_value)>::type*;
   return __c11_atomic_load(const_cast<__ptr_type>(&__a->__a_value), static_cast<__memory_order_underlying_t>(__order));
 }
 template<class _Tp>
 _LIBCPP_INLINE_VISIBILITY
-_Tp __cxx_atomic_load(__cxx_atomic_base_impl<_Tp> const* __a, memory_order __order) {
+_Tp __cxx_atomic_load(__cxx_atomic_base_impl<_Tp> const* __a, memory_order __order) _NOEXCEPT {
   using __ptr_type = typename remove_const<decltype(__a->__a_value)>::type*;
   return __c11_atomic_load(const_cast<__ptr_type>(&__a->__a_value), static_cast<__memory_order_underlying_t>(__order));
 }
 
 template<class _Tp>
 _LIBCPP_INLINE_VISIBILITY
-_Tp __cxx_atomic_exchange(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __value, memory_order __order) {
+_Tp __cxx_atomic_exchange(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __value, memory_order __order) _NOEXCEPT {
   return __c11_atomic_exchange(&__a->__a_value, __value, static_cast<__memory_order_underlying_t>(__order));
 }
 template<class _Tp>
 _LIBCPP_INLINE_VISIBILITY
-_Tp __cxx_atomic_exchange(__cxx_atomic_base_impl<_Tp> * __a, _Tp __value, memory_order __order) {
+_Tp __cxx_atomic_exchange(__cxx_atomic_base_impl<_Tp> * __a, _Tp __value, memory_order __order) _NOEXCEPT {
   return __c11_atomic_exchange(&__a->__a_value, __value, static_cast<__memory_order_underlying_t>(__order));
 }
 
 template<class _Tp>
 _LIBCPP_INLINE_VISIBILITY
-bool __cxx_atomic_compare_exchange_strong(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp* __expected, _Tp __value, memory_order __success, memory_order __failure) {
+bool __cxx_atomic_compare_exchange_strong(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp* __expected, _Tp __value, memory_order __success, memory_order __failure) _NOEXCEPT {
   return __c11_atomic_compare_exchange_strong(&__a->__a_value, __expected, __value, static_cast<__memory_order_underlying_t>(__success), static_cast<__memory_order_underlying_t>(__failure));
 }
 template<class _Tp>
 _LIBCPP_INLINE_VISIBILITY
-bool __cxx_atomic_compare_exchange_strong(__cxx_atomic_base_impl<_Tp> * __a, _Tp* __expected, _Tp __value, memory_order __success, memory_order __failure) {
+bool __cxx_atomic_compare_exchange_strong(__cxx_atomic_base_impl<_Tp> * __a, _Tp* __expected, _Tp __value, memory_order __success, memory_order __failure) _NOEXCEPT {
   return __c11_atomic_compare_exchange_strong(&__a->__a_value, __expected, __value, static_cast<__memory_order_underlying_t>(__success), static_cast<__memory_order_underlying_t>(__failure));
 }
 
 template<class _Tp>
 _LIBCPP_INLINE_VISIBILITY
-bool __cxx_atomic_compare_exchange_weak(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp* __expected, _Tp __value, memory_order __success, memory_order __failure) {
+bool __cxx_atomic_compare_exchange_weak(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp* __expected, _Tp __value, memory_order __success, memory_order __failure) _NOEXCEPT {
   return __c11_atomic_compare_exchange_weak(&__a->__a_value, __expected, __value, static_cast<__memory_order_underlying_t>(__success), static_cast<__memory_order_underlying_t>(__failure));
 }
 template<class _Tp>
 _LIBCPP_INLINE_VISIBILITY
-bool __cxx_atomic_compare_exchange_weak(__cxx_atomic_base_impl<_Tp> * __a, _Tp* __expected, _Tp __value, memory_order __success, memory_order __failure) {
+bool __cxx_atomic_compare_exchange_weak(__cxx_atomic_base_impl<_Tp> * __a, _Tp* __expected, _Tp __value, memory_order __success, memory_order __failure) _NOEXCEPT {
   return __c11_atomic_compare_exchange_weak(&__a->__a_value, __expected, __value, static_cast<__memory_order_underlying_t>(__success), static_cast<__memory_order_underlying_t>(__failure));
 }
 
 template<class _Tp>
 _LIBCPP_INLINE_VISIBILITY
-_Tp __cxx_atomic_fetch_add(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __delta, memory_order __order) {
+_Tp __cxx_atomic_fetch_add(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __delta, memory_order __order) _NOEXCEPT {
   return __c11_atomic_fetch_add(&__a->__a_value, __delta, static_cast<__memory_order_underlying_t>(__order));
 }
 template<class _Tp>
 _LIBCPP_INLINE_VISIBILITY
-_Tp __cxx_atomic_fetch_add(__cxx_atomic_base_impl<_Tp> * __a, _Tp __delta, memory_order __order) {
+_Tp __cxx_atomic_fetch_add(__cxx_atomic_base_impl<_Tp> * __a, _Tp __delta, memory_order __order) _NOEXCEPT {
   return __c11_atomic_fetch_add(&__a->__a_value, __delta, static_cast<__memory_order_underlying_t>(__order));
 }
 
 template<class _Tp>
 _LIBCPP_INLINE_VISIBILITY
-_Tp* __cxx_atomic_fetch_add(__cxx_atomic_base_impl<_Tp*> volatile* __a, ptrdiff_t __delta, memory_order __order) {
+_Tp* __cxx_atomic_fetch_add(__cxx_atomic_base_impl<_Tp*> volatile* __a, ptrdiff_t __delta, memory_order __order) _NOEXCEPT {
   return __c11_atomic_fetch_add(&__a->__a_value, __delta, static_cast<__memory_order_underlying_t>(__order));
 }
 template<class _Tp>
 _LIBCPP_INLINE_VISIBILITY
-_Tp* __cxx_atomic_fetch_add(__cxx_atomic_base_impl<_Tp*> * __a, ptrdiff_t __delta, memory_order __order) {
+_Tp* __cxx_atomic_fetch_add(__cxx_atomic_base_impl<_Tp*> * __a, ptrdiff_t __delta, memory_order __order) _NOEXCEPT {
   return __c11_atomic_fetch_add(&__a->__a_value, __delta, static_cast<__memory_order_underlying_t>(__order));
 }
 
 template<class _Tp>
 _LIBCPP_INLINE_VISIBILITY
-_Tp __cxx_atomic_fetch_sub(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __delta, memory_order __order) {
+_Tp __cxx_atomic_fetch_sub(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __delta, memory_order __order) _NOEXCEPT {
   return __c11_atomic_fetch_sub(&__a->__a_value, __delta, static_cast<__memory_order_underlying_t>(__order));
 }
 template<class _Tp>
 _LIBCPP_INLINE_VISIBILITY
-_Tp __cxx_atomic_fetch_sub(__cxx_atomic_base_impl<_Tp> * __a, _Tp __delta, memory_order __order) {
+_Tp __cxx_atomic_fetch_sub(__cxx_atomic_base_impl<_Tp> * __a, _Tp __delta, memory_order __order) _NOEXCEPT {
   return __c11_atomic_fetch_sub(&__a->__a_value, __delta, static_cast<__memory_order_underlying_t>(__order));
 }
 template<class _Tp>
 _LIBCPP_INLINE_VISIBILITY
-_Tp* __cxx_atomic_fetch_sub(__cxx_atomic_base_impl<_Tp*> volatile* __a, ptrdiff_t __delta, memory_order __order) {
+_Tp* __cxx_atomic_fetch_sub(__cxx_atomic_base_impl<_Tp*> volatile* __a, ptrdiff_t __delta, memory_order __order) _NOEXCEPT {
   return __c11_atomic_fetch_sub(&__a->__a_value, __delta, static_cast<__memory_order_underlying_t>(__order));
 }
 template<class _Tp>
 _LIBCPP_INLINE_VISIBILITY
-_Tp* __cxx_atomic_fetch_sub(__cxx_atomic_base_impl<_Tp*> * __a, ptrdiff_t __delta, memory_order __order) {
+_Tp* __cxx_atomic_fetch_sub(__cxx_atomic_base_impl<_Tp*> * __a, ptrdiff_t __delta, memory_order __order) _NOEXCEPT {
   return __c11_atomic_fetch_sub(&__a->__a_value, __delta, static_cast<__memory_order_underlying_t>(__order));
 }
 
 template<class _Tp>
 _LIBCPP_INLINE_VISIBILITY
-_Tp __cxx_atomic_fetch_and(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __pattern, memory_order __order) {
+_Tp __cxx_atomic_fetch_and(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __pattern, memory_order __order) _NOEXCEPT {
   return __c11_atomic_fetch_and(&__a->__a_value, __pattern, static_cast<__memory_order_underlying_t>(__order));
 }
 template<class _Tp>
 _LIBCPP_INLINE_VISIBILITY
-_Tp __cxx_atomic_fetch_and(__cxx_atomic_base_impl<_Tp> * __a, _Tp __pattern, memory_order __order) {
+_Tp __cxx_atomic_fetch_and(__cxx_atomic_base_impl<_Tp> * __a, _Tp __pattern, memory_order __order) _NOEXCEPT {
   return __c11_atomic_fetch_and(&__a->__a_value, __pattern, static_cast<__memory_order_underlying_t>(__order));
 }
 
 template<class _Tp>
 _LIBCPP_INLINE_VISIBILITY
-_Tp __cxx_atomic_fetch_or(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __pattern, memory_order __order) {
+_Tp __cxx_atomic_fetch_or(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __pattern, memory_order __order) _NOEXCEPT {
   return __c11_atomic_fetch_or(&__a->__a_value, __pattern, static_cast<__memory_order_underlying_t>(__order));
 }
 template<class _Tp>
 _LIBCPP_INLINE_VISIBILITY
-_Tp __cxx_atomic_fetch_or(__cxx_atomic_base_impl<_Tp> * __a, _Tp __pattern, memory_order __order) {
+_Tp __cxx_atomic_fetch_or(__cxx_atomic_base_impl<_Tp> * __a, _Tp __pattern, memory_order __order) _NOEXCEPT {
   return __c11_atomic_fetch_or(&__a->__a_value, __pattern, static_cast<__memory_order_underlying_t>(__order));
 }
 
 template<class _Tp>
 _LIBCPP_INLINE_VISIBILITY
-_Tp __cxx_atomic_fetch_xor(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __pattern, memory_order __order) {
+_Tp __cxx_atomic_fetch_xor(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __pattern, memory_order __order) _NOEXCEPT {
   return __c11_atomic_fetch_xor(&__a->__a_value, __pattern, static_cast<__memory_order_underlying_t>(__order));
 }
 template<class _Tp>
 _LIBCPP_INLINE_VISIBILITY
-_Tp __cxx_atomic_fetch_xor(__cxx_atomic_base_impl<_Tp> * __a, _Tp __pattern, memory_order __order) {
+_Tp __cxx_atomic_fetch_xor(__cxx_atomic_base_impl<_Tp> * __a, _Tp __pattern, memory_order __order) _NOEXCEPT {
   return __c11_atomic_fetch_xor(&__a->__a_value, __pattern, static_cast<__memory_order_underlying_t>(__order));
 }
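A note on the macro appearing throughout the diff: _NOEXCEPT is libc++'s portability spelling of the noexcept specifier, defined in libc++'s <__config>. The sketch below is paraphrased and the exact guards vary across libc++ versions; consult the real header for the actual definition:

    // Paraphrased from libc++'s <__config>; exact conditionals vary by version.
    #ifndef _LIBCPP_HAS_NO_NOEXCEPT
    #  define _NOEXCEPT noexcept
    #else
    #  define _NOEXCEPT throw()  // pre-C++11 fallback: dynamic exception spec
    #endif

With the noexcept expansion, each annotated function promises it cannot throw, which is exactly what the CMake check needed: no potential exception, no std::terminate reference.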