boost: Update to 1.83.0 (#33)

GPUCode authored 2023-11-04 21:30:42 +02:00, committed by GitHub
parent 3c27c785ad
commit 4b0f33f6f9
815 changed files with 55208 additions and 51093 deletions

View File

@@ -144,7 +144,7 @@ import option ;
import tools/boost_install/boost-install ;
path-constant BOOST_ROOT : . ;
constant BOOST_VERSION : 1.82.0 ;
constant BOOST_VERSION : 1.83.0 ;
constant BOOST_JAMROOT_MODULE : $(__name__) ;
# Allow subprojects to simply `import config : requires ;` to get access to the requires rule

View File

@@ -16,7 +16,7 @@ To update the Boost version (or to add a new library) follow these steps:
```
- Store the boost directory in a variable for later use: `$boost_dir = $pwd`.
- Add bcp to your path: `$env:Path += ";$boost_dir\bin.v2\tools\bcp\msvc-14.2\release\link-static\threading-multi"` (The correct output path should be printed by b2 during the build.)
- Add bcp to your path: `$env:Path += ";$boost_dir\bin.v2\tools\bcp\msvc-14.3\release\link-static\threading-multi"` (The correct output path should be printed by b2 during the build.)
- `cd` to this repo's directory (`...\externals\boost\`)
- Remove the existing boost from the repo: `rm -r boost` (This is only necessary if doing a Boost version upgrade, in case they removed any files in the new version.)
- Run `.\build.cmd $boost_dir` to build a new trimmed-down distro.

View File

@@ -1,176 +0,0 @@
// Boost string_algo library case_conv.hpp header file ---------------------------//
// Copyright Pavol Droba 2002-2003.
//
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// See http://www.boost.org/ for updates, documentation, and revision history.
#ifndef BOOST_STRING_CASE_CONV_HPP
#define BOOST_STRING_CASE_CONV_HPP
#include <boost/algorithm/string/config.hpp>
#include <algorithm>
#include <locale>
#include <boost/iterator/transform_iterator.hpp>
#include <boost/range/as_literal.hpp>
#include <boost/range/begin.hpp>
#include <boost/range/end.hpp>
#include <boost/range/value_type.hpp>
#include <boost/algorithm/string/detail/case_conv.hpp>
/*! \file
Defines sequence case-conversion algorithms.
Algorithms convert each element in the input sequence to the
desired case using provided locales.
*/
namespace boost {
namespace algorithm {
// to_lower -----------------------------------------------//
//! Convert to lower case
/*!
Each element of the input sequence is converted to lower
case. The result is a copy of the input converted to lower case.
It is returned as a sequence or copied to the output iterator.
\param Output An output iterator to which the result will be copied
\param Input An input range
\param Loc A locale used for conversion
\return
An output iterator pointing just after the last inserted character or
a copy of the input
\note The second variant of this function provides the strong exception-safety guarantee
*/
template<typename OutputIteratorT, typename RangeT>
inline OutputIteratorT
to_lower_copy(
OutputIteratorT Output,
const RangeT& Input,
const std::locale& Loc=std::locale())
{
return ::boost::algorithm::detail::transform_range_copy(
Output,
::boost::as_literal(Input),
::boost::algorithm::detail::to_lowerF<
typename range_value<RangeT>::type >(Loc));
}
//! Convert to lower case
/*!
\overload
*/
template<typename SequenceT>
inline SequenceT to_lower_copy(
const SequenceT& Input,
const std::locale& Loc=std::locale())
{
return ::boost::algorithm::detail::transform_range_copy<SequenceT>(
Input,
::boost::algorithm::detail::to_lowerF<
typename range_value<SequenceT>::type >(Loc));
}
//! Convert to lower case
/*!
Each element of the input sequence is converted to lower
case. The input sequence is modified in-place.
\param Input A range
\param Loc A locale used for conversion
*/
template<typename WritableRangeT>
inline void to_lower(
WritableRangeT& Input,
const std::locale& Loc=std::locale())
{
::boost::algorithm::detail::transform_range(
::boost::as_literal(Input),
::boost::algorithm::detail::to_lowerF<
typename range_value<WritableRangeT>::type >(Loc));
}
// to_upper -----------------------------------------------//
//! Convert to upper case
/*!
Each element of the input sequence is converted to upper
case. The result is a copy of the input converted to upper case.
It is returned as a sequence or copied to the output iterator.
\param Output An output iterator to which the result will be copied
\param Input An input range
\param Loc A locale used for conversion
\return
An output iterator pointing just after the last inserted character or
a copy of the input
\note The second variant of this function provides the strong exception-safety guarantee
*/
template<typename OutputIteratorT, typename RangeT>
inline OutputIteratorT
to_upper_copy(
OutputIteratorT Output,
const RangeT& Input,
const std::locale& Loc=std::locale())
{
return ::boost::algorithm::detail::transform_range_copy(
Output,
::boost::as_literal(Input),
::boost::algorithm::detail::to_upperF<
typename range_value<RangeT>::type >(Loc));
}
//! Convert to upper case
/*!
\overload
*/
template<typename SequenceT>
inline SequenceT to_upper_copy(
const SequenceT& Input,
const std::locale& Loc=std::locale())
{
return ::boost::algorithm::detail::transform_range_copy<SequenceT>(
Input,
::boost::algorithm::detail::to_upperF<
typename range_value<SequenceT>::type >(Loc));
}
//! Convert to upper case
/*!
Each element of the input sequence is converted to upper
case. The input sequence is modified in-place.
\param Input An input range
\param Loc A locale used for conversion
*/
template<typename WritableRangeT>
inline void to_upper(
WritableRangeT& Input,
const std::locale& Loc=std::locale())
{
::boost::algorithm::detail::transform_range(
::boost::as_literal(Input),
::boost::algorithm::detail::to_upperF<
typename range_value<WritableRangeT>::type >(Loc));
}
} // namespace algorithm
// pull names to the boost namespace
using algorithm::to_lower;
using algorithm::to_lower_copy;
using algorithm::to_upper;
using algorithm::to_upper_copy;
} // namespace boost
#endif // BOOST_STRING_CASE_CONV_HPP
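
For reference, a minimal usage sketch of the algorithms this (now removed) header provides; the input string and the default locale are illustrative:

```cpp
#include <boost/algorithm/string/case_conv.hpp>
#include <iostream>
#include <string>

int main()
{
    std::string s = "Hello, Boost!";

    // Copying variants leave the input untouched.
    std::string lower = boost::algorithm::to_lower_copy(s);
    std::string upper = boost::algorithm::to_upper_copy(s);

    // Mutating variant converts in-place.
    boost::algorithm::to_lower(s);

    std::cout << lower << '\n'   // hello, boost!
              << upper << '\n'   // HELLO, BOOST!
              << s << '\n';      // hello, boost!
}
```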

View File

@@ -1,130 +0,0 @@
// Boost string_algo library string_funct.hpp header file ---------------------------//
// Copyright Pavol Droba 2002-2003.
//
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// See http://www.boost.org/ for updates, documentation, and revision history.
#ifndef BOOST_STRING_CASE_CONV_DETAIL_HPP
#define BOOST_STRING_CASE_CONV_DETAIL_HPP
#include <boost/algorithm/string/config.hpp>
#include <locale>
#include <functional>
#include <boost/iterator/transform_iterator.hpp>
#include <boost/range/begin.hpp>
#include <boost/range/end.hpp>
#include <boost/type_traits/make_unsigned.hpp>
namespace boost {
namespace algorithm {
namespace detail {
// case conversion functors -----------------------------------------------//
#if BOOST_WORKAROUND(BOOST_MSVC, >= 1400)
#pragma warning(push)
#pragma warning(disable:4512) //assignment operator could not be generated
#endif
// a tolower functor
template<typename CharT>
struct to_lowerF
{
typedef CharT argument_type;
typedef CharT result_type;
// Constructor
to_lowerF( const std::locale& Loc ) : m_Loc( &Loc ) {}
// Operation
CharT operator ()( CharT Ch ) const
{
#if defined(BOOST_BORLANDC) && (BOOST_BORLANDC >= 0x560) && (BOOST_BORLANDC <= 0x564) && !defined(_USE_OLD_RW_STL)
return std::tolower( static_cast<typename boost::make_unsigned <CharT>::type> ( Ch ));
#else
return std::tolower<CharT>( Ch, *m_Loc );
#endif
}
private:
const std::locale* m_Loc;
};
// a toupper functor
template<typename CharT>
struct to_upperF
{
typedef CharT argument_type;
typedef CharT result_type;
// Constructor
to_upperF( const std::locale& Loc ) : m_Loc( &Loc ) {}
// Operation
CharT operator ()( CharT Ch ) const
{
#if defined(BOOST_BORLANDC) && (BOOST_BORLANDC >= 0x560) && (BOOST_BORLANDC <= 0x564) && !defined(_USE_OLD_RW_STL)
return std::toupper( static_cast<typename boost::make_unsigned <CharT>::type> ( Ch ));
#else
return std::toupper<CharT>( Ch, *m_Loc );
#endif
}
private:
const std::locale* m_Loc;
};
#if BOOST_WORKAROUND(BOOST_MSVC, >= 1400)
#pragma warning(pop)
#endif
// algorithm implementation -------------------------------------------------------------------------
// Transform a range
template<typename OutputIteratorT, typename RangeT, typename FunctorT>
OutputIteratorT transform_range_copy(
OutputIteratorT Output,
const RangeT& Input,
FunctorT Functor)
{
return std::transform(
::boost::begin(Input),
::boost::end(Input),
Output,
Functor);
}
// Transform a range (in-place)
template<typename RangeT, typename FunctorT>
void transform_range(
const RangeT& Input,
FunctorT Functor)
{
std::transform(
::boost::begin(Input),
::boost::end(Input),
::boost::begin(Input),
Functor);
}
template<typename SequenceT, typename RangeT, typename FunctorT>
inline SequenceT transform_range_copy(
const RangeT& Input,
FunctorT Functor)
{
return SequenceT(
::boost::make_transform_iterator(
::boost::begin(Input),
Functor),
::boost::make_transform_iterator(
::boost::end(Input),
Functor));
}
} // namespace detail
} // namespace algorithm
} // namespace boost
#endif // BOOST_STRING_CASE_CONV_DETAIL_HPP
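
These functors simply wrap the locale-aware `std::tolower`/`std::toupper`; a standalone sketch of the same `transform_range_copy` pattern using only the standard library (not the detail API, which is internal to Boost):

```cpp
#include <algorithm>
#include <iostream>
#include <iterator>
#include <locale>
#include <string>

int main()
{
    const std::string in = "MiXeD CaSe";
    const std::locale loc;   // locale used for conversion, as in to_lowerF

    std::string out;
    out.reserve(in.size());

    // Equivalent of transform_range_copy with a to_lowerF-style functor.
    std::transform(in.begin(), in.end(), std::back_inserter(out),
        [&loc](char ch) { return std::tolower(ch, loc); });

    std::cout << out << '\n';   // "mixed case"
}
```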

View File

@@ -1,844 +0,0 @@
// Boost string_algo library erase.hpp header file ---------------------------//
// Copyright Pavol Droba 2002-2006.
//
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// See http://www.boost.org/ for updates, documentation, and revision history.
#ifndef BOOST_STRING_ERASE_HPP
#define BOOST_STRING_ERASE_HPP
#include <boost/algorithm/string/config.hpp>
#include <boost/range/iterator_range_core.hpp>
#include <boost/range/begin.hpp>
#include <boost/range/end.hpp>
#include <boost/range/iterator.hpp>
#include <boost/range/const_iterator.hpp>
#include <boost/algorithm/string/find_format.hpp>
#include <boost/algorithm/string/finder.hpp>
#include <boost/algorithm/string/formatter.hpp>
/*! \file
Defines various erase algorithms. Each algorithm removes
part(s) of the input according to a searching criteria.
*/
namespace boost {
namespace algorithm {
// erase_range -------------------------------------------------------//
//! Erase range algorithm
/*!
Remove the given range from the input. The result is a modified copy of
the input. It is returned as a sequence or copied to the output iterator.
\param Output An output iterator to which the result will be copied
\param Input An input sequence
\param SearchRange A range in the input to be removed
\return An output iterator pointing just after the last inserted character or
a modified copy of the input
\note The second variant of this function provides the strong exception-safety guarantee
*/
template<typename OutputIteratorT, typename RangeT>
inline OutputIteratorT erase_range_copy(
OutputIteratorT Output,
const RangeT& Input,
const iterator_range<
BOOST_STRING_TYPENAME
range_const_iterator<RangeT>::type>& SearchRange )
{
return ::boost::algorithm::find_format_copy(
Output,
Input,
::boost::algorithm::range_finder(SearchRange),
::boost::algorithm::empty_formatter(Input) );
}
//! Erase range algorithm
/*!
\overload
*/
template<typename SequenceT>
inline SequenceT erase_range_copy(
const SequenceT& Input,
const iterator_range<
BOOST_STRING_TYPENAME
range_const_iterator<SequenceT>::type>& SearchRange )
{
return ::boost::algorithm::find_format_copy(
Input,
::boost::algorithm::range_finder(SearchRange),
::boost::algorithm::empty_formatter(Input) );
}
//! Erase range algorithm
/*!
Remove the given range from the input.
The input sequence is modified in-place.
\param Input An input sequence
\param SearchRange A range in the input to be removed
*/
template<typename SequenceT>
inline void erase_range(
SequenceT& Input,
const iterator_range<
BOOST_STRING_TYPENAME
range_iterator<SequenceT>::type>& SearchRange )
{
::boost::algorithm::find_format(
Input,
::boost::algorithm::range_finder(SearchRange),
::boost::algorithm::empty_formatter(Input) );
}
// erase_first --------------------------------------------------------//
//! Erase first algorithm
/*!
Remove the first occurrence of the substring from the input.
The result is a modified copy of the input. It is returned as a sequence
or copied to the output iterator.
\param Output An output iterator to which the result will be copied
\param Input An input string
\param Search A substring to be searched for
\return An output iterator pointing just after the last inserted character or
a modified copy of the input
\note The second variant of this function provides the strong exception-safety guarantee
*/
template<
typename OutputIteratorT,
typename Range1T,
typename Range2T>
inline OutputIteratorT erase_first_copy(
OutputIteratorT Output,
const Range1T& Input,
const Range2T& Search )
{
return ::boost::algorithm::find_format_copy(
Output,
Input,
::boost::algorithm::first_finder(Search),
::boost::algorithm::empty_formatter(Input) );
}
//! Erase first algorithm
/*!
\overload
*/
template<typename SequenceT, typename RangeT>
inline SequenceT erase_first_copy(
const SequenceT& Input,
const RangeT& Search )
{
return ::boost::algorithm::find_format_copy(
Input,
::boost::algorithm::first_finder(Search),
::boost::algorithm::empty_formatter(Input) );
}
//! Erase first algorithm
/*!
Remove the first occurrence of the substring from the input.
The input sequence is modified in-place.
\param Input An input string
\param Search A substring to be searched for.
*/
template<typename SequenceT, typename RangeT>
inline void erase_first(
SequenceT& Input,
const RangeT& Search )
{
::boost::algorithm::find_format(
Input,
::boost::algorithm::first_finder(Search),
::boost::algorithm::empty_formatter(Input) );
}
// erase_first ( case insensitive ) ------------------------------------//
//! Erase first algorithm ( case insensitive )
/*!
Remove the first occurrence of the substring from the input.
The result is a modified copy of the input. It is returned as a sequence
or copied to the output iterator.
Searching is case insensitive.
\param Output An output iterator to which the result will be copied
\param Input An input string
\param Search A substring to be searched for
\param Loc A locale used for case insensitive comparison
\return An output iterator pointing just after the last inserted character or
a modified copy of the input
\note The second variant of this function provides the strong exception-safety guarantee
*/
template<
typename OutputIteratorT,
typename Range1T,
typename Range2T>
inline OutputIteratorT ierase_first_copy(
OutputIteratorT Output,
const Range1T& Input,
const Range2T& Search,
const std::locale& Loc=std::locale() )
{
return ::boost::algorithm::find_format_copy(
Output,
Input,
::boost::algorithm::first_finder(Search, is_iequal(Loc)),
::boost::algorithm::empty_formatter(Input) );
}
//! Erase first algorithm ( case insensitive )
/*!
\overload
*/
template<typename SequenceT, typename RangeT>
inline SequenceT ierase_first_copy(
const SequenceT& Input,
const RangeT& Search,
const std::locale& Loc=std::locale() )
{
return ::boost::algorithm::find_format_copy(
Input,
::boost::algorithm::first_finder(Search, is_iequal(Loc)),
::boost::algorithm::empty_formatter(Input) );
}
//! Erase first algorithm ( case insensitive )
/*!
Remove the first occurrence of the substring from the input.
The input sequence is modified in-place. Searching is case insensitive.
\param Input An input string
\param Search A substring to be searched for
\param Loc A locale used for case insensitive comparison
*/
template<typename SequenceT, typename RangeT>
inline void ierase_first(
SequenceT& Input,
const RangeT& Search,
const std::locale& Loc=std::locale() )
{
::boost::algorithm::find_format(
Input,
::boost::algorithm::first_finder(Search, is_iequal(Loc)),
::boost::algorithm::empty_formatter(Input) );
}
// erase_last --------------------------------------------------------//
//! Erase last algorithm
/*!
Remove the last occurrence of the substring from the input.
The result is a modified copy of the input. It is returned as a sequence
or copied to the output iterator.
\param Output An output iterator to which the result will be copied
\param Input An input string
\param Search A substring to be searched for.
\return An output iterator pointing just after the last inserted character or
a modified copy of the input
\note The second variant of this function provides the strong exception-safety guarantee
*/
template<
typename OutputIteratorT,
typename Range1T,
typename Range2T>
inline OutputIteratorT erase_last_copy(
OutputIteratorT Output,
const Range1T& Input,
const Range2T& Search )
{
return ::boost::algorithm::find_format_copy(
Output,
Input,
::boost::algorithm::last_finder(Search),
::boost::algorithm::empty_formatter(Input) );
}
//! Erase last algorithm
/*!
\overload
*/
template<typename SequenceT, typename RangeT>
inline SequenceT erase_last_copy(
const SequenceT& Input,
const RangeT& Search )
{
return ::boost::algorithm::find_format_copy(
Input,
::boost::algorithm::last_finder(Search),
::boost::algorithm::empty_formatter(Input) );
}
//! Erase last algorithm
/*!
Remove the last occurrence of the substring from the input.
The input sequence is modified in-place.
\param Input An input string
\param Search A substring to be searched for
*/
template<typename SequenceT, typename RangeT>
inline void erase_last(
SequenceT& Input,
const RangeT& Search )
{
::boost::algorithm::find_format(
Input,
::boost::algorithm::last_finder(Search),
::boost::algorithm::empty_formatter(Input) );
}
// erase_last ( case insensitive ) ------------------------------------//
//! Erase last algorithm ( case insensitive )
/*!
Remove the last occurrence of the substring from the input.
The result is a modified copy of the input. It is returned as a sequence
or copied to the output iterator.
Searching is case insensitive.
\param Output An output iterator to which the result will be copied
\param Input An input string
\param Search A substring to be searched for
\param Loc A locale used for case insensitive comparison
\return An output iterator pointing just after the last inserted character or
a modified copy of the input
\note The second variant of this function provides the strong exception-safety guarantee
*/
template<
typename OutputIteratorT,
typename Range1T,
typename Range2T>
inline OutputIteratorT ierase_last_copy(
OutputIteratorT Output,
const Range1T& Input,
const Range2T& Search,
const std::locale& Loc=std::locale() )
{
return ::boost::algorithm::find_format_copy(
Output,
Input,
::boost::algorithm::last_finder(Search, is_iequal(Loc)),
::boost::algorithm::empty_formatter(Input) );
}
//! Erase last algorithm ( case insensitive )
/*!
\overload
*/
template<typename SequenceT, typename RangeT>
inline SequenceT ierase_last_copy(
const SequenceT& Input,
const RangeT& Search,
const std::locale& Loc=std::locale() )
{
return ::boost::algorithm::find_format_copy(
Input,
::boost::algorithm::last_finder(Search, is_iequal(Loc)),
::boost::algorithm::empty_formatter(Input) );
}
//! Erase last algorithm ( case insensitive )
/*!
Remove the last occurrence of the substring from the input.
The input sequence is modified in-place. Searching is case insensitive.
\param Input An input string
\param Search A substring to be searched for
\param Loc A locale used for case insensitive comparison
*/
template<typename SequenceT, typename RangeT>
inline void ierase_last(
SequenceT& Input,
const RangeT& Search,
const std::locale& Loc=std::locale() )
{
::boost::algorithm::find_format(
Input,
::boost::algorithm::last_finder(Search, is_iequal(Loc)),
::boost::algorithm::empty_formatter(Input) );
}
// erase_nth --------------------------------------------------------------------//
//! Erase nth algorithm
/*!
Remove the Nth occurrence of the substring in the input.
The result is a modified copy of the input. It is returned as a sequence
or copied to the output iterator.
\param Output An output iterator to which the result will be copied
\param Input An input string
\param Search A substring to be searched for
\param Nth An index of the match to be replaced. The index is 0-based.
For negative N, matches are counted from the end of string.
\return An output iterator pointing just after the last inserted character or
a modified copy of the input
\note The second variant of this function provides the strong exception-safety guarantee
*/
template<
typename OutputIteratorT,
typename Range1T,
typename Range2T>
inline OutputIteratorT erase_nth_copy(
OutputIteratorT Output,
const Range1T& Input,
const Range2T& Search,
int Nth )
{
return ::boost::algorithm::find_format_copy(
Output,
Input,
::boost::algorithm::nth_finder(Search, Nth),
::boost::algorithm::empty_formatter(Input) );
}
//! Erase nth algorithm
/*!
\overload
*/
template<typename SequenceT, typename RangeT>
inline SequenceT erase_nth_copy(
const SequenceT& Input,
const RangeT& Search,
int Nth )
{
return ::boost::algorithm::find_format_copy(
Input,
::boost::algorithm::nth_finder(Search, Nth),
::boost::algorithm::empty_formatter(Input) );
}
//! Erase nth algorithm
/*!
Remove the Nth occurrence of the substring in the input.
The input sequence is modified in-place.
\param Input An input string
\param Search A substring to be searched for.
\param Nth An index of the match to be replaced. The index is 0-based.
For negative N, matches are counted from the end of string.
*/
template<typename SequenceT, typename RangeT>
inline void erase_nth(
SequenceT& Input,
const RangeT& Search,
int Nth )
{
::boost::algorithm::find_format(
Input,
::boost::algorithm::nth_finder(Search, Nth),
::boost::algorithm::empty_formatter(Input) );
}
// erase_nth ( case insensitive ) ---------------------------------------------//
//! Erase nth algorithm ( case insensitive )
/*!
Remove the Nth occurrence of the substring in the input.
The result is a modified copy of the input. It is returned as a sequence
or copied to the output iterator.
Searching is case insensitive.
\param Output An output iterator to which the result will be copied
\param Input An input string
\param Search A substring to be searched for.
\param Nth An index of the match to be replaced. The index is 0-based.
For negative N, matches are counted from the end of string.
\param Loc A locale used for case insensitive comparison
\return An output iterator pointing just after the last inserted character or
a modified copy of the input
\note The second variant of this function provides the strong exception-safety guarantee
*/
template<
typename OutputIteratorT,
typename Range1T,
typename Range2T>
inline OutputIteratorT ierase_nth_copy(
OutputIteratorT Output,
const Range1T& Input,
const Range2T& Search,
int Nth,
const std::locale& Loc=std::locale() )
{
return ::boost::algorithm::find_format_copy(
Output,
Input,
::boost::algorithm::nth_finder(Search, Nth, is_iequal(Loc)),
::boost::algorithm::empty_formatter(Input) );
}
//! Erase nth algorithm ( case insensitive )
/*!
\overload
*/
template<typename SequenceT, typename RangeT>
inline SequenceT ierase_nth_copy(
const SequenceT& Input,
const RangeT& Search,
int Nth,
const std::locale& Loc=std::locale() )
{
return ::boost::algorithm::find_format_copy(
Input,
::boost::algorithm::nth_finder(Search, Nth, is_iequal(Loc)),
::boost::algorithm::empty_formatter(Input) );
}
//! Erase nth algorithm ( case insensitive )
/*!
Remove the Nth occurrence of the substring in the input.
The input sequence is modified in-place. Searching is case insensitive.
\param Input An input string
\param Search A substring to be searched for.
\param Nth An index of the match to be replaced. The index is 0-based.
For negative N, matches are counted from the end of string.
\param Loc A locale used for case insensitive comparison
*/
template<typename SequenceT, typename RangeT>
inline void ierase_nth(
SequenceT& Input,
const RangeT& Search,
int Nth,
const std::locale& Loc=std::locale() )
{
::boost::algorithm::find_format(
Input,
::boost::algorithm::nth_finder(Search, Nth, is_iequal(Loc)),
::boost::algorithm::empty_formatter(Input) );
}
// erase_all --------------------------------------------------------//
//! Erase all algorithm
/*!
Remove all the occurrences of the string from the input.
The result is a modified copy of the input. It is returned as a sequence
or copied to the output iterator.
\param Output An output iterator to which the result will be copied
\param Input An input sequence
\param Search A substring to be searched for.
\return An output iterator pointing just after the last inserted character or
a modified copy of the input
\note The second variant of this function provides the strong exception-safety guarantee
*/
template<
typename OutputIteratorT,
typename Range1T,
typename Range2T>
inline OutputIteratorT erase_all_copy(
OutputIteratorT Output,
const Range1T& Input,
const Range2T& Search )
{
return ::boost::algorithm::find_format_all_copy(
Output,
Input,
::boost::algorithm::first_finder(Search),
::boost::algorithm::empty_formatter(Input) );
}
//! Erase all algorithm
/*!
\overload
*/
template<typename SequenceT, typename RangeT>
inline SequenceT erase_all_copy(
const SequenceT& Input,
const RangeT& Search )
{
return ::boost::algorithm::find_format_all_copy(
Input,
::boost::algorithm::first_finder(Search),
::boost::algorithm::empty_formatter(Input) );
}
//! Erase all algorithm
/*!
Remove all the occurrences of the string from the input.
The input sequence is modified in-place.
\param Input An input string
\param Search A substring to be searched for.
*/
template<typename SequenceT, typename RangeT>
inline void erase_all(
SequenceT& Input,
const RangeT& Search )
{
::boost::algorithm::find_format_all(
Input,
::boost::algorithm::first_finder(Search),
::boost::algorithm::empty_formatter(Input) );
}
// erase_all ( case insensitive ) ------------------------------------//
//! Erase all algorithm ( case insensitive )
/*!
Remove all the occurrences of the string from the input.
The result is a modified copy of the input. It is returned as a sequence
or copied to the output iterator.
Searching is case insensitive.
\param Output An output iterator to which the result will be copied
\param Input An input string
\param Search A substring to be searched for
\param Loc A locale used for case insensitive comparison
\return An output iterator pointing just after the last inserted character or
a modified copy of the input
\note The second variant of this function provides the strong exception-safety guarantee
*/
template<
typename OutputIteratorT,
typename Range1T,
typename Range2T>
inline OutputIteratorT ierase_all_copy(
OutputIteratorT Output,
const Range1T& Input,
const Range2T& Search,
const std::locale& Loc=std::locale() )
{
return ::boost::algorithm::find_format_all_copy(
Output,
Input,
::boost::algorithm::first_finder(Search, is_iequal(Loc)),
::boost::algorithm::empty_formatter(Input) );
}
//! Erase all algorithm ( case insensitive )
/*!
\overload
*/
template<typename SequenceT, typename RangeT>
inline SequenceT ierase_all_copy(
const SequenceT& Input,
const RangeT& Search,
const std::locale& Loc=std::locale() )
{
return ::boost::algorithm::find_format_all_copy(
Input,
::boost::algorithm::first_finder(Search, is_iequal(Loc)),
::boost::algorithm::empty_formatter(Input) );
}
//! Erase all algorithm ( case insensitive )
/*!
Remove all the occurrences of the string from the input.
The input sequence is modified in-place. Searching is case insensitive.
\param Input An input string
\param Search A substring to be searched for.
\param Loc A locale used for case insensitive comparison
*/
template<typename SequenceT, typename RangeT>
inline void ierase_all(
SequenceT& Input,
const RangeT& Search,
const std::locale& Loc=std::locale() )
{
::boost::algorithm::find_format_all(
Input,
::boost::algorithm::first_finder(Search, is_iequal(Loc)),
::boost::algorithm::empty_formatter(Input) );
}
// erase_head --------------------------------------------------------------------//
//! Erase head algorithm
/*!
Remove the head from the input. The head is a prefix of a sequence of given size.
If the sequence is shorter than required, the whole string is
considered to be the head. The result is a modified copy of the input.
It is returned as a sequence or copied to the output iterator.
\param Output An output iterator to which the result will be copied
\param Input An input string
\param N Length of the head.
For N>=0, at most N characters are extracted.
For N<0, size(Input)-|N| characters are extracted.
\return An output iterator pointing just after the last inserted character or
a modified copy of the input
\note The second variant of this function provides the strong exception-safety guarantee
*/
template<
typename OutputIteratorT,
typename RangeT>
inline OutputIteratorT erase_head_copy(
OutputIteratorT Output,
const RangeT& Input,
int N )
{
return ::boost::algorithm::find_format_copy(
Output,
Input,
::boost::algorithm::head_finder(N),
::boost::algorithm::empty_formatter( Input ) );
}
//! Erase head algorithm
/*!
\overload
*/
template<typename SequenceT>
inline SequenceT erase_head_copy(
const SequenceT& Input,
int N )
{
return ::boost::algorithm::find_format_copy(
Input,
::boost::algorithm::head_finder(N),
::boost::algorithm::empty_formatter( Input ) );
}
//! Erase head algorithm
/*!
Remove the head from the input. The head is a prefix of a sequence of given size.
If the sequence is shorter than required, the whole string is
considered to be the head. The input sequence is modified in-place.
\param Input An input string
\param N Length of the head
For N>=0, at most N characters are extracted.
For N<0, size(Input)-|N| characters are extracted.
*/
template<typename SequenceT>
inline void erase_head(
SequenceT& Input,
int N )
{
::boost::algorithm::find_format(
Input,
::boost::algorithm::head_finder(N),
::boost::algorithm::empty_formatter( Input ) );
}
// erase_tail --------------------------------------------------------------------//
//! Erase tail algorithm
/*!
Remove the tail from the input. The tail is a suffix of a sequence of given size.
If the sequence is shorter than required, the whole string is
considered to be the tail.
The result is a modified copy of the input. It is returned as a sequence
or copied to the output iterator.
\param Output An output iterator to which the result will be copied
\param Input An input string
\param N Length of the tail.
For N>=0, at most N characters are extracted.
For N<0, size(Input)-|N| characters are extracted.
\return An output iterator pointing just after the last inserted character or
a modified copy of the input
\note The second variant of this function provides the strong exception-safety guarantee
*/
template<
typename OutputIteratorT,
typename RangeT>
inline OutputIteratorT erase_tail_copy(
OutputIteratorT Output,
const RangeT& Input,
int N )
{
return ::boost::algorithm::find_format_copy(
Output,
Input,
::boost::algorithm::tail_finder(N),
::boost::algorithm::empty_formatter( Input ) );
}
//! Erase tail algorithm
/*!
\overload
*/
template<typename SequenceT>
inline SequenceT erase_tail_copy(
const SequenceT& Input,
int N )
{
return ::boost::algorithm::find_format_copy(
Input,
::boost::algorithm::tail_finder(N),
::boost::algorithm::empty_formatter( Input ) );
}
//! Erase tail algorithm
/*!
Remove the tail from the input. The tail is a suffix of a sequence of given size.
If the sequence is shorter than required, the whole string is
considered to be the tail. The input sequence is modified in-place.
\param Input An input string
\param N Length of the tail
For N>=0, at most N characters are extracted.
For N<0, size(Input)-|N| characters are extracted.
*/
template<typename SequenceT>
inline void erase_tail(
SequenceT& Input,
int N )
{
::boost::algorithm::find_format(
Input,
::boost::algorithm::tail_finder(N),
::boost::algorithm::empty_formatter( Input ) );
}
} // namespace algorithm
// pull names into the boost namespace
using algorithm::erase_range_copy;
using algorithm::erase_range;
using algorithm::erase_first_copy;
using algorithm::erase_first;
using algorithm::ierase_first_copy;
using algorithm::ierase_first;
using algorithm::erase_last_copy;
using algorithm::erase_last;
using algorithm::ierase_last_copy;
using algorithm::ierase_last;
using algorithm::erase_nth_copy;
using algorithm::erase_nth;
using algorithm::ierase_nth_copy;
using algorithm::ierase_nth;
using algorithm::erase_all_copy;
using algorithm::erase_all;
using algorithm::ierase_all_copy;
using algorithm::ierase_all;
using algorithm::erase_head_copy;
using algorithm::erase_head;
using algorithm::erase_tail_copy;
using algorithm::erase_tail;
} // namespace boost
#endif // BOOST_STRING_ERASE_HPP
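
A minimal usage sketch of the erase family defined in this (now removed) header; the strings are illustrative:

```cpp
#include <boost/algorithm/string/erase.hpp>
#include <iostream>
#include <string>

int main()
{
    std::string s = "one, two, one, three";

    // Copying variants: the original is untouched.
    std::cout << boost::algorithm::erase_first_copy(s, "one, ") << '\n';
        // "two, one, three"
    std::cout << boost::algorithm::erase_all_copy(s, "one, ") << '\n';
        // "two, three"
    std::cout << boost::algorithm::ierase_nth_copy(s, "ONE", 1) << '\n';
        // "one, two, , three" (second case-insensitive match removed)

    // In-place variant: modifies s.
    boost::algorithm::erase_head(s, 5);
    std::cout << s << '\n';     // "two, one, three"
}
```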

View File

@@ -23,9 +23,11 @@
#include <memory>
#include <utility>
#include <boost/asio/any_completion_executor.hpp>
#include <boost/asio/any_io_executor.hpp>
#include <boost/asio/associated_allocator.hpp>
#include <boost/asio/associated_cancellation_slot.hpp>
#include <boost/asio/associated_executor.hpp>
#include <boost/asio/associated_immediate_executor.hpp>
#include <boost/asio/cancellation_state.hpp>
#include <boost/asio/recycling_allocator.hpp>
@@ -126,6 +128,13 @@ public:
(get_associated_executor)(handler_, candidate));
}
any_completion_executor immediate_executor(
const any_io_executor& candidate) const BOOST_ASIO_NOEXCEPT
{
return any_completion_executor(std::nothrow,
(get_associated_immediate_executor)(handler_, candidate));
}
void* allocate(std::size_t size, std::size_t align) const
{
typename std::allocator_traits<
@@ -306,6 +315,36 @@ private:
type executor_fn_;
};
class any_completion_handler_immediate_executor_fn
{
public:
using type = any_completion_executor(*)(
any_completion_handler_impl_base*, const any_io_executor&);
constexpr any_completion_handler_immediate_executor_fn(type fn)
: immediate_executor_fn_(fn)
{
}
any_completion_executor immediate_executor(
any_completion_handler_impl_base* impl,
const any_io_executor& candidate) const
{
return immediate_executor_fn_(impl, candidate);
}
template <typename Handler>
static any_completion_executor impl(any_completion_handler_impl_base* impl,
const any_io_executor& candidate)
{
return static_cast<any_completion_handler_impl<Handler>*>(
impl)->immediate_executor(candidate);
}
private:
type immediate_executor_fn_;
};
class any_completion_handler_allocate_fn
{
public:
@@ -368,6 +407,7 @@ template <typename... Signatures>
class any_completion_handler_fn_table
: private any_completion_handler_destroy_fn,
private any_completion_handler_executor_fn,
private any_completion_handler_immediate_executor_fn,
private any_completion_handler_allocate_fn,
private any_completion_handler_deallocate_fn,
private any_completion_handler_call_fns<Signatures...>
@@ -377,11 +417,13 @@ public:
constexpr any_completion_handler_fn_table(
any_completion_handler_destroy_fn::type destroy_fn,
any_completion_handler_executor_fn::type executor_fn,
any_completion_handler_immediate_executor_fn::type immediate_executor_fn,
any_completion_handler_allocate_fn::type allocate_fn,
any_completion_handler_deallocate_fn::type deallocate_fn,
CallFns... call_fns)
: any_completion_handler_destroy_fn(destroy_fn),
any_completion_handler_executor_fn(executor_fn),
any_completion_handler_immediate_executor_fn(immediate_executor_fn),
any_completion_handler_allocate_fn(allocate_fn),
any_completion_handler_deallocate_fn(deallocate_fn),
any_completion_handler_call_fns<Signatures...>(call_fns...)
@@ -390,6 +432,7 @@ public:
using any_completion_handler_destroy_fn::destroy;
using any_completion_handler_executor_fn::executor;
using any_completion_handler_immediate_executor_fn::immediate_executor;
using any_completion_handler_allocate_fn::allocate;
using any_completion_handler_deallocate_fn::deallocate;
using any_completion_handler_call_fns<Signatures...>::call;
@@ -402,6 +445,7 @@ struct any_completion_handler_fn_table_instance
value = any_completion_handler_fn_table<Signatures...>(
&any_completion_handler_destroy_fn::impl<Handler>,
&any_completion_handler_executor_fn::impl<Handler>,
&any_completion_handler_immediate_executor_fn::impl<Handler>,
&any_completion_handler_allocate_fn::impl<Handler>,
&any_completion_handler_deallocate_fn::impl<Handler>,
&any_completion_handler_call_fn<Signatures>::template impl<Handler>...);
@@ -577,6 +621,9 @@ private:
template <typename, typename>
friend struct associated_executor;
template <typename, typename>
friend struct associated_immediate_executor;
const detail::any_completion_handler_fn_table<Signatures...>* fn_table_;
detail::any_completion_handler_impl_base* impl_;
#endif // !defined(GENERATING_DOCUMENTATION)
@@ -751,6 +798,20 @@ struct associated_executor<any_completion_handler<Signatures...>, Candidate>
}
};
template <typename... Signatures, typename Candidate>
struct associated_immediate_executor<
any_completion_handler<Signatures...>, Candidate>
{
using type = any_completion_executor;
static type get(const any_completion_handler<Signatures...>& handler,
const Candidate& candidate = Candidate()) BOOST_ASIO_NOEXCEPT
{
return handler.fn_table_->immediate_executor(handler.impl_,
any_io_executor(std::nothrow, candidate));
}
};
} // namespace asio
} // namespace boost
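
The new `immediate_executor` entries extend `any_completion_handler`'s hand-rolled vtable so a handler's associated immediate executor survives type erasure. For context, a hedged sketch of the non-template API boundary this class enables; the `async_sleep` function and its use of `consign` are illustrative, not from the diff, and assume Boost 1.82 or later:

```cpp
#include <boost/asio.hpp>
#include <chrono>
#include <iostream>
#include <memory>

// A non-template function boundary: the caller's handler is type-erased into
// any_completion_handler, and its associated executor, allocator (and now
// immediate executor) travel with it through the fn-table.
void async_sleep(boost::asio::any_io_executor ex,
    std::chrono::milliseconds d,
    boost::asio::any_completion_handler<void(boost::system::error_code)> h)
{
    auto timer = std::make_shared<boost::asio::steady_timer>(ex, d);
    // consign keeps the timer alive until the handler runs.
    timer->async_wait(boost::asio::consign(std::move(h), timer));
}

int main()
{
    boost::asio::io_context ctx;
    async_sleep(ctx.get_executor(), std::chrono::milliseconds(10),
        [](boost::system::error_code ec)
        { std::cout << "slept: " << ec.message() << '\n'; });
    ctx.run();
}
```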

View File

@@ -472,20 +472,40 @@ namespace detail {
template <typename TargetAsyncResult,
typename Allocator, typename = void>
struct allocator_binder_async_result_completion_handler_type
class allocator_binder_completion_handler_async_result
{
public:
template <typename T>
explicit allocator_binder_completion_handler_async_result(T&)
{
}
};
template <typename TargetAsyncResult, typename Allocator>
struct allocator_binder_async_result_completion_handler_type<
class allocator_binder_completion_handler_async_result<
TargetAsyncResult, Allocator,
typename void_type<
typename TargetAsyncResult::completion_handler_type
>::type>
{
public:
typedef allocator_binder<
typename TargetAsyncResult::completion_handler_type, Allocator>
completion_handler_type;
explicit allocator_binder_completion_handler_async_result(
typename TargetAsyncResult::completion_handler_type& handler)
: target_(handler)
{
}
typename TargetAsyncResult::return_type get()
{
return target_.get();
}
private:
TargetAsyncResult target_;
};
template <typename TargetAsyncResult, typename = void>
@@ -507,22 +527,18 @@ struct allocator_binder_async_result_return_type<
template <typename T, typename Allocator, typename Signature>
class async_result<allocator_binder<T, Allocator>, Signature> :
public detail::allocator_binder_async_result_completion_handler_type<
public detail::allocator_binder_completion_handler_async_result<
async_result<T, Signature>, Allocator>,
public detail::allocator_binder_async_result_return_type<
async_result<T, Signature> >
{
public:
explicit async_result(allocator_binder<T, Allocator>& b)
: target_(b.get())
: detail::allocator_binder_completion_handler_async_result<
async_result<T, Signature>, Allocator>(b.get())
{
}
typename async_result<T, Signature>::return_type get()
{
return target_.get();
}
template <typename Initiation>
struct init_wrapper
{
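
This refactor replaces the old `completion_handler_type`-detecting trait with a base class that owns the target `async_result`, so the legacy-handler machinery and the `get()` forwarding live in one place. A usage sketch of the binder itself, which is normally created via `bind_allocator` (the timer example is illustrative):

```cpp
#include <boost/asio.hpp>
#include <chrono>
#include <iostream>

int main()
{
    boost::asio::io_context ctx;
    boost::asio::steady_timer timer(ctx, std::chrono::milliseconds(1));

    // bind_allocator associates an allocator with the handler; the
    // async_result specialisation refactored above is what carries the
    // target result type through the binder.
    timer.async_wait(boost::asio::bind_allocator(
        boost::asio::recycling_allocator<void>(),
        [](boost::system::error_code ec)
        { std::cout << "wait: " << ec.message() << '\n'; }));

    ctx.run();
}
```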

View File

@@ -474,20 +474,40 @@ namespace detail {
template <typename TargetAsyncResult,
typename CancellationSlot, typename = void>
struct cancellation_slot_binder_async_result_completion_handler_type
class cancellation_slot_binder_completion_handler_async_result
{
public:
template <typename T>
explicit cancellation_slot_binder_completion_handler_async_result(T&)
{
}
};
template <typename TargetAsyncResult, typename CancellationSlot>
struct cancellation_slot_binder_async_result_completion_handler_type<
class cancellation_slot_binder_completion_handler_async_result<
TargetAsyncResult, CancellationSlot,
typename void_type<
typename TargetAsyncResult::completion_handler_type
>::type>
{
public:
typedef cancellation_slot_binder<
typename TargetAsyncResult::completion_handler_type, CancellationSlot>
completion_handler_type;
explicit cancellation_slot_binder_completion_handler_async_result(
typename TargetAsyncResult::completion_handler_type& handler)
: target_(handler)
{
}
typename TargetAsyncResult::return_type get()
{
return target_.get();
}
private:
TargetAsyncResult target_;
};
template <typename TargetAsyncResult, typename = void>
@@ -509,22 +529,18 @@ struct cancellation_slot_binder_async_result_return_type<
template <typename T, typename CancellationSlot, typename Signature>
class async_result<cancellation_slot_binder<T, CancellationSlot>, Signature> :
public detail::cancellation_slot_binder_async_result_completion_handler_type<
public detail::cancellation_slot_binder_completion_handler_async_result<
async_result<T, Signature>, CancellationSlot>,
public detail::cancellation_slot_binder_async_result_return_type<
async_result<T, Signature> >
{
public:
explicit async_result(cancellation_slot_binder<T, CancellationSlot>& b)
: target_(b.get())
: detail::cancellation_slot_binder_completion_handler_async_result<
async_result<T, Signature>, CancellationSlot>(b.get())
{
}
typename async_result<T, Signature>::return_type get()
{
return target_.get();
}
template <typename Initiation>
struct init_wrapper
{
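
The same refactor applied to the cancellation-slot binder. A usage sketch via `bind_cancellation_slot`; the timer and the early `emit` are illustrative:

```cpp
#include <boost/asio.hpp>
#include <chrono>
#include <iostream>

int main()
{
    boost::asio::io_context ctx;
    boost::asio::steady_timer timer(ctx, std::chrono::seconds(60));
    boost::asio::cancellation_signal sig;

    // bind_cancellation_slot associates the signal's slot with the handler.
    timer.async_wait(boost::asio::bind_cancellation_slot(sig.slot(),
        [](boost::system::error_code ec)
        { std::cout << "wait: " << ec.message() << '\n'; }));

    // Cancel before the timer expires; the wait completes with
    // operation_aborted.
    sig.emit(boost::asio::cancellation_type::total);
    ctx.run();
}
```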

View File

@@ -472,22 +472,41 @@ bind_immediate_executor(const Executor& e, BOOST_ASIO_MOVE_ARG(T) t)
namespace detail {
template <typename TargetAsyncResult,
typename Executor, typename = void>
struct immediate_executor_binder_async_result_completion_handler_type
template <typename TargetAsyncResult, typename Executor, typename = void>
class immediate_executor_binder_completion_handler_async_result
{
public:
template <typename T>
explicit immediate_executor_binder_completion_handler_async_result(T&)
{
}
};
template <typename TargetAsyncResult, typename Executor>
struct immediate_executor_binder_async_result_completion_handler_type<
class immediate_executor_binder_completion_handler_async_result<
TargetAsyncResult, Executor,
typename void_type<
typename TargetAsyncResult::completion_handler_type
>::type>
{
public:
typedef immediate_executor_binder<
typename TargetAsyncResult::completion_handler_type, Executor>
completion_handler_type;
explicit immediate_executor_binder_completion_handler_async_result(
typename TargetAsyncResult::completion_handler_type& handler)
: target_(handler)
{
}
typename TargetAsyncResult::return_type get()
{
return target_.get();
}
private:
TargetAsyncResult target_;
};
template <typename TargetAsyncResult, typename = void>
@@ -509,22 +528,18 @@ struct immediate_executor_binder_async_result_return_type<
template <typename T, typename Executor, typename Signature>
class async_result<immediate_executor_binder<T, Executor>, Signature> :
public detail::immediate_executor_binder_async_result_completion_handler_type<
public detail::immediate_executor_binder_completion_handler_async_result<
async_result<T, Signature>, Executor>,
public detail::immediate_executor_binder_async_result_return_type<
async_result<T, Signature> >
{
public:
explicit async_result(immediate_executor_binder<T, Executor>& b)
: target_(b.get())
: detail::immediate_executor_binder_completion_handler_async_result<
async_result<T, Signature>, Executor>(b.get())
{
}
typename async_result<T, Signature>::return_type get()
{
return target_.get();
}
template <typename Initiation>
struct init_wrapper
{
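
And the same refactor again for the immediate-executor binder. A hedged usage sketch via `bind_immediate_executor`, assuming Boost 1.82 or later; whether a given operation completes immediately depends on the backend:

```cpp
#include <boost/asio.hpp>
#include <chrono>
#include <iostream>

int main()
{
    boost::asio::io_context ctx;
    boost::asio::steady_timer timer(ctx, std::chrono::seconds(0));

    // With an immediate executor bound, an operation that can complete
    // without waiting may run its handler there instead of posting back
    // to the I/O executor.
    timer.async_wait(boost::asio::bind_immediate_executor(
        boost::asio::system_executor(),
        [](boost::system::error_code ec)
        { std::cout << "immediate: " << ec.message() << '\n'; }));

    ctx.run();
}
```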

View File

@@ -340,6 +340,13 @@ struct associator<Associator,
* @param io_objects_or_executors Zero or more I/O objects or I/O executors for
* which outstanding work must be maintained.
*
* @par Per-Operation Cancellation
* By default, terminal per-operation cancellation is enabled for
* composed operations that are implemented using @c async_compose. To
* disable cancellation for the composed operation, or to alter its
* supported cancellation types, call the @c self object's @c
* reset_cancellation_state function.
*
* @par Example:
*
* @code struct async_echo_implementation
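
The documentation example is cut off at `@code` in this hunk. Below is a separate minimal sketch (not the docs' `async_echo_implementation`) of what the new paragraph describes: a composed operation that calls `reset_cancellation_state` on its first entry to widen the default terminal-only cancellation filter:

```cpp
#include <boost/asio.hpp>
#include <chrono>
#include <iostream>

// A minimal composed operation: waits on the timer twice, opting in to
// total cancellation instead of the default terminal-only filter.
template <typename CompletionToken>
auto async_wait_twice(boost::asio::steady_timer& timer, CompletionToken&& token)
{
    return boost::asio::async_compose<CompletionToken,
        void(boost::system::error_code)>(
        [&timer, step = 0](auto& self,
            boost::system::error_code ec = {}) mutable
        {
            if (step == 0) // first entry: widen the cancellation filter
                self.reset_cancellation_state(
                    boost::asio::enable_total_cancellation());
            switch (step++)
            {
            case 0:
            case 1:
                timer.expires_after(std::chrono::milliseconds(10));
                timer.async_wait(std::move(self));
                break;
            case 2:
                self.complete(ec);
                break;
            }
        },
        token, timer);
}

int main()
{
    boost::asio::io_context ctx;
    boost::asio::steady_timer t(ctx);
    async_wait_twice(t, [](boost::system::error_code ec)
        { std::cout << "done: " << ec.message() << '\n'; });
    ctx.run();
}
```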

View File

@@ -1679,7 +1679,7 @@
# include <unistd.h>
#endif // defined(BOOST_ASIO_HAS_UNISTD_H)
// Linux: epoll, eventfd and timerfd.
// Linux: epoll, eventfd, timerfd and io_uring.
#if defined(__linux__)
# include <linux/version.h>
# if !defined(BOOST_ASIO_HAS_EPOLL)
@@ -1703,6 +1703,11 @@
# endif // (__GLIBC__ > 2) || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 8)
# endif // defined(BOOST_ASIO_HAS_EPOLL)
# endif // !defined(BOOST_ASIO_HAS_TIMERFD)
# if defined(BOOST_ASIO_HAS_IO_URING)
# if LINUX_VERSION_CODE < KERNEL_VERSION(5,10,0)
# error Linux kernel 5.10 or later is required to support io_uring
# endif // LINUX_VERSION_CODE < KERNEL_VERSION(5,10,0)
# endif // defined(BOOST_ASIO_HAS_IO_URING)
#endif // defined(__linux__)
// Linux: io_uring is used instead of epoll.
@@ -2042,7 +2047,7 @@
# endif // !defined(BOOST_ASIO_DISABLE_HANDLER_HOOKS)
#endif // !defined(BOOST_ASIO_HAS_HANDLER_HOOKS)
// Support for the __thread keyword extension.
// Support for the __thread keyword extension, or equivalent.
#if !defined(BOOST_ASIO_DISABLE_THREAD_KEYWORD_EXTENSION)
# if defined(__linux__)
# if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
@@ -2064,6 +2069,22 @@
# define BOOST_ASIO_THREAD_KEYWORD __declspec(thread)
# endif // (_MSC_VER >= 1700)
# endif // defined(BOOST_ASIO_MSVC) && defined(BOOST_ASIO_WINDOWS_RUNTIME)
# if defined(__APPLE__)
# if defined(__clang__)
# if defined(__apple_build_version__)
# define BOOST_ASIO_HAS_THREAD_KEYWORD_EXTENSION 1
# define BOOST_ASIO_THREAD_KEYWORD __thread
# endif // defined(__apple_build_version__)
# endif // defined(__clang__)
# endif // defined(__APPLE__)
# if !defined(BOOST_ASIO_HAS_THREAD_KEYWORD_EXTENSION)
# if defined(BOOST_ASIO_HAS_BOOST_CONFIG)
# if !defined(BOOST_NO_CXX11_THREAD_LOCAL)
# define BOOST_ASIO_HAS_THREAD_KEYWORD_EXTENSION 1
# define BOOST_ASIO_THREAD_KEYWORD thread_local
# endif // !defined(BOOST_NO_CXX11_THREAD_LOCAL)
# endif // defined(BOOST_ASIO_HAS_BOOST_CONFIG)
# endif // !defined(BOOST_ASIO_HAS_THREAD_KEYWORD_EXTENSION)
#endif // !defined(BOOST_ASIO_DISABLE_THREAD_KEYWORD_EXTENSION)
#if !defined(BOOST_ASIO_THREAD_KEYWORD)
# define BOOST_ASIO_THREAD_KEYWORD __thread
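
A small sketch of how the detected macro is used, resolving to `__thread`, `__declspec(thread)`, or the C++11 `thread_local` fallback added above. Including the detail config header directly is for illustration only:

```cpp
#include <boost/asio/detail/config.hpp>

#if defined(BOOST_ASIO_HAS_THREAD_KEYWORD_EXTENSION)
// Each thread sees its own copy of this variable.
static BOOST_ASIO_THREAD_KEYWORD int per_thread_counter = 0;
#endif

int main()
{
#if defined(BOOST_ASIO_HAS_THREAD_KEYWORD_EXTENSION)
    ++per_thread_counter;
#endif
}
```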

View File

@@ -109,10 +109,9 @@ public:
{
start_op(op_type, descriptor, descriptor_data,
op, is_continuation, allow_speculative,
&epoll_reactor::call_post_immediate_completion, this);
&dev_poll_reactor::call_post_immediate_completion, this);
}
// Cancel all operations associated with the given descriptor. The
// handlers associated with the descriptor will be invoked with the
// operation_aborted error.

View File

@@ -436,15 +436,16 @@ void io_uring_service::run(long usec, op_queue<operation>& ops)
? ::io_uring_peek_cqe(&ring_, &cqe)
: ::io_uring_wait_cqe(&ring_, &cqe);
if (result == 0 && usec > 0)
if (local_ops > 0)
{
if (::io_uring_cqe_get_data(cqe) != &ts)
if (result != 0 || ::io_uring_cqe_get_data(cqe) != &ts)
{
mutex::scoped_lock lock(mutex_);
if (::io_uring_sqe* sqe = get_sqe())
{
++local_ops;
::io_uring_prep_timeout_remove(sqe, reinterpret_cast<__u64>(&ts), 0);
::io_uring_sqe_set_data(sqe, &ts);
submit_sqes();
}
}
@@ -452,37 +453,41 @@
bool check_timers = false;
int count = 0;
while (result == 0)
while (result == 0 || local_ops > 0)
{
if (void* ptr = ::io_uring_cqe_get_data(cqe))
if (result == 0)
{
if (ptr == this)
if (void* ptr = ::io_uring_cqe_get_data(cqe))
{
// The io_uring service was interrupted.
}
else if (ptr == &timer_queues_)
{
check_timers = true;
}
else if (ptr == &timeout_)
{
check_timers = true;
timeout_.tv_sec = 0;
timeout_.tv_nsec = 0;
}
else if (ptr == &ts)
{
--local_ops;
}
else
{
io_queue* io_q = static_cast<io_queue*>(ptr);
io_q->set_result(cqe->res);
ops.push(io_q);
if (ptr == this)
{
// The io_uring service was interrupted.
}
else if (ptr == &timer_queues_)
{
check_timers = true;
}
else if (ptr == &timeout_)
{
check_timers = true;
timeout_.tv_sec = 0;
timeout_.tv_nsec = 0;
}
else if (ptr == &ts)
{
--local_ops;
}
else
{
io_queue* io_q = static_cast<io_queue*>(ptr);
io_q->set_result(cqe->res);
ops.push(io_q);
}
}
::io_uring_cqe_seen(&ring_, cqe);
++count;
}
::io_uring_cqe_seen(&ring_, cqe);
result = (++count < complete_batch_size || local_ops > 0)
result = (count < complete_batch_size || local_ops > 0)
? ::io_uring_peek_cqe(&ring_, &cqe) : -EAGAIN;
}

View File

@@ -387,6 +387,7 @@ boost::system::error_code signal_set_service::add(
if (state->flags_[signal_number] != signal_set_base::flags::dont_care)
{
ec = boost::asio::error::invalid_argument;
delete new_registration;
return ec;
}
struct sigaction sa;
@@ -398,6 +399,7 @@
{
ec = boost::system::error_code(errno,
boost::asio::error::get_system_category());
delete new_registration;
return ec;
}
state->flags_[signal_number] = f;
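
This fix plugs a leak by deleting `new_registration` on both early-return error paths. A standalone sketch of the same shape expressed with `std::unique_ptr` instead; the `registration` type and `add_signal` function here are hypothetical stand-ins, not asio's actual code:

```cpp
#include <memory>

struct registration { int signal_number = 0; };   // hypothetical stand-in

// unique_ptr releases the allocation on every early return automatically,
// which is what the two added `delete new_registration` statements do by hand.
registration* add_signal(int signo, bool flags_ok, bool sigaction_ok)
{
    auto reg = std::make_unique<registration>();
    reg->signal_number = signo;

    if (!flags_ok)
        return nullptr;        // freed here without an explicit delete
    if (!sigaction_ok)
        return nullptr;        // and here

    return reg.release();      // success: ownership passes to the caller
}

int main()
{
    delete add_signal(2, true, true);
}
```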

View File

@@ -46,10 +46,12 @@ namespace asio {
namespace detail {
#if defined(BOOST_ASIO_HAS_STD_SHARED_PTR)
using std::allocate_shared;
using std::make_shared;
using std::shared_ptr;
using std::weak_ptr;
#else // defined(BOOST_ASIO_HAS_STD_SHARED_PTR)
using boost::allocate_shared;
using boost::make_shared;
using boost::shared_ptr;
using boost::weak_ptr;

View File

@@ -701,7 +701,7 @@ protected:
{
public:
reactor_op_cancellation(reactor* r,
reactor::per_descriptor_data* p, int d, int o)
reactor::per_descriptor_data* p, socket_type d, int o)
: reactor_(r),
reactor_data_(p),
descriptor_(d),
@@ -724,7 +724,7 @@ protected:
private:
reactor* reactor_;
reactor::per_descriptor_data* reactor_data_;
int descriptor_;
socket_type descriptor_;
int op_type_;
};

View File

@@ -1824,6 +1824,11 @@ public:
>::value
>::type* = 0) const
{
if (!target_)
{
bad_executor ex;
boost::asio::detail::throw_exception(ex);
}
typedef find_convertible_property<Property> found;
prop_fns_[found::index].query(0, object_fns_->target(*this),
&static_cast<const typename found::type&>(p));
@@ -1843,6 +1848,11 @@ public:
>::value
>::type* = 0) const
{
if (!target_)
{
bad_executor ex;
boost::asio::detail::throw_exception(ex);
}
typedef find_convertible_property<Property> found;
typename remove_reference<
typename found::query_result_type>::type* result = 0;
@@ -1865,6 +1875,11 @@ public:
>::value
>::type* = 0) const
{
if (!target_)
{
bad_executor ex;
boost::asio::detail::throw_exception(ex);
}
typedef find_convertible_property<Property> found;
typename found::query_result_type result;
prop_fns_[found::index].query(&result, object_fns_->target(*this),
@@ -1890,6 +1905,11 @@ public:
>::value
>::type* = 0) const
{
if (!target_)
{
bad_executor ex;
boost::asio::detail::throw_exception(ex);
}
typedef find_convertible_property<Property> found;
typename found::query_result_type* result;
prop_fns_[found::index].query(&result, object_fns_->target(*this),
@@ -1910,6 +1930,11 @@ public:
find_convertible_requirable_property<Property>::value
>::type* = 0) const
{
if (!target_)
{
bad_executor ex;
boost::asio::detail::throw_exception(ex);
}
typedef find_convertible_requirable_property<Property> found;
return prop_fns_[found::index].require(object_fns_->target(*this),
&static_cast<const typename found::type&>(p));
@@ -1927,6 +1952,11 @@ public:
find_convertible_preferable_property<Property>::value
>::type* = 0) const
{
if (!target_)
{
bad_executor ex;
boost::asio::detail::throw_exception(ex);
}
typedef find_convertible_preferable_property<Property> found;
return prop_fns_[found::index].prefer(object_fns_->target(*this),
&static_cast<const typename found::type&>(p));
@@ -2307,6 +2337,11 @@ inline void swap(any_executor<SupportableProperties...>& a,
>::value \
>::type* = 0) const \
{ \
if (!target_) \
{ \
bad_executor ex; \
boost::asio::detail::throw_exception(ex); \
} \
typedef find_convertible_property<Property> found; \
prop_fns_[found::index].query(0, object_fns_->target(*this), \
&static_cast<const typename found::type&>(p)); \
@@ -2326,6 +2361,11 @@ inline void swap(any_executor<SupportableProperties...>& a,
>::value \
>::type* = 0) const \
{ \
if (!target_) \
{ \
bad_executor ex; \
boost::asio::detail::throw_exception(ex); \
} \
typedef find_convertible_property<Property> found; \
typename remove_reference< \
typename found::query_result_type>::type* result; \
@@ -2348,6 +2388,11 @@ inline void swap(any_executor<SupportableProperties...>& a,
>::value \
>::type* = 0) const \
{ \
if (!target_) \
{ \
bad_executor ex; \
boost::asio::detail::throw_exception(ex); \
} \
typedef find_convertible_property<Property> found; \
typename found::query_result_type result; \
prop_fns_[found::index].query(&result, object_fns_->target(*this), \
@@ -2373,6 +2418,11 @@ inline void swap(any_executor<SupportableProperties...>& a,
>::value \
>::type* = 0) const \
{ \
if (!target_) \
{ \
bad_executor ex; \
boost::asio::detail::throw_exception(ex); \
} \
typedef find_convertible_property<Property> found; \
typename found::query_result_type* result; \
prop_fns_[found::index].query(&result, object_fns_->target(*this), \
@@ -2393,6 +2443,11 @@ inline void swap(any_executor<SupportableProperties...>& a,
find_convertible_requirable_property<Property>::value \
>::type* = 0) const \
{ \
if (!target_) \
{ \
bad_executor ex; \
boost::asio::detail::throw_exception(ex); \
} \
typedef find_convertible_requirable_property<Property> found; \
return prop_fns_[found::index].require(object_fns_->target(*this), \
&static_cast<const typename found::type&>(p)); \
@@ -2410,6 +2465,11 @@ inline void swap(any_executor<SupportableProperties...>& a,
find_convertible_preferable_property<Property>::value \
>::type* = 0) const \
{ \
if (!target_) \
{ \
bad_executor ex; \
boost::asio::detail::throw_exception(ex); \
} \
typedef find_convertible_preferable_property<Property> found; \
return prop_fns_[found::index].prefer(object_fns_->target(*this), \
&static_cast<const typename found::type&>(p)); \
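
Each of these added guards makes property operations on an empty `any_executor` throw `bad_executor` instead of dereferencing a null target. A hedged sketch of the now-defined behaviour; the particular property queried is illustrative:

```cpp
#include <boost/asio.hpp>
#include <iostream>

int main()
{
    // A default-constructed any_executor holds no target.
    boost::asio::execution::any_executor<
        boost::asio::execution::blocking_t> ex;

    try
    {
        // With the added checks, querying an empty executor throws
        // bad_executor rather than crashing.
        (void)boost::asio::query(ex, boost::asio::execution::blocking);
    }
    catch (const boost::asio::execution::bad_executor& e)
    {
        std::cout << "empty executor: " << e.what() << '\n';
    }
}
```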

View File

@@ -18,6 +18,8 @@
#include <boost/asio/detail/config.hpp>
#include <boost/asio/associated_cancellation_slot.hpp>
#include <boost/asio/awaitable.hpp>
#include <boost/asio/detail/memory.hpp>
#include <boost/asio/detail/recycling_allocator.hpp>
#include <boost/asio/dispatch.hpp>
#include <boost/asio/execution/outstanding_work.hpp>
#include <boost/asio/post.hpp>
@@ -70,43 +72,90 @@ struct co_spawn_work_guard<Executor,
#endif // !defined(BOOST_ASIO_NO_TS_EXECUTORS)
template <typename Executor>
inline co_spawn_work_guard<Executor>
make_co_spawn_work_guard(const Executor& ex)
template <typename Handler, typename Executor,
typename Function, typename = void>
struct co_spawn_state
{
return co_spawn_work_guard<Executor>(ex);
}
template <typename H, typename F>
co_spawn_state(H&& h, const Executor& ex, F&& f)
: handler(std::forward<H>(h)),
spawn_work(ex),
handler_work(boost::asio::get_associated_executor(handler, ex)),
function(std::forward<F>(f))
{
}
template <typename T, typename Executor, typename F, typename Handler>
Handler handler;
co_spawn_work_guard<Executor> spawn_work;
co_spawn_work_guard<typename associated_executor<
Handler, Executor>::type> handler_work;
Function function;
};
template <typename Handler, typename Executor, typename Function>
struct co_spawn_state<Handler, Executor, Function,
typename enable_if<
is_same<
typename associated_executor<Handler,
Executor>::asio_associated_executor_is_unspecialised,
void
>::value
>::type>
{
template <typename H, typename F>
co_spawn_state(H&& h, const Executor& ex, F&& f)
: handler(std::forward<H>(h)),
handler_work(ex),
function(std::forward<F>(f))
{
}
Handler handler;
co_spawn_work_guard<Executor> handler_work;
Function function;
};
struct co_spawn_dispatch
{
template <typename CompletionToken>
auto operator()(CompletionToken&& token) const
-> decltype(boost::asio::dispatch(std::forward<CompletionToken>(token)))
{
return boost::asio::dispatch(std::forward<CompletionToken>(token));
}
};
struct co_spawn_post
{
template <typename CompletionToken>
auto operator()(CompletionToken&& token) const
-> decltype(boost::asio::post(std::forward<CompletionToken>(token)))
{
return boost::asio::post(std::forward<CompletionToken>(token));
}
};
template <typename T, typename Handler, typename Executor, typename Function>
awaitable<awaitable_thread_entry_point, Executor> co_spawn_entry_point(
awaitable<T, Executor>*, Executor ex, F f, Handler handler)
awaitable<T, Executor>*, co_spawn_state<Handler, Executor, Function> s)
{
auto spawn_work = make_co_spawn_work_guard(ex);
auto handler_work = make_co_spawn_work_guard(
boost::asio::get_associated_executor(handler, ex));
(void) co_await (dispatch)(
use_awaitable_t<Executor>{__FILE__, __LINE__, "co_spawn_entry_point"});
(void) co_await co_spawn_dispatch{};
(co_await awaitable_thread_has_context_switched{}) = false;
std::exception_ptr e = nullptr;
bool done = false;
try
{
T t = co_await f();
T t = co_await s.function();
done = true;
bool switched = (co_await awaitable_thread_has_context_switched{});
if (!switched)
{
(void) co_await (post)(
use_awaitable_t<Executor>{__FILE__,
__LINE__, "co_spawn_entry_point"});
}
(void) co_await co_spawn_post();
(dispatch)(handler_work.get_executor(),
[handler = std::move(handler), t = std::move(t)]() mutable
(dispatch)(s.handler_work.get_executor(),
[handler = std::move(s.handler), t = std::move(t)]() mutable
{
std::move(handler)(std::exception_ptr(), std::move(t));
});
@ -123,34 +172,26 @@ awaitable<awaitable_thread_entry_point, Executor> co_spawn_entry_point(
bool switched = (co_await awaitable_thread_has_context_switched{});
if (!switched)
{
(void) co_await (post)(
use_awaitable_t<Executor>{__FILE__, __LINE__, "co_spawn_entry_point"});
}
(void) co_await co_spawn_post();
(dispatch)(handler_work.get_executor(),
[handler = std::move(handler), e]() mutable
(dispatch)(s.handler_work.get_executor(),
[handler = std::move(s.handler), e]() mutable
{
std::move(handler)(e, T());
});
}
template <typename Executor, typename F, typename Handler>
template <typename Handler, typename Executor, typename Function>
awaitable<awaitable_thread_entry_point, Executor> co_spawn_entry_point(
awaitable<void, Executor>*, Executor ex, F f, Handler handler)
awaitable<void, Executor>*, co_spawn_state<Handler, Executor, Function> s)
{
auto spawn_work = make_co_spawn_work_guard(ex);
auto handler_work = make_co_spawn_work_guard(
boost::asio::get_associated_executor(handler, ex));
(void) co_await (dispatch)(
use_awaitable_t<Executor>{__FILE__, __LINE__, "co_spawn_entry_point"});
(void) co_await co_spawn_dispatch{};
(co_await awaitable_thread_has_context_switched{}) = false;
std::exception_ptr e = nullptr;
try
{
co_await f();
co_await s.function();
}
catch (...)
{
@ -159,13 +200,10 @@ awaitable<awaitable_thread_entry_point, Executor> co_spawn_entry_point(
bool switched = (co_await awaitable_thread_has_context_switched{});
if (!switched)
{
(void) co_await (post)(
use_awaitable_t<Executor>{__FILE__, __LINE__, "co_spawn_entry_point"});
}
(void) co_await co_spawn_post();
(dispatch)(handler_work.get_executor(),
[handler = std::move(handler), e]() mutable
(dispatch)(s.handler_work.get_executor(),
[handler = std::move(s.handler), e]() mutable
{
std::move(handler)(e);
});
@ -194,27 +232,29 @@ class co_spawn_cancellation_handler
{
public:
co_spawn_cancellation_handler(const Handler&, const Executor& ex)
: ex_(ex)
: signal_(detail::allocate_shared<cancellation_signal>(
detail::recycling_allocator<cancellation_signal,
detail::thread_info_base::cancellation_signal_tag>())),
ex_(ex)
{
}
cancellation_slot slot()
{
return signal_.slot();
return signal_->slot();
}
void operator()(cancellation_type_t type)
{
cancellation_signal* sig = &signal_;
shared_ptr<cancellation_signal> sig = signal_;
boost::asio::dispatch(ex_, [sig, type]{ sig->emit(type); });
}
private:
cancellation_signal signal_;
shared_ptr<cancellation_signal> signal_;
Executor ex_;
};
template <typename Handler, typename Executor>
class co_spawn_cancellation_handler<Handler, Executor,
typename enable_if<
@ -266,6 +306,7 @@ public:
{
typedef typename result_of<F()>::type awaitable_type;
typedef typename decay<Handler>::type handler_type;
typedef typename decay<F>::type function_type;
typedef co_spawn_cancellation_handler<
handler_type, Executor> cancel_handler_type;
@ -282,7 +323,8 @@ public:
cancellation_state cancel_state(proxy_slot);
auto a = (co_spawn_entry_point)(static_cast<awaitable_type*>(nullptr),
ex_, std::forward<F>(f), std::forward<Handler>(handler));
co_spawn_state<handler_type, Executor, function_type>(
std::forward<Handler>(handler), ex_, std::forward<F>(f)));
awaitable_handler<executor_type, void>(std::move(a),
ex_, proxy_slot, cancel_state).launch();
}
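
A minimal sketch (not part of the diff) of the co_spawn call path these changes affect: the new co_spawn_state above owns the handler, the work guards, and the coroutine function for the lifetime of the spawned thread of execution, and the cancellation signal is now heap-allocated so it can outlive the frame. Names like `compute` are illustrative.

```
#include <boost/asio/awaitable.hpp>
#include <boost/asio/co_spawn.hpp>
#include <boost/asio/io_context.hpp>
#include <exception>

boost::asio::awaitable<int> compute()
{
  co_return 42; // any awaitable<T>; T flows into the completion handler
}

int main()
{
  boost::asio::io_context ctx;
  boost::asio::co_spawn(ctx, compute(),
      [](std::exception_ptr e, int value)
      {
        // Runs via dispatch() on the handler's associated executor,
        // which co_spawn_state keeps alive with its work guards.
      });
  ctx.run();
}
```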

View File

@ -36,6 +36,16 @@ public:
/// Default constructor.
bad_address_cast() {}
/// Copy constructor.
bad_address_cast(const bad_address_cast& other) BOOST_ASIO_NOEXCEPT_OR_NOTHROW
#if defined(BOOST_ASIO_MSVC) && defined(_HAS_EXCEPTIONS) && !_HAS_EXCEPTIONS
: std::exception(static_cast<const std::exception&>(other))
#else
: std::bad_cast(static_cast<const std::bad_cast&>(other))
#endif
{
}
/// Destructor.
virtual ~bad_address_cast() BOOST_ASIO_NOEXCEPT_OR_NOTHROW {}
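
For context, bad_address_cast is what the address conversion functions throw on a mismatched conversion; a short sketch (illustrative, not from the diff) of a path that would invoke the copy constructor added above if the exception were caught by value:

```
#include <boost/asio/ip/address.hpp>
#include <iostream>

int main()
{
  auto addr = boost::asio::ip::make_address("::1");
  try
  {
    auto v4 = addr.to_v4(); // IPv6 address, so this throws
    (void)v4;
  }
  catch (const boost::asio::ip::bad_address_cast& e)
  {
    std::cout << "not an IPv4 address: " << e.what() << '\n';
  }
}
```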

View File

@ -18,6 +18,6 @@
// BOOST_ASIO_VERSION % 100 is the sub-minor version
// BOOST_ASIO_VERSION / 100 % 1000 is the minor version
// BOOST_ASIO_VERSION / 100000 is the major version
#define BOOST_ASIO_VERSION 102800 // 1.28.0
#define BOOST_ASIO_VERSION 102802 // 1.28.2
#endif // BOOST_ASIO_VERSION_HPP
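
A quick check of the encoding described in the comments (102802 → 1.28.2):

```
#include <boost/asio/version.hpp>
#include <cstdio>

int main()
{
  // 102802 / 100000 == 1, 102802 / 100 % 1000 == 28, 102802 % 100 == 2
  std::printf("asio %d.%d.%d\n",
      BOOST_ASIO_VERSION / 100000,
      BOOST_ASIO_VERSION / 100 % 1000,
      BOOST_ASIO_VERSION % 100);
}
```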

View File

@ -161,7 +161,10 @@ template<class E, class T> std::basic_ostream<E, T> & operator<<( std::basic_ost
# define BOOST_CURRENT_LOCATION ::boost::source_location(__FILE__, BOOST_CURRENT_LOCATION_IMPL_1(__LINE__), "")
#elif defined(__cpp_lib_source_location) && __cpp_lib_source_location >= 201907L
#elif defined(__cpp_lib_source_location) && __cpp_lib_source_location >= 201907L && !defined(__NVCC__)
// Under nvcc, __builtin_source_location is not constexpr
// https://github.com/boostorg/assert/issues/32
# define BOOST_CURRENT_LOCATION ::boost::source_location(::std::source_location::current())
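
A short usage sketch of the macro; with the change above, nvcc builds fall back to the `__FILE__`/`__LINE__` path instead of the non-constexpr `__builtin_source_location`:

```
#include <boost/assert/source_location.hpp>
#include <iostream>

int main()
{
  boost::source_location loc = BOOST_CURRENT_LOCATION;
  std::cout << loc.file_name() << ':' << loc.line() << '\n';
}
```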

View File

@ -0,0 +1,98 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2020-2021 Andrey Semashev
*/
/*!
* \file atomic/atomic_ref.hpp
*
* This header contains definition of \c atomic_ref template.
*/
#ifndef BOOST_ATOMIC_ATOMIC_REF_HPP_INCLUDED_
#define BOOST_ATOMIC_ATOMIC_REF_HPP_INCLUDED_
#include <boost/assert.hpp>
#include <boost/static_assert.hpp>
#include <boost/memory_order.hpp>
#include <boost/atomic/capabilities.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/intptr.hpp>
#include <boost/atomic/detail/classify.hpp>
#include <boost/atomic/detail/atomic_ref_impl.hpp>
#include <boost/atomic/detail/type_traits/is_trivially_copyable.hpp>
#include <boost/atomic/detail/header.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
namespace boost {
namespace atomics {
//! Atomic reference to external object
template< typename T >
class atomic_ref :
public atomics::detail::base_atomic_ref< T, typename atomics::detail::classify< T >::type, false >
{
private:
typedef atomics::detail::base_atomic_ref< T, typename atomics::detail::classify< T >::type, false > base_type;
typedef typename base_type::value_arg_type value_arg_type;
public:
typedef typename base_type::value_type value_type;
BOOST_STATIC_ASSERT_MSG(sizeof(value_type) > 0u, "boost::atomic_ref<T> requires T to be a complete type");
#if !defined(BOOST_ATOMIC_DETAIL_NO_CXX11_IS_TRIVIALLY_COPYABLE)
BOOST_STATIC_ASSERT_MSG(atomics::detail::is_trivially_copyable< value_type >::value, "boost::atomic_ref<T> requires T to be a trivially copyable type");
#endif
private:
typedef typename base_type::storage_type storage_type;
public:
BOOST_DEFAULTED_FUNCTION(atomic_ref(atomic_ref const& that) BOOST_ATOMIC_DETAIL_DEF_NOEXCEPT_DECL, BOOST_ATOMIC_DETAIL_DEF_NOEXCEPT_IMPL : base_type(static_cast< base_type const& >(that)) {})
BOOST_FORCEINLINE explicit atomic_ref(value_type& v) BOOST_NOEXCEPT : base_type(v)
{
// Check that referenced object alignment satisfies required alignment
BOOST_ASSERT((((atomics::detail::uintptr_t)this->m_value) & (base_type::required_alignment - 1u)) == 0u);
}
BOOST_FORCEINLINE value_type operator= (value_arg_type v) const BOOST_NOEXCEPT
{
this->store(v);
return v;
}
BOOST_FORCEINLINE operator value_type() const BOOST_NOEXCEPT
{
return this->load();
}
BOOST_DELETED_FUNCTION(atomic_ref& operator= (atomic_ref const&))
};
#if !defined(BOOST_NO_CXX17_DEDUCTION_GUIDES)
template< typename T >
atomic_ref(T&) -> atomic_ref< T >;
#endif // !defined(BOOST_NO_CXX17_DEDUCTION_GUIDES)
//! Atomic reference factory function
template< typename T >
BOOST_FORCEINLINE atomic_ref< T > make_atomic_ref(T& value) BOOST_NOEXCEPT
{
return atomic_ref< T >(value);
}
} // namespace atomics
using atomics::atomic_ref;
using atomics::make_atomic_ref;
} // namespace boost
#include <boost/atomic/detail/footer.hpp>
#endif // BOOST_ATOMIC_ATOMIC_REF_HPP_INCLUDED_
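
A minimal usage sketch for the new header (variable names are illustrative):

```
#include <boost/atomic/atomic_ref.hpp>

int main()
{
  int counter = 0;                     // plain object; alignment is
                                       // checked by the BOOST_ASSERT above
  boost::atomic_ref<int> ref(counter); // atomic view of the same object
  ref.fetch_add(1);                    // atomic RMW through the reference
  return ref.load();                   // returns 1
}
```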

View File

@ -0,0 +1,21 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2014 Andrey Semashev
*/
/*!
* \file atomic/capabilities.hpp
*
* This header defines feature capabilities macros.
*/
#ifndef BOOST_ATOMIC_CAPABILITIES_HPP_INCLUDED_
#define BOOST_ATOMIC_CAPABILITIES_HPP_INCLUDED_
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/capabilities.hpp>
#include <boost/atomic/detail/wait_capabilities.hpp>
#endif // BOOST_ATOMIC_CAPABILITIES_HPP_INCLUDED_

View File

@ -0,0 +1,65 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2018 Andrey Semashev
*/
/*!
* \file atomic/detail/addressof.hpp
*
* This header defines \c addressof helper function. It is similar to \c boost::addressof but it is more
* lightweight and also contains a workaround for some compiler warnings.
*/
#ifndef BOOST_ATOMIC_DETAIL_ADDRESSOF_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_ADDRESSOF_HPP_INCLUDED_
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/header.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
// Detection logic is based on boost/core/addressof.hpp
#if defined(BOOST_MSVC_FULL_VER) && BOOST_MSVC_FULL_VER >= 190024215
#define BOOST_ATOMIC_DETAIL_HAS_BUILTIN_ADDRESSOF
#elif defined(BOOST_GCC) && BOOST_GCC >= 70000
#define BOOST_ATOMIC_DETAIL_HAS_BUILTIN_ADDRESSOF
#elif defined(__has_builtin)
#if __has_builtin(__builtin_addressof)
#define BOOST_ATOMIC_DETAIL_HAS_BUILTIN_ADDRESSOF
#endif
#endif
namespace boost {
namespace atomics {
namespace detail {
template< typename T >
BOOST_FORCEINLINE
#if defined(BOOST_ATOMIC_DETAIL_HAS_BUILTIN_ADDRESSOF)
BOOST_CONSTEXPR
#endif
T* addressof(T& value) BOOST_NOEXCEPT
{
#if defined(BOOST_ATOMIC_DETAIL_HAS_BUILTIN_ADDRESSOF)
return __builtin_addressof(value);
#else
// Note: The point of using a local struct as the intermediate type instead of char is to avoid gcc warnings
// if T is a const volatile char*:
// warning: casting 'const volatile char* const' to 'const volatile char&' does not dereference pointer
// The local struct makes sure T is not related to the cast target type.
struct opaque_type;
return reinterpret_cast< T* >(&const_cast< opaque_type& >(reinterpret_cast< const volatile opaque_type& >(value)));
#endif
}
} // namespace detail
} // namespace atomics
} // namespace boost
#include <boost/atomic/detail/footer.hpp>
#endif // BOOST_ATOMIC_DETAIL_ADDRESSOF_HPP_INCLUDED_
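
Why a dedicated addressof matters: plain `&` can be hijacked by an overloaded operator. A sketch of the failure mode it guards against (the `evil` type is purely illustrative, and the example assumes a compiler with the `__builtin_addressof` builtin detected above):

```
struct evil
{
  int x;
  evil* operator&() { return nullptr; } // hijacks the address-of operator
};

int main()
{
  evil e{1};
  // &e would call the overload and return nullptr; the builtin used by
  // atomics::detail::addressof (or its reinterpret_cast fallback above)
  // still recovers the real address.
  evil* p = __builtin_addressof(e);
  return p->x;
}
```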

View File

@ -0,0 +1,57 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2020 Andrey Semashev
*/
/*!
* \file atomic/detail/aligned_variable.hpp
*
* This header defines a convenience macro for declaring aligned variables
*/
#ifndef BOOST_ATOMIC_DETAIL_ALIGNED_VARIABLE_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_ALIGNED_VARIABLE_HPP_INCLUDED_
#include <boost/atomic/detail/config.hpp>
#if defined(BOOST_ATOMIC_DETAIL_NO_CXX11_ALIGNAS)
#include <boost/config/helper_macros.hpp>
#include <boost/type_traits/type_with_alignment.hpp>
#endif
#include <boost/atomic/detail/header.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
#if !defined(BOOST_ATOMIC_DETAIL_NO_CXX11_ALIGNAS)
#define BOOST_ATOMIC_DETAIL_ALIGNED_VAR(var_alignment, var_type, var_name) \
alignas(var_alignment) var_type var_name
#define BOOST_ATOMIC_DETAIL_ALIGNED_VAR_TPL(var_alignment, var_type, var_name) \
alignas(var_alignment) var_type var_name
#else // !defined(BOOST_ATOMIC_DETAIL_NO_CXX11_ALIGNAS)
// Note: Some compilers cannot use constant expressions in alignment attributes or alignas, so we have to use the union trick
#define BOOST_ATOMIC_DETAIL_ALIGNED_VAR(var_alignment, var_type, var_name) \
union \
{ \
var_type var_name; \
boost::type_with_alignment< var_alignment >::type BOOST_JOIN(var_name, _aligner); \
}
#define BOOST_ATOMIC_DETAIL_ALIGNED_VAR_TPL(var_alignment, var_type, var_name) \
union \
{ \
var_type var_name; \
typename boost::type_with_alignment< var_alignment >::type BOOST_JOIN(var_name, _aligner); \
}
#endif // !defined(BOOST_ATOMIC_DETAIL_NO_CXX11_ALIGNAS)
#include <boost/atomic/detail/footer.hpp>
#endif // BOOST_ATOMIC_DETAIL_ALIGNED_VARIABLE_HPP_INCLUDED_
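
A sketch of what the expansion produces for a hypothetical 16-byte-aligned member named `m_storage`:

```
// On the alignas path,
//   BOOST_ATOMIC_DETAIL_ALIGNED_VAR(16u, unsigned long long, m_storage)
// expands to the member declaration below; the fallback path instead
// wraps m_storage in an anonymous union with a type_with_alignment<16>
// aligner member, forcing the same alignment on pre-C++11 compilers.
struct with_alignas
{
  alignas(16u) unsigned long long m_storage;
};

static_assert(alignof(with_alignas) >= 16,
    "member alignment propagates to the enclosing type");
```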

View File

@ -0,0 +1,129 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2011 Helge Bahmann
* Copyright (c) 2013 Tim Blechmann
* Copyright (c) 2014, 2020 Andrey Semashev
*/
/*!
* \file atomic/detail/atomic_flag_impl.hpp
*
* This header contains implementation of \c atomic_flag.
*/
#ifndef BOOST_ATOMIC_DETAIL_ATOMIC_FLAG_IMPL_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_ATOMIC_FLAG_IMPL_HPP_INCLUDED_
#include <boost/assert.hpp>
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/core_operations.hpp>
#include <boost/atomic/detail/wait_operations.hpp>
#include <boost/atomic/detail/aligned_variable.hpp>
#include <boost/atomic/detail/header.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
/*
* IMPLEMENTATION NOTE: All interface functions MUST be declared with BOOST_FORCEINLINE,
* see comment for convert_memory_order_to_gcc in gcc_atomic_memory_order_utils.hpp.
*/
namespace boost {
namespace atomics {
namespace detail {
#if defined(BOOST_ATOMIC_DETAIL_NO_CXX11_CONSTEXPR_UNION_INIT) || defined(BOOST_NO_CXX11_UNIFIED_INITIALIZATION_SYNTAX)
#define BOOST_ATOMIC_NO_ATOMIC_FLAG_INIT
#else
#define BOOST_ATOMIC_FLAG_INIT {}
#endif
//! Atomic flag implementation
template< bool IsInterprocess >
struct atomic_flag_impl
{
// Prefer 4-byte storage as most platforms support waiting/notifying operations without a lock pool for 32-bit integers
typedef atomics::detail::core_operations< 4u, false, IsInterprocess > core_operations;
typedef atomics::detail::wait_operations< core_operations > wait_operations;
typedef typename core_operations::storage_type storage_type;
static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = core_operations::is_always_lock_free;
static BOOST_CONSTEXPR_OR_CONST bool always_has_native_wait_notify = wait_operations::always_has_native_wait_notify;
BOOST_ATOMIC_DETAIL_ALIGNED_VAR_TPL(core_operations::storage_alignment, storage_type, m_storage);
BOOST_FORCEINLINE BOOST_ATOMIC_DETAIL_CONSTEXPR_UNION_INIT atomic_flag_impl() BOOST_NOEXCEPT : m_storage(0u)
{
}
BOOST_FORCEINLINE bool is_lock_free() const volatile BOOST_NOEXCEPT
{
return is_always_lock_free;
}
BOOST_FORCEINLINE bool has_native_wait_notify() const volatile BOOST_NOEXCEPT
{
return wait_operations::has_native_wait_notify(m_storage);
}
BOOST_FORCEINLINE bool test(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT
{
BOOST_ASSERT(order != memory_order_release);
BOOST_ASSERT(order != memory_order_acq_rel);
return !!core_operations::load(m_storage, order);
}
BOOST_FORCEINLINE bool test_and_set(memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
return core_operations::test_and_set(m_storage, order);
}
BOOST_FORCEINLINE void clear(memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
BOOST_ASSERT(order != memory_order_consume);
BOOST_ASSERT(order != memory_order_acquire);
BOOST_ASSERT(order != memory_order_acq_rel);
core_operations::clear(m_storage, order);
}
BOOST_FORCEINLINE bool wait(bool old_val, memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT
{
BOOST_ASSERT(order != memory_order_release);
BOOST_ASSERT(order != memory_order_acq_rel);
return !!wait_operations::wait(m_storage, static_cast< storage_type >(old_val), order);
}
BOOST_FORCEINLINE void notify_one() volatile BOOST_NOEXCEPT
{
wait_operations::notify_one(m_storage);
}
BOOST_FORCEINLINE void notify_all() volatile BOOST_NOEXCEPT
{
wait_operations::notify_all(m_storage);
}
BOOST_DELETED_FUNCTION(atomic_flag_impl(atomic_flag_impl const&))
BOOST_DELETED_FUNCTION(atomic_flag_impl& operator= (atomic_flag_impl const&))
};
#if defined(BOOST_NO_CXX17_INLINE_VARIABLES)
template< bool IsInterprocess >
BOOST_CONSTEXPR_OR_CONST bool atomic_flag_impl< IsInterprocess >::is_always_lock_free;
template< bool IsInterprocess >
BOOST_CONSTEXPR_OR_CONST bool atomic_flag_impl< IsInterprocess >::always_has_native_wait_notify;
#endif
} // namespace detail
} // namespace atomics
} // namespace boost
#include <boost/atomic/detail/footer.hpp>
#endif // BOOST_ATOMIC_DETAIL_ATOMIC_FLAG_IMPL_HPP_INCLUDED_
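
A classic spinlock sketch over the public boost::atomic_flag wrapper that this implementation backs (illustrative; assumes BOOST_ATOMIC_FLAG_INIT is available per the conditionals above):

```
#include <boost/atomic/atomic_flag.hpp>
#include <boost/memory_order.hpp>

boost::atomic_flag lock_flag = BOOST_ATOMIC_FLAG_INIT;

void lock()
{
  while (lock_flag.test_and_set(boost::memory_order_acquire))
  {
    // spin; the wait()/notify_one() operations above enable a
    // blocking variant instead of busy-waiting
  }
}

void unlock()
{
  lock_flag.clear(boost::memory_order_release);
}
```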

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -0,0 +1,158 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2009 Helge Bahmann
* Copyright (c) 2012 Tim Blechmann
* Copyright (c) 2013-2018, 2020-2021 Andrey Semashev
*/
/*!
* \file atomic/detail/bitwise_cast.hpp
*
* This header defines \c bitwise_cast used to convert between storage and value types
*/
#ifndef BOOST_ATOMIC_DETAIL_BITWISE_CAST_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_BITWISE_CAST_HPP_INCLUDED_
#include <cstddef>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/addressof.hpp>
#include <boost/atomic/detail/string_ops.hpp>
#include <boost/atomic/detail/type_traits/remove_cv.hpp>
#include <boost/atomic/detail/type_traits/integral_constant.hpp>
#include <boost/atomic/detail/type_traits/has_unique_object_representations.hpp>
#include <boost/atomic/detail/header.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
#if !defined(BOOST_ATOMIC_DETAIL_NO_HAS_UNIQUE_OBJECT_REPRESENTATIONS)
#if defined(__has_builtin)
#if __has_builtin(__builtin_bit_cast)
#define BOOST_ATOMIC_DETAIL_BIT_CAST(x, y) __builtin_bit_cast(x, y)
#endif
#endif
#if !defined(BOOST_ATOMIC_DETAIL_BIT_CAST) && defined(BOOST_MSVC) && BOOST_MSVC >= 1926
#define BOOST_ATOMIC_DETAIL_BIT_CAST(x, y) __builtin_bit_cast(x, y)
#endif
#endif // !defined(BOOST_ATOMIC_DETAIL_NO_HAS_UNIQUE_OBJECT_REPRESENTATIONS)
#if defined(BOOST_NO_CXX11_CONSTEXPR) || !defined(BOOST_ATOMIC_DETAIL_BIT_CAST) || !defined(BOOST_ATOMIC_DETAIL_HAS_BUILTIN_ADDRESSOF)
#define BOOST_ATOMIC_DETAIL_NO_CXX11_CONSTEXPR_BITWISE_CAST
#endif
#if !defined(BOOST_ATOMIC_DETAIL_NO_CXX11_CONSTEXPR_BITWISE_CAST)
#define BOOST_ATOMIC_DETAIL_CONSTEXPR_BITWISE_CAST BOOST_CONSTEXPR
#else
#define BOOST_ATOMIC_DETAIL_CONSTEXPR_BITWISE_CAST
#endif
#if defined(BOOST_GCC) && BOOST_GCC >= 80000
#pragma GCC diagnostic push
// copying an object of non-trivial type X from an array of Y. This is benign because we use memcpy to copy trivially copyable objects.
#pragma GCC diagnostic ignored "-Wclass-memaccess"
#endif
namespace boost {
namespace atomics {
namespace detail {
template< std::size_t ValueSize, typename To >
BOOST_FORCEINLINE void clear_tail_padding_bits(To& to, atomics::detail::true_type) BOOST_NOEXCEPT
{
BOOST_ATOMIC_DETAIL_MEMSET(reinterpret_cast< unsigned char* >(atomics::detail::addressof(to)) + ValueSize, 0, sizeof(To) - ValueSize);
}
template< std::size_t ValueSize, typename To >
BOOST_FORCEINLINE void clear_tail_padding_bits(To&, atomics::detail::false_type) BOOST_NOEXCEPT
{
}
template< std::size_t ValueSize, typename To >
BOOST_FORCEINLINE void clear_tail_padding_bits(To& to) BOOST_NOEXCEPT
{
atomics::detail::clear_tail_padding_bits< ValueSize >(to, atomics::detail::integral_constant< bool, ValueSize < sizeof(To) >());
}
template< typename To, std::size_t FromValueSize, typename From >
BOOST_FORCEINLINE To bitwise_cast_memcpy(From const& from) BOOST_NOEXCEPT
{
typedef typename atomics::detail::remove_cv< To >::type unqualified_to_t;
unqualified_to_t to;
#if !defined(BOOST_ATOMIC_NO_CLEAR_PADDING)
From from2(from);
BOOST_ATOMIC_DETAIL_CLEAR_PADDING(atomics::detail::addressof(from2));
BOOST_ATOMIC_DETAIL_MEMCPY
(
atomics::detail::addressof(to),
atomics::detail::addressof(from2),
(FromValueSize < sizeof(unqualified_to_t) ? FromValueSize : sizeof(unqualified_to_t))
);
#else
BOOST_ATOMIC_DETAIL_MEMCPY
(
atomics::detail::addressof(to),
atomics::detail::addressof(from),
(FromValueSize < sizeof(unqualified_to_t) ? FromValueSize : sizeof(unqualified_to_t))
);
#endif
atomics::detail::clear_tail_padding_bits< FromValueSize >(to);
return to;
}
#if defined(BOOST_ATOMIC_DETAIL_BIT_CAST)
template< typename To, std::size_t FromValueSize, typename From >
BOOST_FORCEINLINE BOOST_ATOMIC_DETAIL_CONSTEXPR_BITWISE_CAST To bitwise_cast_impl(From const& from, atomics::detail::true_type) BOOST_NOEXCEPT
{
// This implementation is only called when the From type has no padding and From and To have the same size
return BOOST_ATOMIC_DETAIL_BIT_CAST(typename atomics::detail::remove_cv< To >::type, from);
}
template< typename To, std::size_t FromValueSize, typename From >
BOOST_FORCEINLINE To bitwise_cast_impl(From const& from, atomics::detail::false_type) BOOST_NOEXCEPT
{
return atomics::detail::bitwise_cast_memcpy< To, FromValueSize >(from);
}
template< typename To, std::size_t FromValueSize, typename From >
BOOST_FORCEINLINE BOOST_ATOMIC_DETAIL_CONSTEXPR_BITWISE_CAST To bitwise_cast(From const& from) BOOST_NOEXCEPT
{
return atomics::detail::bitwise_cast_impl< To, FromValueSize >(from, atomics::detail::integral_constant< bool,
FromValueSize == sizeof(To) && atomics::detail::has_unique_object_representations< From >::value >());
}
#else // defined(BOOST_ATOMIC_DETAIL_BIT_CAST)
template< typename To, std::size_t FromValueSize, typename From >
BOOST_FORCEINLINE To bitwise_cast(From const& from) BOOST_NOEXCEPT
{
return atomics::detail::bitwise_cast_memcpy< To, FromValueSize >(from);
}
#endif // defined(BOOST_ATOMIC_DETAIL_BIT_CAST)
//! Converts the source object to the target type, possibly by padding or truncating it on the right, and clearing any padding bits (if supported by compiler). Preserves value bits unchanged.
template< typename To, typename From >
BOOST_FORCEINLINE BOOST_ATOMIC_DETAIL_CONSTEXPR_BITWISE_CAST To bitwise_cast(From const& from) BOOST_NOEXCEPT
{
return atomics::detail::bitwise_cast< To, sizeof(From) >(from);
}
} // namespace detail
} // namespace atomics
} // namespace boost
#if defined(BOOST_GCC) && BOOST_GCC >= 80000
#pragma GCC diagnostic pop
#endif
#include <boost/atomic/detail/footer.hpp>
#endif // BOOST_ATOMIC_DETAIL_BITWISE_CAST_HPP_INCLUDED_
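
The memcpy fallback boils down to the familiar value/storage round trip; a standalone sketch of the idea (plain C++, not the detail API itself):

```
#include <cstdint>
#include <cstring>

std::uint32_t to_storage(float f)
{
  std::uint32_t s = 0;            // zero first: tail padding bits cleared
  std::memcpy(&s, &f, sizeof(f)); // copy only the value bits
  return s;
}

float from_storage(std::uint32_t s)
{
  float f;
  std::memcpy(&f, &s, sizeof(f));
  return f;
}
```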

View File

@ -0,0 +1,118 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2018, 2021 Andrey Semashev
*/
/*!
* \file atomic/detail/bitwise_fp_cast.hpp
*
* This header defines \c bitwise_fp_cast used to convert between storage and floating point value types
*/
#ifndef BOOST_ATOMIC_DETAIL_BITWISE_FP_CAST_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_BITWISE_FP_CAST_HPP_INCLUDED_
#include <cstddef>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/float_sizes.hpp>
#include <boost/atomic/detail/bitwise_cast.hpp>
#if defined(BOOST_ATOMIC_DETAIL_BIT_CAST)
#include <boost/atomic/detail/type_traits/integral_constant.hpp>
#endif
#include <boost/atomic/detail/header.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
namespace boost {
namespace atomics {
namespace detail {
/*!
* \brief The type trait returns the size of the value of the specified floating point type
*
* This size may be less than <tt>sizeof(T)</tt> if the implementation uses padding bytes for a particular FP type. This is
* often the case with 80-bit extended double, which is stored in 12 or 16 initial bytes with tail padding filled with garbage.
*/
template< typename T >
struct value_size_of
{
static BOOST_CONSTEXPR_OR_CONST std::size_t value = sizeof(T);
};
#if defined(BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT_VALUE)
template< >
struct value_size_of< float >
{
static BOOST_CONSTEXPR_OR_CONST std::size_t value = BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT_VALUE;
};
#endif
#if defined(BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE_VALUE)
template< >
struct value_size_of< double >
{
static BOOST_CONSTEXPR_OR_CONST std::size_t value = BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE_VALUE;
};
#endif
#if defined(BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE_VALUE)
template< >
struct value_size_of< long double >
{
static BOOST_CONSTEXPR_OR_CONST std::size_t value = BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE_VALUE;
};
#endif
template< typename T >
struct value_size_of< const T > : value_size_of< T > {};
template< typename T >
struct value_size_of< volatile T > : value_size_of< T > {};
template< typename T >
struct value_size_of< const volatile T > : value_size_of< T > {};
#if !defined(BOOST_ATOMIC_NO_CLEAR_PADDING)
// BOOST_ATOMIC_DETAIL_CLEAR_PADDING, which is used in bitwise_cast, will clear the tail padding bits in the source object.
// We don't need to specify the actual value size to avoid redundant zeroing of the tail padding.
#define BOOST_ATOMIC_DETAIL_BITWISE_FP_CAST_VALUE_SIZE_OF(x) sizeof(x)
#else
#define BOOST_ATOMIC_DETAIL_BITWISE_FP_CAST_VALUE_SIZE_OF(x) atomics::detail::value_size_of< x >::value
#endif
#if defined(BOOST_ATOMIC_DETAIL_BIT_CAST)
//! Similar to bitwise_cast, but either \c From or \c To is expected to be a floating point type. Attempts to detect the actual value size in the source object and considers the rest of the object as padding.
template< typename To, typename From >
BOOST_FORCEINLINE BOOST_ATOMIC_DETAIL_CONSTEXPR_BITWISE_CAST To bitwise_fp_cast(From const& from) BOOST_NOEXCEPT
{
// For floating point types, has_unique_object_representations is typically false even if the type contains no padding bits.
// Here, we rely on our detection of the actual value size to select the constexpr bit_cast implementation when possible. We assume
// here that floating point value bits are contiguous.
return atomics::detail::bitwise_cast_impl< To, BOOST_ATOMIC_DETAIL_BITWISE_FP_CAST_VALUE_SIZE_OF(From) >(from, atomics::detail::integral_constant< bool,
atomics::detail::value_size_of< From >::value == sizeof(From) && atomics::detail::value_size_of< From >::value == sizeof(To) >());
}
#else // defined(BOOST_ATOMIC_DETAIL_BIT_CAST)
//! Similar to bitwise_cast, but either \c From or \c To is expected to be a floating point type. Attempts to detect the actual value size in the source object and considers the rest of the object as padding.
template< typename To, typename From >
BOOST_FORCEINLINE BOOST_ATOMIC_DETAIL_CONSTEXPR_BITWISE_CAST To bitwise_fp_cast(From const& from) BOOST_NOEXCEPT
{
return atomics::detail::bitwise_cast< To, BOOST_ATOMIC_DETAIL_BITWISE_FP_CAST_VALUE_SIZE_OF(From) >(from);
}
#endif // defined(BOOST_ATOMIC_DETAIL_BIT_CAST)
} // namespace detail
} // namespace atomics
} // namespace boost
#include <boost/atomic/detail/footer.hpp>
#endif // BOOST_ATOMIC_DETAIL_BITWISE_FP_CAST_HPP_INCLUDED_

View File

@ -0,0 +1,217 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2014 Andrey Semashev
*/
/*!
* \file atomic/detail/capabilities.hpp
*
* This header defines core feature capabilities macros.
*/
#ifndef BOOST_ATOMIC_DETAIL_CAPABILITIES_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_CAPABILITIES_HPP_INCLUDED_
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/platform.hpp>
#include <boost/atomic/detail/int_sizes.hpp>
#if !defined(BOOST_ATOMIC_NO_FLOATING_POINT)
#include <boost/atomic/detail/float_sizes.hpp>
#endif
#if defined(BOOST_ATOMIC_DETAIL_CORE_BACKEND_HEADER)
#include BOOST_ATOMIC_DETAIL_CORE_BACKEND_HEADER(boost/atomic/detail/caps_)
#elif defined(BOOST_ATOMIC_DETAIL_CORE_ARCH_BACKEND_HEADER)
#include BOOST_ATOMIC_DETAIL_CORE_ARCH_BACKEND_HEADER(boost/atomic/detail/caps_arch_)
#endif
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
#ifndef BOOST_ATOMIC_INT8_LOCK_FREE
#define BOOST_ATOMIC_INT8_LOCK_FREE 0
#endif
#ifndef BOOST_ATOMIC_INT16_LOCK_FREE
#define BOOST_ATOMIC_INT16_LOCK_FREE 0
#endif
#ifndef BOOST_ATOMIC_INT32_LOCK_FREE
#define BOOST_ATOMIC_INT32_LOCK_FREE 0
#endif
#ifndef BOOST_ATOMIC_INT64_LOCK_FREE
#define BOOST_ATOMIC_INT64_LOCK_FREE 0
#endif
#ifndef BOOST_ATOMIC_INT128_LOCK_FREE
#define BOOST_ATOMIC_INT128_LOCK_FREE 0
#endif
#ifndef BOOST_ATOMIC_CHAR_LOCK_FREE
#define BOOST_ATOMIC_CHAR_LOCK_FREE BOOST_ATOMIC_INT8_LOCK_FREE
#endif
#ifndef BOOST_ATOMIC_CHAR8_T_LOCK_FREE
#define BOOST_ATOMIC_CHAR8_T_LOCK_FREE BOOST_ATOMIC_INT8_LOCK_FREE
#endif
#ifndef BOOST_ATOMIC_CHAR16_T_LOCK_FREE
#define BOOST_ATOMIC_CHAR16_T_LOCK_FREE BOOST_ATOMIC_INT16_LOCK_FREE
#endif
#ifndef BOOST_ATOMIC_CHAR32_T_LOCK_FREE
#define BOOST_ATOMIC_CHAR32_T_LOCK_FREE BOOST_ATOMIC_INT32_LOCK_FREE
#endif
#ifndef BOOST_ATOMIC_WCHAR_T_LOCK_FREE
#if BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T == 1
#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE BOOST_ATOMIC_INT8_LOCK_FREE
#elif BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T == 2
#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE BOOST_ATOMIC_INT16_LOCK_FREE
#elif BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T == 4
#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE BOOST_ATOMIC_INT32_LOCK_FREE
#elif BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T == 8
#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE BOOST_ATOMIC_INT64_LOCK_FREE
#else
#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE 0
#endif
#endif
#ifndef BOOST_ATOMIC_SHORT_LOCK_FREE
#if BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 1
#define BOOST_ATOMIC_SHORT_LOCK_FREE BOOST_ATOMIC_INT8_LOCK_FREE
#elif BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 2
#define BOOST_ATOMIC_SHORT_LOCK_FREE BOOST_ATOMIC_INT16_LOCK_FREE
#elif BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 4
#define BOOST_ATOMIC_SHORT_LOCK_FREE BOOST_ATOMIC_INT32_LOCK_FREE
#elif BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 8
#define BOOST_ATOMIC_SHORT_LOCK_FREE BOOST_ATOMIC_INT64_LOCK_FREE
#else
#define BOOST_ATOMIC_SHORT_LOCK_FREE 0
#endif
#endif
#ifndef BOOST_ATOMIC_INT_LOCK_FREE
#if BOOST_ATOMIC_DETAIL_SIZEOF_INT == 1
#define BOOST_ATOMIC_INT_LOCK_FREE BOOST_ATOMIC_INT8_LOCK_FREE
#elif BOOST_ATOMIC_DETAIL_SIZEOF_INT == 2
#define BOOST_ATOMIC_INT_LOCK_FREE BOOST_ATOMIC_INT16_LOCK_FREE
#elif BOOST_ATOMIC_DETAIL_SIZEOF_INT == 4
#define BOOST_ATOMIC_INT_LOCK_FREE BOOST_ATOMIC_INT32_LOCK_FREE
#elif BOOST_ATOMIC_DETAIL_SIZEOF_INT == 8
#define BOOST_ATOMIC_INT_LOCK_FREE BOOST_ATOMIC_INT64_LOCK_FREE
#else
#define BOOST_ATOMIC_INT_LOCK_FREE 0
#endif
#endif
#ifndef BOOST_ATOMIC_LONG_LOCK_FREE
#if BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 1
#define BOOST_ATOMIC_LONG_LOCK_FREE BOOST_ATOMIC_INT8_LOCK_FREE
#elif BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 2
#define BOOST_ATOMIC_LONG_LOCK_FREE BOOST_ATOMIC_INT16_LOCK_FREE
#elif BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 4
#define BOOST_ATOMIC_LONG_LOCK_FREE BOOST_ATOMIC_INT32_LOCK_FREE
#elif BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 8
#define BOOST_ATOMIC_LONG_LOCK_FREE BOOST_ATOMIC_INT64_LOCK_FREE
#else
#define BOOST_ATOMIC_LONG_LOCK_FREE 0
#endif
#endif
#ifndef BOOST_ATOMIC_LLONG_LOCK_FREE
#if BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 1
#define BOOST_ATOMIC_LLONG_LOCK_FREE BOOST_ATOMIC_INT8_LOCK_FREE
#elif BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 2
#define BOOST_ATOMIC_LLONG_LOCK_FREE BOOST_ATOMIC_INT16_LOCK_FREE
#elif BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 4
#define BOOST_ATOMIC_LLONG_LOCK_FREE BOOST_ATOMIC_INT32_LOCK_FREE
#elif BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 8
#define BOOST_ATOMIC_LLONG_LOCK_FREE BOOST_ATOMIC_INT64_LOCK_FREE
#else
#define BOOST_ATOMIC_LLONG_LOCK_FREE 0
#endif
#endif
#ifndef BOOST_ATOMIC_POINTER_LOCK_FREE
#if (BOOST_ATOMIC_DETAIL_SIZEOF_POINTER + 0) == 8
#define BOOST_ATOMIC_POINTER_LOCK_FREE BOOST_ATOMIC_INT64_LOCK_FREE
#elif (BOOST_ATOMIC_DETAIL_SIZEOF_POINTER + 0) == 4
#define BOOST_ATOMIC_POINTER_LOCK_FREE BOOST_ATOMIC_INT32_LOCK_FREE
#else
#define BOOST_ATOMIC_POINTER_LOCK_FREE 0
#endif
#endif
#define BOOST_ATOMIC_ADDRESS_LOCK_FREE BOOST_ATOMIC_POINTER_LOCK_FREE
#ifndef BOOST_ATOMIC_BOOL_LOCK_FREE
// We store bools in 1-byte storage in all backends
#define BOOST_ATOMIC_BOOL_LOCK_FREE BOOST_ATOMIC_INT8_LOCK_FREE
#endif
#ifndef BOOST_ATOMIC_FLAG_LOCK_FREE
// atomic_flag uses 4-byte storage
#define BOOST_ATOMIC_FLAG_LOCK_FREE BOOST_ATOMIC_INT32_LOCK_FREE
#endif
#if !defined(BOOST_ATOMIC_NO_FLOATING_POINT)
#if !defined(BOOST_ATOMIC_FLOAT_LOCK_FREE) && defined(BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT)
#if BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT == 2
#define BOOST_ATOMIC_FLOAT_LOCK_FREE BOOST_ATOMIC_INT16_LOCK_FREE
#elif BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT == 4
#define BOOST_ATOMIC_FLOAT_LOCK_FREE BOOST_ATOMIC_INT32_LOCK_FREE
#elif BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT == 8
#define BOOST_ATOMIC_FLOAT_LOCK_FREE BOOST_ATOMIC_INT64_LOCK_FREE
#elif BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT > 8 && BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT <= 16
#define BOOST_ATOMIC_FLOAT_LOCK_FREE BOOST_ATOMIC_INT128_LOCK_FREE
#else
#define BOOST_ATOMIC_FLOAT_LOCK_FREE 0
#endif
#endif
#if !defined(BOOST_ATOMIC_DOUBLE_LOCK_FREE) && defined(BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE)
#if BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE == 2
#define BOOST_ATOMIC_DOUBLE_LOCK_FREE BOOST_ATOMIC_INT16_LOCK_FREE
#elif BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE == 4
#define BOOST_ATOMIC_DOUBLE_LOCK_FREE BOOST_ATOMIC_INT32_LOCK_FREE
#elif BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE == 8
#define BOOST_ATOMIC_DOUBLE_LOCK_FREE BOOST_ATOMIC_INT64_LOCK_FREE
#elif BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE > 8 && BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE <= 16
#define BOOST_ATOMIC_DOUBLE_LOCK_FREE BOOST_ATOMIC_INT128_LOCK_FREE
#else
#define BOOST_ATOMIC_DOUBLE_LOCK_FREE 0
#endif
#endif
#if !defined(BOOST_ATOMIC_LONG_DOUBLE_LOCK_FREE) && defined(BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE)
#if BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE == 2
#define BOOST_ATOMIC_LONG_DOUBLE_LOCK_FREE BOOST_ATOMIC_INT16_LOCK_FREE
#elif BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE == 4
#define BOOST_ATOMIC_LONG_DOUBLE_LOCK_FREE BOOST_ATOMIC_INT32_LOCK_FREE
#elif BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE == 8
#define BOOST_ATOMIC_LONG_DOUBLE_LOCK_FREE BOOST_ATOMIC_INT64_LOCK_FREE
#elif BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE > 8 && BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE <= 16
#define BOOST_ATOMIC_LONG_DOUBLE_LOCK_FREE BOOST_ATOMIC_INT128_LOCK_FREE
#else
#define BOOST_ATOMIC_LONG_DOUBLE_LOCK_FREE 0
#endif
#endif
#endif // !defined(BOOST_ATOMIC_NO_FLOATING_POINT)
#ifndef BOOST_ATOMIC_THREAD_FENCE
#define BOOST_ATOMIC_THREAD_FENCE 0
#endif
#ifndef BOOST_ATOMIC_SIGNAL_FENCE
#define BOOST_ATOMIC_SIGNAL_FENCE 0
#endif
#endif // BOOST_ATOMIC_DETAIL_CAPABILITIES_HPP_INCLUDED_
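
These macros follow the C++11 `ATOMIC_*_LOCK_FREE` convention (0 = never, 1 = sometimes, 2 = always lock-free); a compile-time check sketch:

```
#include <boost/atomic/capabilities.hpp>

static_assert(BOOST_ATOMIC_INT32_LOCK_FREE == 2,
    "this sketch assumes always-lock-free 32-bit atomics on the target");
```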

View File

@ -0,0 +1,53 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2020, 2022 Andrey Semashev
*/
/*!
* \file atomic/detail/caps_arch_gcc_aarch32.hpp
*
* This header defines feature capabilities macros
*/
#ifndef BOOST_ATOMIC_DETAIL_CAPS_ARCH_GCC_AARCH32_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_CAPS_ARCH_GCC_AARCH32_HPP_INCLUDED_
#include <boost/atomic/detail/config.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
#if defined(__ARMEL__) || \
(defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || \
(defined(__LITTLE_ENDIAN__) && !defined(__BIG_ENDIAN__)) || \
defined(BOOST_WINDOWS)
#define BOOST_ATOMIC_DETAIL_AARCH32_LITTLE_ENDIAN
#elif defined(__ARMEB__) || \
defined(__ARM_BIG_ENDIAN) || \
(defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) || \
(defined(__BIG_ENDIAN__) && !defined(__LITTLE_ENDIAN__))
#define BOOST_ATOMIC_DETAIL_AARCH32_BIG_ENDIAN
#else
#include <boost/predef/other/endian.h>
#if BOOST_ENDIAN_LITTLE_BYTE
#define BOOST_ATOMIC_DETAIL_AARCH32_LITTLE_ENDIAN
#elif BOOST_ENDIAN_BIG_BYTE
#define BOOST_ATOMIC_DETAIL_AARCH32_BIG_ENDIAN
#else
#error "Boost.Atomic: Failed to determine AArch32 endianness, the target platform is not supported. Please, report to the developers (patches are welcome)."
#endif
#endif
#define BOOST_ATOMIC_INT8_LOCK_FREE 2
#define BOOST_ATOMIC_INT16_LOCK_FREE 2
#define BOOST_ATOMIC_INT32_LOCK_FREE 2
#define BOOST_ATOMIC_INT64_LOCK_FREE 2
#define BOOST_ATOMIC_POINTER_LOCK_FREE 2
#define BOOST_ATOMIC_THREAD_FENCE 2
#define BOOST_ATOMIC_SIGNAL_FENCE 2
#endif // BOOST_ATOMIC_DETAIL_CAPS_ARCH_GCC_AARCH32_HPP_INCLUDED_

View File

@ -0,0 +1,65 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2020, 2022 Andrey Semashev
*/
/*!
* \file atomic/detail/caps_arch_gcc_aarch64.hpp
*
* This header defines feature capabilities macros
*/
#ifndef BOOST_ATOMIC_DETAIL_CAPS_ARCH_GCC_AARCH64_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_CAPS_ARCH_GCC_AARCH64_HPP_INCLUDED_
#include <boost/atomic/detail/config.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
#if defined(__AARCH64EL__) || \
(defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || \
(defined(__LITTLE_ENDIAN__) && !defined(__BIG_ENDIAN__)) || \
defined(BOOST_WINDOWS)
#define BOOST_ATOMIC_DETAIL_AARCH64_LITTLE_ENDIAN
#elif defined(__AARCH64EB__) || \
defined(__ARM_BIG_ENDIAN) || \
(defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) || \
(defined(__BIG_ENDIAN__) && !defined(__LITTLE_ENDIAN__))
#define BOOST_ATOMIC_DETAIL_AARCH64_BIG_ENDIAN
#else
#include <boost/predef/other/endian.h>
#if BOOST_ENDIAN_LITTLE_BYTE
#define BOOST_ATOMIC_DETAIL_AARCH64_LITTLE_ENDIAN
#elif BOOST_ENDIAN_BIG_BYTE
#define BOOST_ATOMIC_DETAIL_AARCH64_BIG_ENDIAN
#else
#error "Boost.Atomic: Failed to determine AArch64 endianness, the target platform is not supported. Please, report to the developers (patches are welcome)."
#endif
#endif
#if defined(__ARM_FEATURE_ATOMICS)
// ARMv8.1 added Large System Extensions, which includes cas, swp, and a number of other read-modify-write instructions
#define BOOST_ATOMIC_DETAIL_AARCH64_HAS_LSE
#endif
#if defined(__ARM_FEATURE_COMPLEX)
// ARMv8.3 added the Release Consistency processor consistent (RCpc) memory model, which includes ldapr and similar instructions.
// Unfortunately, there seems to be no dedicated __ARM_FEATURE macro for this, so we use __ARM_FEATURE_COMPLEX, which is also defined starting with ARMv8.3.
#define BOOST_ATOMIC_DETAIL_AARCH64_HAS_RCPC
#endif
#define BOOST_ATOMIC_INT8_LOCK_FREE 2
#define BOOST_ATOMIC_INT16_LOCK_FREE 2
#define BOOST_ATOMIC_INT32_LOCK_FREE 2
#define BOOST_ATOMIC_INT64_LOCK_FREE 2
#define BOOST_ATOMIC_INT128_LOCK_FREE 2
#define BOOST_ATOMIC_POINTER_LOCK_FREE 2
#define BOOST_ATOMIC_THREAD_FENCE 2
#define BOOST_ATOMIC_SIGNAL_FENCE 2
#endif // BOOST_ATOMIC_DETAIL_CAPS_ARCH_GCC_AARCH64_HPP_INCLUDED_

View File

@ -0,0 +1,34 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2009 Helge Bahmann
* Copyright (c) 2013 Tim Blechmann
* Copyright (c) 2014 Andrey Semashev
*/
/*!
* \file atomic/detail/caps_arch_gcc_alpha.hpp
*
* This header defines feature capabilities macros
*/
#ifndef BOOST_ATOMIC_DETAIL_CAPS_ARCH_GCC_ALPHA_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_CAPS_ARCH_GCC_ALPHA_HPP_INCLUDED_
#include <boost/atomic/detail/config.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
#define BOOST_ATOMIC_INT8_LOCK_FREE 2
#define BOOST_ATOMIC_INT16_LOCK_FREE 2
#define BOOST_ATOMIC_INT32_LOCK_FREE 2
#define BOOST_ATOMIC_INT64_LOCK_FREE 2
#define BOOST_ATOMIC_POINTER_LOCK_FREE 2
#define BOOST_ATOMIC_THREAD_FENCE 2
#define BOOST_ATOMIC_SIGNAL_FENCE 2
#endif // BOOST_ATOMIC_DETAIL_CAPS_ARCH_GCC_ALPHA_HPP_INCLUDED_

View File

@ -0,0 +1,103 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2009 Helge Bahmann
* Copyright (c) 2009 Phil Endecott
* Copyright (c) 2013 Tim Blechmann
* ARM Code by Phil Endecott, based on other architectures.
* Copyright (c) 2014, 2020, 2022 Andrey Semashev
*/
/*!
* \file atomic/detail/caps_arch_gcc_arm.hpp
*
* This header defines feature capabilities macros
*/
#ifndef BOOST_ATOMIC_DETAIL_CAPS_ARCH_GCC_ARM_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_CAPS_ARCH_GCC_ARM_HPP_INCLUDED_
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/platform.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
#if defined(__ARMEL__) || \
(defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || \
(defined(__LITTLE_ENDIAN__) && !defined(__BIG_ENDIAN__)) || \
defined(BOOST_WINDOWS)
#define BOOST_ATOMIC_DETAIL_ARM_LITTLE_ENDIAN
#elif defined(__ARMEB__) || \
defined(__ARM_BIG_ENDIAN) || \
(defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) || \
(defined(__BIG_ENDIAN__) && !defined(__LITTLE_ENDIAN__))
#define BOOST_ATOMIC_DETAIL_ARM_BIG_ENDIAN
#else
#include <boost/predef/other/endian.h>
#if BOOST_ENDIAN_LITTLE_BYTE
#define BOOST_ATOMIC_DETAIL_ARM_LITTLE_ENDIAN
#elif BOOST_ENDIAN_BIG_BYTE
#define BOOST_ATOMIC_DETAIL_ARM_BIG_ENDIAN
#else
#error "Boost.Atomic: Failed to determine ARM endianness, the target platform is not supported. Please, report to the developers (patches are welcome)."
#endif
#endif
#if defined(__GNUC__) && defined(__arm__) && (BOOST_ATOMIC_DETAIL_ARM_ARCH >= 6)
#if BOOST_ATOMIC_DETAIL_ARM_ARCH > 6
// ARMv7 and later have dmb instruction
#define BOOST_ATOMIC_DETAIL_ARM_HAS_DMB 1
#endif
#if defined(__ARM_FEATURE_LDREX)
#if (__ARM_FEATURE_LDREX & 1)
#define BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXB_STREXB 1
#endif
#if (__ARM_FEATURE_LDREX & 2)
#define BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXH_STREXH 1
#endif
#if (__ARM_FEATURE_LDREX & 8)
#define BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXD_STREXD 1
#endif
#else // defined(__ARM_FEATURE_LDREX)
#if !(defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6Z__))
// ARMv6k and ARMv7 have 8 and 16-bit ldrex/strex variants, but at least GCC 4.7 fails to compile them. GCC 4.9 is known to work.
#if (__GNUC__ * 100 + __GNUC_MINOR__) >= 409
#define BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXB_STREXB 1
#define BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXH_STREXH 1
#endif
#if !(((defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6ZK__)) && defined(__thumb__)) || defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7M__))
// ARMv6k and ARMv7 except ARMv7-M have 64-bit ldrex/strex variants.
// Unfortunately, GCC (at least 4.7.3 on Ubuntu) does not allocate register pairs properly when targeting ARMv6k Thumb,
// which is required for ldrexd/strexd instructions, so we disable 64-bit support. When targeting ARMv6k ARM
// or ARMv7 (both ARM and Thumb 2) it works as expected.
#define BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXD_STREXD 1
#endif
#endif // !(defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6Z__))
#endif // defined(__ARM_FEATURE_LDREX)
#endif // defined(__GNUC__) && defined(__arm__) && (BOOST_ATOMIC_DETAIL_ARM_ARCH >= 6)
#define BOOST_ATOMIC_INT8_LOCK_FREE 2
#define BOOST_ATOMIC_INT16_LOCK_FREE 2
#define BOOST_ATOMIC_INT32_LOCK_FREE 2
#if defined(BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXD_STREXD)
#define BOOST_ATOMIC_INT64_LOCK_FREE 2
#endif
#define BOOST_ATOMIC_POINTER_LOCK_FREE 2
#define BOOST_ATOMIC_THREAD_FENCE 2
#define BOOST_ATOMIC_SIGNAL_FENCE 2
#endif // BOOST_ATOMIC_DETAIL_CAPS_ARCH_GCC_ARM_HPP_INCLUDED_

View File

@ -0,0 +1,55 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2009 Helge Bahmann
* Copyright (c) 2013 Tim Blechmann
* Copyright (c) 2014 Andrey Semashev
*/
/*!
* \file atomic/detail/caps_arch_gcc_ppc.hpp
*
* This header defines feature capabilities macros
*/
#ifndef BOOST_ATOMIC_DETAIL_CAPS_ARCH_GCC_PPC_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_CAPS_ARCH_GCC_PPC_HPP_INCLUDED_
#include <boost/atomic/detail/config.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
#if defined(__POWERPC__) || defined(__PPC__)
#if defined(_ARCH_PWR8)
// Power8 and later architectures have 8 and 16-bit instructions
#define BOOST_ATOMIC_DETAIL_PPC_HAS_LBARX_STBCX
#define BOOST_ATOMIC_DETAIL_PPC_HAS_LHARX_STHCX
#endif
#if defined(__powerpc64__) || defined(__PPC64__)
// Power7 and later architectures in 64-bit mode have 64-bit instructions
#define BOOST_ATOMIC_DETAIL_PPC_HAS_LDARX_STDCX
#if defined(_ARCH_PWR8)
// Power8 also has 128-bit instructions
#define BOOST_ATOMIC_DETAIL_PPC_HAS_LQARX_STQCX
#endif
#endif
#endif // defined(__POWERPC__) || defined(__PPC__)
#define BOOST_ATOMIC_INT8_LOCK_FREE 2
#define BOOST_ATOMIC_INT16_LOCK_FREE 2
#define BOOST_ATOMIC_INT32_LOCK_FREE 2
#if defined(BOOST_ATOMIC_DETAIL_PPC_HAS_LDARX_STDCX)
#define BOOST_ATOMIC_INT64_LOCK_FREE 2
#endif
#define BOOST_ATOMIC_POINTER_LOCK_FREE 2
#define BOOST_ATOMIC_THREAD_FENCE 2
#define BOOST_ATOMIC_SIGNAL_FENCE 2
#endif // BOOST_ATOMIC_DETAIL_CAPS_ARCH_GCC_PPC_HPP_INCLUDED_

View File

@ -0,0 +1,34 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2010 Helge Bahmann
* Copyright (c) 2013 Tim Blechmann
* Copyright (c) 2014 Andrey Semashev
*/
/*!
* \file atomic/detail/caps_arch_gcc_sparc.hpp
*
* This header defines feature capabilities macros
*/
#ifndef BOOST_ATOMIC_DETAIL_CAPS_ARCH_GCC_SPARC_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_CAPS_ARCH_GCC_SPARC_HPP_INCLUDED_
#include <boost/atomic/detail/config.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
#define BOOST_ATOMIC_INT8_LOCK_FREE 2
#define BOOST_ATOMIC_INT16_LOCK_FREE 2
#define BOOST_ATOMIC_INT32_LOCK_FREE 2
#define BOOST_ATOMIC_INT64_LOCK_FREE 2
#define BOOST_ATOMIC_POINTER_LOCK_FREE 2
#define BOOST_ATOMIC_THREAD_FENCE 2
#define BOOST_ATOMIC_SIGNAL_FENCE 2
#endif // BOOST_ATOMIC_DETAIL_CAPS_ARCH_GCC_SPARC_HPP_INCLUDED_

View File

@ -0,0 +1,74 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2009 Helge Bahmann
* Copyright (c) 2012 Tim Blechmann
* Copyright (c) 2013 - 2014 Andrey Semashev
*/
/*!
* \file atomic/detail/caps_arch_gcc_x86.hpp
*
* This header defines feature capabilities macros
*/
#ifndef BOOST_ATOMIC_DETAIL_CAPS_ARCH_GCC_X86_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_CAPS_ARCH_GCC_X86_HPP_INCLUDED_
#include <boost/atomic/detail/config.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
#if defined(__GNUC__)
#if defined(__i386__) &&\
(\
defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8) ||\
defined(__i586__) || defined(__i686__) || defined(__SSE__)\
)
#define BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B 1
#endif
#if defined(__x86_64__) && defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16)
#define BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B 1
#endif
#if defined(__x86_64__) || defined(__SSE2__)
// Use mfence only if SSE2 is available
#define BOOST_ATOMIC_DETAIL_X86_HAS_MFENCE 1
#endif
#else // defined(__GNUC__)
#if defined(__i386__) && !defined(BOOST_ATOMIC_NO_CMPXCHG8B)
#define BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B 1
#endif
#if defined(__x86_64__) && !defined(BOOST_ATOMIC_NO_CMPXCHG16B)
#define BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B 1
#endif
#if !defined(BOOST_ATOMIC_NO_MFENCE)
#define BOOST_ATOMIC_DETAIL_X86_HAS_MFENCE 1
#endif
#endif // defined(__GNUC__)
#define BOOST_ATOMIC_INT8_LOCK_FREE 2
#define BOOST_ATOMIC_INT16_LOCK_FREE 2
#define BOOST_ATOMIC_INT32_LOCK_FREE 2
#if defined(__x86_64__) || defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B)
#define BOOST_ATOMIC_INT64_LOCK_FREE 2
#endif
#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B)
#define BOOST_ATOMIC_INT128_LOCK_FREE 2
#endif
#define BOOST_ATOMIC_POINTER_LOCK_FREE 2
#define BOOST_ATOMIC_THREAD_FENCE 2
#define BOOST_ATOMIC_SIGNAL_FENCE 2
#endif // BOOST_ATOMIC_DETAIL_CAPS_ARCH_GCC_X86_HPP_INCLUDED_

View File

@ -0,0 +1,34 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2009 Helge Bahmann
* Copyright (c) 2013 Tim Blechmann
* Copyright (c) 2012 - 2014 Andrey Semashev
*/
/*!
* \file atomic/detail/caps_arch_msvc_arm.hpp
*
* This header defines feature capabilities macros
*/
#ifndef BOOST_ATOMIC_DETAIL_CAPS_ARCH_MSVC_ARM_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_CAPS_ARCH_MSVC_ARM_HPP_INCLUDED_
#include <boost/atomic/detail/config.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
#define BOOST_ATOMIC_INT8_LOCK_FREE 2
#define BOOST_ATOMIC_INT16_LOCK_FREE 2
#define BOOST_ATOMIC_INT32_LOCK_FREE 2
#define BOOST_ATOMIC_INT64_LOCK_FREE 2
#define BOOST_ATOMIC_POINTER_LOCK_FREE 2
#define BOOST_ATOMIC_THREAD_FENCE 2
#define BOOST_ATOMIC_SIGNAL_FENCE 2
#endif // BOOST_ATOMIC_DETAIL_CAPS_ARCH_MSVC_ARM_HPP_INCLUDED_

View File

@ -0,0 +1,61 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2009 Helge Bahmann
* Copyright (c) 2013 Tim Blechmann
* Copyright (c) 2012 - 2014 Andrey Semashev
*/
/*!
* \file atomic/detail/caps_arch_msvc_x86.hpp
*
* This header defines feature capabilities macros
*/
#ifndef BOOST_ATOMIC_DETAIL_CAPS_ARCH_MSVC_X86_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_CAPS_ARCH_MSVC_X86_HPP_INCLUDED_
#include <boost/atomic/detail/config.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
#if defined(_M_IX86) && _M_IX86 >= 500
#define BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B 1
#endif
#if defined(_M_AMD64) && !defined(BOOST_ATOMIC_NO_CMPXCHG16B)
#if defined(__clang__)
#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16)
#define BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B 1
#endif
#elif _MSC_VER >= 1500
#define BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B 1
#endif
#endif
#if defined(_MSC_VER) && (defined(_M_AMD64) || (defined(_M_IX86) && defined(_M_IX86_FP) && _M_IX86_FP >= 2))
// Use mfence only if SSE2 is available
#define BOOST_ATOMIC_DETAIL_X86_HAS_MFENCE 1
#endif
#define BOOST_ATOMIC_INT8_LOCK_FREE 2
#define BOOST_ATOMIC_INT16_LOCK_FREE 2
#define BOOST_ATOMIC_INT32_LOCK_FREE 2
#if defined(_M_AMD64) || defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B)
#define BOOST_ATOMIC_INT64_LOCK_FREE 2
#endif
#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B)
#define BOOST_ATOMIC_INT128_LOCK_FREE 2
#endif
#define BOOST_ATOMIC_POINTER_LOCK_FREE 2
#define BOOST_ATOMIC_THREAD_FENCE 2
#define BOOST_ATOMIC_SIGNAL_FENCE 2
#endif // BOOST_ATOMIC_DETAIL_CAPS_ARCH_MSVC_X86_HPP_INCLUDED_

View File

@ -0,0 +1,158 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2014, 2020 Andrey Semashev
*/
/*!
* \file atomic/detail/caps_gcc_atomic.hpp
*
* This header defines feature capabilities macros
*/
#ifndef BOOST_ATOMIC_DETAIL_CAPS_GCC_ATOMIC_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_CAPS_GCC_ATOMIC_HPP_INCLUDED_
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/int_sizes.hpp>
#if defined(BOOST_ATOMIC_DETAIL_CORE_ARCH_BACKEND_HEADER)
#include BOOST_ATOMIC_DETAIL_CORE_ARCH_BACKEND_HEADER(boost/atomic/detail/caps_arch_)
#endif
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
// Translate type-based lock-free macros to size-based ones
#define BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT8_LOCK_FREE __GCC_ATOMIC_CHAR_LOCK_FREE
#if BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 2
#define BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT16_LOCK_FREE __GCC_ATOMIC_SHORT_LOCK_FREE
#elif BOOST_ATOMIC_DETAIL_SIZEOF_INT == 2
#define BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT16_LOCK_FREE __GCC_ATOMIC_INT_LOCK_FREE
#elif BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 2
#define BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT16_LOCK_FREE __GCC_ATOMIC_LONG_LOCK_FREE
#elif BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 2
#define BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT16_LOCK_FREE __GCC_ATOMIC_LLONG_LOCK_FREE
#else
#define BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT16_LOCK_FREE 0
#endif
#if BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 4
#define BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT32_LOCK_FREE __GCC_ATOMIC_SHORT_LOCK_FREE
#elif BOOST_ATOMIC_DETAIL_SIZEOF_INT == 4
#define BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT32_LOCK_FREE __GCC_ATOMIC_INT_LOCK_FREE
#elif BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 4
#define BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT32_LOCK_FREE __GCC_ATOMIC_LONG_LOCK_FREE
#elif BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 4
#define BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT32_LOCK_FREE __GCC_ATOMIC_LLONG_LOCK_FREE
#else
#define BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT32_LOCK_FREE 0
#endif
#if BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 8
#define BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT64_LOCK_FREE __GCC_ATOMIC_SHORT_LOCK_FREE
#elif BOOST_ATOMIC_DETAIL_SIZEOF_INT == 8
#define BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT64_LOCK_FREE __GCC_ATOMIC_INT_LOCK_FREE
#elif BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 8
#define BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT64_LOCK_FREE __GCC_ATOMIC_LONG_LOCK_FREE
#elif BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 8
#define BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT64_LOCK_FREE __GCC_ATOMIC_LLONG_LOCK_FREE
#else
#define BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT64_LOCK_FREE 0
#endif
#if BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 16
#define BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT128_LOCK_FREE __GCC_ATOMIC_SHORT_LOCK_FREE
#elif BOOST_ATOMIC_DETAIL_SIZEOF_INT == 16
#define BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT128_LOCK_FREE __GCC_ATOMIC_INT_LOCK_FREE
#elif BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 16
#define BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT128_LOCK_FREE __GCC_ATOMIC_LONG_LOCK_FREE
#elif BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 16
#define BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT128_LOCK_FREE __GCC_ATOMIC_LLONG_LOCK_FREE
#else
#define BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT128_LOCK_FREE 0
#endif
// On x86-64, clang 3.4 does not implement 128-bit __atomic* intrinsics even though it defines __GCC_HAVE_SYNC_COMPARE_AND_SWAP_16:
// https://bugs.llvm.org/show_bug.cgi?id=19149
// Another problem exists with gcc 7 and later, as it requires linking with libatomic to use 16-byte intrinsics:
// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=80878
// Both clang and gcc do generate cmpxchg16b for __sync_val_compare_and_swap though.
#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B) &&\
(\
(defined(BOOST_CLANG) && (__clang_major__ < 3 || (__clang_major__ == 3 && __clang_minor__ < 5))) ||\
(defined(BOOST_GCC) && BOOST_GCC >= 70000)\
)
#undef BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT128_LOCK_FREE
#define BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT128_LOCK_FREE 0
#endif
// On 32-bit x86, there is a clang bug for 64-bit atomics: https://bugs.llvm.org/show_bug.cgi?id=19355. The compiler defines
// __GCC_ATOMIC_LLONG_LOCK_FREE to 1 when the target architecture supports 64-bit atomic instructions (i.e. the value should be 2).
// Additionally, any clang version requires linking with libatomic for 64-bit __atomic* intrinsics on x86. It does generate
// cmpxchg8b for __sync_val_compare_and_swap though.
#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B) && defined(BOOST_CLANG)
#undef BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT64_LOCK_FREE
#define BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT64_LOCK_FREE 0
#endif
// Override arch-specific macros if atomic intrinsics provide better guarantees
#if !defined(BOOST_ATOMIC_INT128_LOCK_FREE) || (BOOST_ATOMIC_INT128_LOCK_FREE < BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT128_LOCK_FREE)
#undef BOOST_ATOMIC_INT128_LOCK_FREE
#define BOOST_ATOMIC_INT128_LOCK_FREE BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT128_LOCK_FREE
#endif
#if !defined(BOOST_ATOMIC_INT64_LOCK_FREE) || (BOOST_ATOMIC_INT64_LOCK_FREE < BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT64_LOCK_FREE) || (BOOST_ATOMIC_INT64_LOCK_FREE < BOOST_ATOMIC_INT128_LOCK_FREE)
#undef BOOST_ATOMIC_INT64_LOCK_FREE
#if BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT64_LOCK_FREE >= BOOST_ATOMIC_INT128_LOCK_FREE
#define BOOST_ATOMIC_INT64_LOCK_FREE BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT64_LOCK_FREE
#else
#define BOOST_ATOMIC_INT64_LOCK_FREE BOOST_ATOMIC_INT128_LOCK_FREE
#endif
#endif
#if !defined(BOOST_ATOMIC_INT32_LOCK_FREE) || (BOOST_ATOMIC_INT32_LOCK_FREE < BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT32_LOCK_FREE) || (BOOST_ATOMIC_INT32_LOCK_FREE < BOOST_ATOMIC_INT64_LOCK_FREE)
#undef BOOST_ATOMIC_INT32_LOCK_FREE
#if BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT32_LOCK_FREE >= BOOST_ATOMIC_INT64_LOCK_FREE
#define BOOST_ATOMIC_INT32_LOCK_FREE BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT32_LOCK_FREE
#else
#define BOOST_ATOMIC_INT32_LOCK_FREE BOOST_ATOMIC_INT64_LOCK_FREE
#endif
#endif
#if !defined(BOOST_ATOMIC_INT16_LOCK_FREE) || (BOOST_ATOMIC_INT16_LOCK_FREE < BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT16_LOCK_FREE) || (BOOST_ATOMIC_INT16_LOCK_FREE < BOOST_ATOMIC_INT32_LOCK_FREE)
#undef BOOST_ATOMIC_INT16_LOCK_FREE
#if BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT16_LOCK_FREE >= BOOST_ATOMIC_INT32_LOCK_FREE
#define BOOST_ATOMIC_INT16_LOCK_FREE BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT16_LOCK_FREE
#else
#define BOOST_ATOMIC_INT16_LOCK_FREE BOOST_ATOMIC_INT32_LOCK_FREE
#endif
#endif
#if !defined(BOOST_ATOMIC_INT8_LOCK_FREE) || (BOOST_ATOMIC_INT8_LOCK_FREE < BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT8_LOCK_FREE) || (BOOST_ATOMIC_INT8_LOCK_FREE < BOOST_ATOMIC_INT16_LOCK_FREE)
#undef BOOST_ATOMIC_INT8_LOCK_FREE
#if BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT8_LOCK_FREE >= BOOST_ATOMIC_INT16_LOCK_FREE
#define BOOST_ATOMIC_INT8_LOCK_FREE BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT8_LOCK_FREE
#else
#define BOOST_ATOMIC_INT8_LOCK_FREE BOOST_ATOMIC_INT16_LOCK_FREE
#endif
#endif
#if !defined(BOOST_ATOMIC_POINTER_LOCK_FREE) || (BOOST_ATOMIC_POINTER_LOCK_FREE < __GCC_ATOMIC_POINTER_LOCK_FREE)
#undef BOOST_ATOMIC_POINTER_LOCK_FREE
#define BOOST_ATOMIC_POINTER_LOCK_FREE __GCC_ATOMIC_POINTER_LOCK_FREE
#endif
#if !defined(BOOST_ATOMIC_THREAD_FENCE) || (BOOST_ATOMIC_THREAD_FENCE < 2)
#undef BOOST_ATOMIC_THREAD_FENCE
#define BOOST_ATOMIC_THREAD_FENCE 2
#endif
#if !defined(BOOST_ATOMIC_SIGNAL_FENCE) || (BOOST_ATOMIC_SIGNAL_FENCE < 2)
#undef BOOST_ATOMIC_SIGNAL_FENCE
#define BOOST_ATOMIC_SIGNAL_FENCE 2
#endif
#endif // BOOST_ATOMIC_DETAIL_CAPS_GCC_ATOMIC_HPP_INCLUDED_
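
The capability values computed by the cascade above follow the same convention as the C++11 `ATOMIC_*_LOCK_FREE` macros: 0 = never lock-free, 1 = sometimes, 2 = always. A minimal sketch (assuming Boost.Atomic headers are available on the include path) that inspects the result of this header at compile and run time:

```
#include <cstdint>
#include <iostream>
#include <boost/atomic.hpp>

int main()
{
    // Value of the capability macro computed by the cascade above:
    // 0 = never lock-free, 1 = sometimes, 2 = always.
    std::cout << "int64 capability: " << BOOST_ATOMIC_INT64_LOCK_FREE << '\n';

    // The runtime query on a concrete object agrees with the macro.
    boost::atomic< std::uint64_t > v(0);
    std::cout << "this object is lock-free: " << v.is_lock_free() << '\n';
}
```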

View File

@ -0,0 +1,54 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2011 Helge Bahmann
* Copyright (c) 2013 Tim Blechmann
* Copyright (c) 2014 Andrey Semashev
*/
/*!
* \file atomic/detail/caps_gcc_sync.hpp
*
* This header defines feature capabilities macros
*/
#ifndef BOOST_ATOMIC_DETAIL_CAPS_GCC_SYNC_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_CAPS_GCC_SYNC_HPP_INCLUDED_
#include <boost/atomic/detail/config.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1)\
|| defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2)\
|| defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)\
|| defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)\
|| defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16)
#define BOOST_ATOMIC_INT8_LOCK_FREE 2
#endif
#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2)\
|| defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)\
|| defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)\
|| defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16)
#define BOOST_ATOMIC_INT16_LOCK_FREE 2
#endif
#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)\
|| defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)\
|| defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16)
#define BOOST_ATOMIC_INT32_LOCK_FREE 2
#endif
#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)\
|| defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16)
#define BOOST_ATOMIC_INT64_LOCK_FREE 2
#endif
#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16)
#define BOOST_ATOMIC_INT128_LOCK_FREE 2
#endif
#define BOOST_ATOMIC_THREAD_FENCE 2
#define BOOST_ATOMIC_SIGNAL_FENCE 2
#endif // BOOST_ATOMIC_DETAIL_CAPS_GCC_SYNC_HPP_INCLUDED_
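
These macros are derived purely from the legacy `__sync` builtins the compiler advertises. A sketch of the primitive behind them (GCC/clang specific; the non-atomic fallback branch is illustrative only, a real backend would use locks):

```
#include <cstdint>

bool cas32(std::uint32_t* p, std::uint32_t expected, std::uint32_t desired)
{
#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
    // Returns the value observed at *p; the CAS succeeded iff that value
    // equals 'expected'. The __sync builtins imply a full memory barrier.
    return __sync_val_compare_and_swap(p, expected, desired) == expected;
#else
    // No 4-byte CAS advertised: shown only to make the sketch complete.
    if (*p != expected) return false;
    *p = desired;
    return true;
#endif
}
```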

View File

@ -0,0 +1,35 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2009, 2011 Helge Bahmann
* Copyright (c) 2009 Phil Endecott
* Copyright (c) 2013 Tim Blechmann
* Linux-specific code by Phil Endecott
* Copyright (c) 2014 Andrey Semashev
*/
/*!
* \file atomic/detail/caps_linux_arm.hpp
*
* This header defines feature capabilities macros
*/
#ifndef BOOST_ATOMIC_DETAIL_CAPS_LINUX_ARM_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_CAPS_LINUX_ARM_HPP_INCLUDED_
#include <boost/atomic/detail/config.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
#define BOOST_ATOMIC_INT8_LOCK_FREE 2
#define BOOST_ATOMIC_INT16_LOCK_FREE 2
#define BOOST_ATOMIC_INT32_LOCK_FREE 2
#define BOOST_ATOMIC_POINTER_LOCK_FREE 2
#define BOOST_ATOMIC_THREAD_FENCE 2
#define BOOST_ATOMIC_SIGNAL_FENCE 2
#endif // BOOST_ATOMIC_DETAIL_CAPS_LINUX_ARM_HPP_INCLUDED_

View File

@ -0,0 +1,33 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2009 Helge Bahmann
* Copyright (c) 2013 Tim Blechmann
* Copyright (c) 2012 - 2014 Andrey Semashev
*/
/*!
* \file atomic/detail/caps_windows.hpp
*
* This header defines feature capabilities macros
*/
#ifndef BOOST_ATOMIC_DETAIL_CAPS_WINDOWS_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_CAPS_WINDOWS_HPP_INCLUDED_
#include <boost/atomic/detail/config.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
#define BOOST_ATOMIC_INT8_LOCK_FREE 2
#define BOOST_ATOMIC_INT16_LOCK_FREE 2
#define BOOST_ATOMIC_INT32_LOCK_FREE 2
#define BOOST_ATOMIC_POINTER_LOCK_FREE 2
#define BOOST_ATOMIC_THREAD_FENCE 2
#define BOOST_ATOMIC_SIGNAL_FENCE 2
#endif // BOOST_ATOMIC_DETAIL_CAPS_WINDOWS_HPP_INCLUDED_

View File

@ -0,0 +1,50 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2014 Andrey Semashev
*/
/*!
* \file atomic/detail/cas_based_exchange.hpp
*
* This header contains CAS-based implementation of exchange operation.
*/
#ifndef BOOST_ATOMIC_DETAIL_CAS_BASED_EXCHANGE_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_CAS_BASED_EXCHANGE_HPP_INCLUDED_
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/header.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
namespace boost {
namespace atomics {
namespace detail {
template< typename Base >
struct cas_based_exchange :
public Base
{
typedef typename Base::storage_type storage_type;
static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type old_val;
atomics::detail::non_atomic_load(storage, old_val);
while (!Base::compare_exchange_weak(storage, old_val, v, order, memory_order_relaxed)) {}
return old_val;
}
};
} // namespace detail
} // namespace atomics
} // namespace boost
#include <boost/atomic/detail/footer.hpp>
#endif // BOOST_ATOMIC_DETAIL_CAS_BASED_EXCHANGE_HPP_INCLUDED_
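
This helper is the generic building block for backends whose hardware provides only compare-and-swap. The same technique, sketched against `std::atomic` (Boost's version starts from a non-atomic load of the storage, which is safe because a stale initial value only costs an extra loop iteration):

```
#include <atomic>

template< typename T >
T exchange_via_cas(std::atomic< T >& storage, T desired, std::memory_order order)
{
    T old_val = storage.load(std::memory_order_relaxed);
    // On failure compare_exchange_weak refreshes old_val with the value
    // currently stored, so the loop converges just like the Boost helper.
    while (!storage.compare_exchange_weak(old_val, desired, order, std::memory_order_relaxed)) {}
    return old_val;
}
```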

View File

@ -0,0 +1,90 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2020-2021 Andrey Semashev
*/
/*!
* \file atomic/detail/classify.hpp
*
* This header contains type traits for type classification.
*/
#ifndef BOOST_ATOMIC_DETAIL_CLASSIFY_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_CLASSIFY_HPP_INCLUDED_
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/type_traits/is_enum.hpp>
#include <boost/atomic/detail/type_traits/is_integral.hpp>
#include <boost/atomic/detail/type_traits/is_function.hpp>
#include <boost/atomic/detail/type_traits/is_floating_point.hpp>
#include <boost/atomic/detail/header.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
namespace boost {
namespace atomics {
namespace detail {
template< typename T, bool IsFunction = atomics::detail::is_function< T >::value >
struct classify_pointer
{
typedef void* type;
};
template< typename T >
struct classify_pointer< T, true >
{
typedef void type;
};
template<
typename T,
bool IsInt = atomics::detail::is_integral< T >::value,
bool IsFloat = atomics::detail::is_floating_point< T >::value,
bool IsEnum = atomics::detail::is_enum< T >::value
>
struct classify
{
typedef void type;
};
template< typename T >
struct classify< T, true, false, false > { typedef int type; };
#if !defined(BOOST_ATOMIC_NO_FLOATING_POINT)
template< typename T >
struct classify< T, false, true, false > { typedef float type; };
#endif
template< typename T >
struct classify< T, false, false, true > { typedef const int type; };
template< typename T >
struct classify< T*, false, false, false > { typedef typename classify_pointer< T >::type type; };
template< >
struct classify< void*, false, false, false > { typedef void type; };
template< >
struct classify< const void*, false, false, false > { typedef void type; };
template< >
struct classify< volatile void*, false, false, false > { typedef void type; };
template< >
struct classify< const volatile void*, false, false, false > { typedef void type; };
template< typename T, typename U >
struct classify< T U::*, false, false, false > { typedef void type; };
} // namespace detail
} // namespace atomics
} // namespace boost
#include <boost/atomic/detail/footer.hpp>
#endif // BOOST_ATOMIC_DETAIL_CLASSIFY_HPP_INCLUDED_
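
A condensed model of this dispatch, using the standard `<type_traits>` in place of Boost's internal ones (an illustrative sketch, not the actual Boost code; the enum, member-pointer, and cv-qualified `void*` cases are omitted):

```
#include <type_traits>

template< typename T, typename = void >
struct classify { typedef void type; };   // unclassified: bitwise ops only

template< typename T >
struct classify< T, typename std::enable_if< std::is_integral< T >::value >::type >
{ typedef int type; };                    // integral: full arithmetic support

template< typename T >
struct classify< T, typename std::enable_if< std::is_floating_point< T >::value >::type >
{ typedef float type; };                  // floating point: fp arithmetic

template< typename T >
struct classify< T* > { typedef void* type; }; // object pointer: pointer arithmetic

static_assert(std::is_same< classify< unsigned >::type, int >::value, "int tag");
static_assert(std::is_same< classify< double >::type, float >::value, "float tag");
static_assert(std::is_same< classify< long* >::type, void* >::value, "pointer tag");
```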

View File

@ -0,0 +1,143 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2012 Hartmut Kaiser
* Copyright (c) 2014-2018, 2020-2021 Andrey Semashev
*/
/*!
* \file atomic/detail/config.hpp
*
* This header defines configuration macros for Boost.Atomic
*/
#ifndef BOOST_ATOMIC_DETAIL_CONFIG_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_CONFIG_HPP_INCLUDED_
#include <boost/config.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
#if defined(__CUDACC__)
// nvcc does not support alternatives ("q,m") in asm statement constraints
#define BOOST_ATOMIC_DETAIL_NO_ASM_CONSTRAINT_ALTERNATIVES
// nvcc does not support condition code register ("cc") clobber in asm statements
#define BOOST_ATOMIC_DETAIL_NO_ASM_CLOBBER_CC
#endif
#if !defined(BOOST_ATOMIC_DETAIL_NO_ASM_CLOBBER_CC)
#define BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC "cc"
#define BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "cc",
#else
#define BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
#define BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA
#endif
#if (defined(__i386__) || defined(__x86_64__)) && (defined(__clang__) || (defined(BOOST_GCC) && BOOST_GCC < 40500) || defined(__SUNPRO_CC))
// This macro indicates that the compiler does not support allocating eax:edx or rax:rdx register pairs ("A") in asm blocks
#define BOOST_ATOMIC_DETAIL_X86_NO_ASM_AX_DX_PAIRS
#endif
#if defined(__i386__) && (defined(__PIC__) || defined(__PIE__)) && !(defined(__clang__) || (defined(BOOST_GCC) && BOOST_GCC >= 50100))
// This macro indicates that asm blocks should preserve ebx value unchanged. Some compilers are able to maintain ebx themselves
// around the asm blocks. For those compilers we don't need to save/restore ebx in asm blocks.
#define BOOST_ATOMIC_DETAIL_X86_ASM_PRESERVE_EBX
#endif
#if defined(BOOST_NO_CXX11_HDR_TYPE_TRAITS)
#if !(defined(BOOST_LIBSTDCXX11) && BOOST_LIBSTDCXX_VERSION >= 40700) /* libstdc++ from gcc >= 4.7 in C++11 mode */
// This macro indicates that there is not even a basic <type_traits> standard header that is sufficient for most Boost.Atomic needs.
#define BOOST_ATOMIC_DETAIL_NO_CXX11_BASIC_HDR_TYPE_TRAITS
#endif
#endif // defined(BOOST_NO_CXX11_HDR_TYPE_TRAITS)
#if defined(BOOST_NO_CXX11_ALIGNAS) ||\
(defined(BOOST_GCC) && BOOST_GCC < 40900) ||\
(defined(BOOST_MSVC) && BOOST_MSVC < 1910 && defined(_M_IX86))
// gcc prior to 4.9 doesn't support alignas with a constant expression as an argument.
// MSVC 14.0 does support alignas, but in 32-bit mode emits "error C2719: formal parameter with requested alignment of N won't be aligned" for N > 4,
// when aligned types are used in function arguments, even though the std::max_align_t type has alignment of 8.
#define BOOST_ATOMIC_DETAIL_NO_CXX11_ALIGNAS
#endif
#if defined(BOOST_NO_CXX11_CONSTEXPR) || (defined(BOOST_GCC) && BOOST_GCC < 40800)
// This macro indicates that the compiler doesn't support constexpr constructors that initialize one member
// of an anonymous union member of the class.
#define BOOST_ATOMIC_DETAIL_NO_CXX11_CONSTEXPR_UNION_INIT
#endif
#if !defined(BOOST_ATOMIC_DETAIL_NO_CXX11_CONSTEXPR_UNION_INIT)
#define BOOST_ATOMIC_DETAIL_CONSTEXPR_UNION_INIT BOOST_CONSTEXPR
#else
#define BOOST_ATOMIC_DETAIL_CONSTEXPR_UNION_INIT
#endif
// Enable pointer/reference casts between storage and value when possible.
// Note: Although MSVC does not employ strict aliasing rules for optimizations
// and does not require an explicit markup for types that may alias, we still don't
// enable the optimization for this compiler because at least MSVC-8 and 9 are known
// to sometimes generate broken code when casts are used.
#define BOOST_ATOMIC_DETAIL_MAY_ALIAS BOOST_MAY_ALIAS
#if !defined(BOOST_NO_MAY_ALIAS)
#define BOOST_ATOMIC_DETAIL_STORAGE_TYPE_MAY_ALIAS
#endif
#if defined(__GCC_ASM_FLAG_OUTPUTS__)
// The compiler supports output values in flag registers.
// See: https://gcc.gnu.org/onlinedocs/gcc/Extended-Asm.html, Section 6.44.3.
#define BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS
#endif
#if defined(BOOST_INTEL) || (defined(BOOST_GCC) && BOOST_GCC < 40700) ||\
(defined(BOOST_CLANG) && !defined(__apple_build_version__) && (__clang_major__ * 100 + __clang_minor__) < 302) ||\
(defined(__clang__) && defined(__apple_build_version__) && (__clang_major__ * 100 + __clang_minor__) < 402)
// Intel compiler (at least 18.0 update 1) breaks if noexcept specification is used in defaulted function declarations:
// error: the default constructor of "boost::atomics::atomic<T>" cannot be referenced -- it is a deleted function
// GCC 4.6 doesn't seem to support that either. Clang 3.1 deduces wrong noexcept for the defaulted function and fails as well.
#define BOOST_ATOMIC_DETAIL_DEF_NOEXCEPT_DECL
#define BOOST_ATOMIC_DETAIL_DEF_NOEXCEPT_IMPL BOOST_NOEXCEPT
#else
#define BOOST_ATOMIC_DETAIL_DEF_NOEXCEPT_DECL BOOST_NOEXCEPT
#define BOOST_ATOMIC_DETAIL_DEF_NOEXCEPT_IMPL
#endif
#if defined(__has_builtin)
#if __has_builtin(__builtin_constant_p)
#define BOOST_ATOMIC_DETAIL_IS_CONSTANT(x) __builtin_constant_p(x)
#endif
#if __has_builtin(__builtin_clear_padding)
#define BOOST_ATOMIC_DETAIL_CLEAR_PADDING(x) __builtin_clear_padding(x)
#elif __has_builtin(__builtin_zero_non_value_bits)
#define BOOST_ATOMIC_DETAIL_CLEAR_PADDING(x) __builtin_zero_non_value_bits(x)
#endif
#endif
#if !defined(BOOST_ATOMIC_DETAIL_IS_CONSTANT) && defined(__GNUC__)
#define BOOST_ATOMIC_DETAIL_IS_CONSTANT(x) __builtin_constant_p(x)
#endif
#if !defined(BOOST_ATOMIC_DETAIL_IS_CONSTANT)
#define BOOST_ATOMIC_DETAIL_IS_CONSTANT(x) false
#endif
#if !defined(BOOST_ATOMIC_DETAIL_CLEAR_PADDING) && defined(BOOST_MSVC) && BOOST_MSVC >= 1927
// Note that as of MSVC 19.29 this intrinsic does not clear padding in unions:
// https://developercommunity.visualstudio.com/t/__builtin_zero_non_value_bits-does-not-c/1551510
#define BOOST_ATOMIC_DETAIL_CLEAR_PADDING(x) __builtin_zero_non_value_bits(x)
#endif
#if !defined(BOOST_ATOMIC_DETAIL_CLEAR_PADDING)
#define BOOST_ATOMIC_NO_CLEAR_PADDING
#define BOOST_ATOMIC_DETAIL_CLEAR_PADDING(x)
#endif
#if (defined(__BYTE_ORDER__) && defined(__FLOAT_WORD_ORDER__) && __BYTE_ORDER__ == __FLOAT_WORD_ORDER__) ||\
defined(__i386__) || defined(__x86_64__) || defined(_M_IX86) || defined(_M_X64)
// This macro indicates that integer and floating point endianness is the same
#define BOOST_ATOMIC_DETAIL_INT_FP_ENDIAN_MATCH
#endif
#endif // BOOST_ATOMIC_DETAIL_CONFIG_HPP_INCLUDED_
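
The feature-detection idiom used throughout this header, reduced to a standalone sketch (the `MY_` prefix is hypothetical, chosen here to avoid clashing with the real macros): probe via `__has_builtin` where available, fall back to a compiler-family check, and finally to a conservative default.

```
#if defined(__has_builtin)
#if __has_builtin(__builtin_constant_p)
#define MY_IS_CONSTANT(x) __builtin_constant_p(x)
#endif
#endif
#if !defined(MY_IS_CONSTANT) && defined(__GNUC__)
// Older GCC predates __has_builtin but has long supported this builtin.
#define MY_IS_CONSTANT(x) __builtin_constant_p(x)
#endif
#if !defined(MY_IS_CONSTANT)
#define MY_IS_CONSTANT(x) false // conservative: assume not a constant
#endif

int twice(int x)
{
    // The branch is resolved at compile time when the compiler can
    // prove that x is a constant expression.
    return MY_IS_CONSTANT(x) ? x * 2 : x + x;
}
```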

View File

@ -0,0 +1,50 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2020 Andrey Semashev
*/
/*!
* \file atomic/detail/core_arch_operations.hpp
*
* This header defines core atomic operations, including the emulated version.
*/
#ifndef BOOST_ATOMIC_DETAIL_CORE_ARCH_OPERATIONS_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_CORE_ARCH_OPERATIONS_HPP_INCLUDED_
#include <boost/atomic/detail/core_arch_operations_fwd.hpp>
#include <boost/atomic/detail/core_operations_emulated.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/platform.hpp>
#include <boost/atomic/detail/storage_traits.hpp>
#if defined(BOOST_ATOMIC_DETAIL_CORE_ARCH_BACKEND_HEADER)
#include BOOST_ATOMIC_DETAIL_CORE_ARCH_BACKEND_HEADER(boost/atomic/detail/core_arch_ops_)
#endif
#include <boost/atomic/detail/header.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
namespace boost {
namespace atomics {
namespace detail {
//! Default definition (primary template) that falls back to the lock-based implementation
template< std::size_t Size, bool Signed, bool Interprocess >
struct core_arch_operations :
public core_operations_emulated< Size, storage_traits< Size >::alignment, Signed, Interprocess >
{
};
} // namespace detail
} // namespace atomics
} // namespace boost
#include <boost/atomic/detail/footer.hpp>
#endif // BOOST_ATOMIC_DETAIL_CORE_ARCH_OPERATIONS_HPP_INCLUDED_
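
The selection scheme in miniature: the primary template is the lock-based emulation, and whichever arch backend header was included above contributes specializations for the sizes it supports (an illustrative sketch with made-up names, not the Boost types):

```
#include <cstddef>

template< std::size_t Size >
struct ops                      // primary template: lock-based fallback
{
    static const bool is_always_lock_free = false;
};

template<>
struct ops< 4u >                // contributed by an arch backend header
{
    static const bool is_always_lock_free = true;
};

static_assert(ops< 4u >::is_always_lock_free, "backend handles 4-byte ops");
static_assert(!ops< 3u >::is_always_lock_free, "other sizes fall back to emulation");
```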

View File

@ -0,0 +1,38 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2020 Andrey Semashev
*/
/*!
* \file atomic/detail/core_arch_operations_fwd.hpp
*
* This header contains forward declaration of the \c core_arch_operations template.
*/
#ifndef BOOST_ATOMIC_DETAIL_CORE_ARCH_OPERATIONS_FWD_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_CORE_ARCH_OPERATIONS_FWD_HPP_INCLUDED_
#include <cstddef>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/header.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
namespace boost {
namespace atomics {
namespace detail {
template< std::size_t Size, bool Signed, bool Interprocess >
struct core_arch_operations;
} // namespace detail
} // namespace atomics
} // namespace boost
#include <boost/atomic/detail/footer.hpp>
#endif // BOOST_ATOMIC_DETAIL_CORE_ARCH_OPERATIONS_FWD_HPP_INCLUDED_

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -0,0 +1,867 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2009 Helge Bahmann
* Copyright (c) 2013 Tim Blechmann
* Copyright (c) 2014 Andrey Semashev
*/
/*!
* \file atomic/detail/core_arch_ops_gcc_alpha.hpp
*
* This header contains implementation of the \c core_arch_operations template.
*/
#ifndef BOOST_ATOMIC_DETAIL_CORE_ARCH_OPS_GCC_ALPHA_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_CORE_ARCH_OPS_GCC_ALPHA_HPP_INCLUDED_
#include <cstddef>
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/storage_traits.hpp>
#include <boost/atomic/detail/core_arch_operations_fwd.hpp>
#include <boost/atomic/detail/header.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
namespace boost {
namespace atomics {
namespace detail {
/*
Refer to http://h71000.www7.hp.com/doc/82final/5601/5601pro_004.html
(HP OpenVMS systems documentation) and the Alpha Architecture Reference Manual.
*/
/*
NB: The most natural thing would be to write the increment/decrement
operators along the following lines:
__asm__ __volatile__
(
"1: ldl_l %0,%1 \n"
"addl %0,1,%0 \n"
"stl_c %0,%1 \n"
"beq %0,1b\n"
: "=&b" (tmp)
: "m" (value)
: "cc"
);
However, according to the comments on the HP website and matching
comments in the Linux kernel sources, this defies branch prediction,
as the CPU assumes that backward branches are always taken; so we
instead copy the trick from the Linux kernel and introduce a forward
branch and back again.
I have, however, had a hard time measuring the difference between
the two versions in microbenchmarks -- I am leaving it in nevertheless
as it apparently does not hurt either.
*/
struct core_arch_operations_gcc_alpha_base
{
static BOOST_CONSTEXPR_OR_CONST bool full_cas_based = false;
static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = true;
static BOOST_FORCEINLINE void fence_before(memory_order order) BOOST_NOEXCEPT
{
if ((static_cast< unsigned int >(order) & static_cast< unsigned int >(memory_order_release)) != 0u)
__asm__ __volatile__ ("mb" ::: "memory");
}
static BOOST_FORCEINLINE void fence_after(memory_order order) BOOST_NOEXCEPT
{
if ((static_cast< unsigned int >(order) & (static_cast< unsigned int >(memory_order_consume) | static_cast< unsigned int >(memory_order_acquire))) != 0u)
__asm__ __volatile__ ("mb" ::: "memory");
}
static BOOST_FORCEINLINE void fence_after_store(memory_order order) BOOST_NOEXCEPT
{
if (order == memory_order_seq_cst)
__asm__ __volatile__ ("mb" ::: "memory");
}
};
template< bool Signed, bool Interprocess >
struct core_arch_operations< 4u, Signed, Interprocess > :
public core_arch_operations_gcc_alpha_base
{
typedef typename storage_traits< 4u >::type storage_type;
static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 4u;
static BOOST_CONSTEXPR_OR_CONST std::size_t storage_alignment = 4u;
static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
static BOOST_CONSTEXPR_OR_CONST bool is_interprocess = Interprocess;
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
fence_before(order);
storage = v;
fence_after_store(order);
}
static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
{
storage_type v = storage;
fence_after(order);
return v;
}
static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, tmp;
fence_before(order);
__asm__ __volatile__
(
"1:\n\t"
"mov %3, %1\n\t"
"ldl_l %0, %2\n\t"
"stl_c %1, %2\n\t"
"beq %1, 2f\n\t"
".subsection 2\n\t"
"2: br 1b\n\t"
".previous\n\t"
: "=&r" (original), // %0
"=&r" (tmp) // %1
: "m" (storage), // %2
"r" (v) // %3
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
fence_after(order);
return original;
}
static BOOST_FORCEINLINE bool compare_exchange_weak(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
{
fence_before(success_order);
int success;
storage_type current;
__asm__ __volatile__
(
"1:\n\t"
"ldl_l %2, %4\n\t" // current = *(&storage)
"cmpeq %2, %0, %3\n\t" // success = current == expected
"mov %2, %0\n\t" // expected = current
"beq %3, 2f\n\t" // if (success == 0) goto end
"stl_c %1, %4\n\t" // storage = desired; desired = store succeeded
"mov %1, %3\n\t" // success = desired
"2:\n\t"
: "+r" (expected), // %0
"+r" (desired), // %1
"=&r" (current), // %2
"=&r" (success) // %3
: "m" (storage) // %4
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
if (success)
fence_after(success_order);
else
fence_after(failure_order);
return !!success;
}
static BOOST_FORCEINLINE bool compare_exchange_strong(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
{
int success;
storage_type current, tmp;
fence_before(success_order);
__asm__ __volatile__
(
"1:\n\t"
"mov %5, %1\n\t" // tmp = desired
"ldl_l %2, %4\n\t" // current = *(&storage)
"cmpeq %2, %0, %3\n\t" // success = current == expected
"mov %2, %0\n\t" // expected = current
"beq %3, 2f\n\t" // if (success == 0) goto end
"stl_c %1, %4\n\t" // storage = tmp; tmp = store succeeded
"beq %1, 3f\n\t" // if (tmp == 0) goto retry
"mov %1, %3\n\t" // success = tmp
"2:\n\t"
".subsection 2\n\t"
"3: br 1b\n\t"
".previous\n\t"
: "+r" (expected), // %0
"=&r" (tmp), // %1
"=&r" (current), // %2
"=&r" (success) // %3
: "m" (storage), // %4
"r" (desired) // %5
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
if (success)
fence_after(success_order);
else
fence_after(failure_order);
return !!success;
}
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, modified;
fence_before(order);
__asm__ __volatile__
(
"1:\n\t"
"ldl_l %0, %2\n\t"
"addl %0, %3, %1\n\t"
"stl_c %1, %2\n\t"
"beq %1, 2f\n\t"
".subsection 2\n\t"
"2: br 1b\n\t"
".previous\n\t"
: "=&r" (original), // %0
"=&r" (modified) // %1
: "m" (storage), // %2
"r" (v) // %3
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
fence_after(order);
return original;
}
static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, modified;
fence_before(order);
__asm__ __volatile__
(
"1:\n\t"
"ldl_l %0, %2\n\t"
"subl %0, %3, %1\n\t"
"stl_c %1, %2\n\t"
"beq %1, 2f\n\t"
".subsection 2\n\t"
"2: br 1b\n\t"
".previous\n\t"
: "=&r" (original), // %0
"=&r" (modified) // %1
: "m" (storage), // %2
"r" (v) // %3
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
fence_after(order);
return original;
}
static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, modified;
fence_before(order);
__asm__ __volatile__
(
"1:\n\t"
"ldl_l %0, %2\n\t"
"and %0, %3, %1\n\t"
"stl_c %1, %2\n\t"
"beq %1, 2f\n\t"
".subsection 2\n\t"
"2: br 1b\n\t"
".previous\n\t"
: "=&r" (original), // %0
"=&r" (modified) // %1
: "m" (storage), // %2
"r" (v) // %3
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
fence_after(order);
return original;
}
static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, modified;
fence_before(order);
__asm__ __volatile__
(
"1:\n\t"
"ldl_l %0, %2\n\t"
"bis %0, %3, %1\n\t"
"stl_c %1, %2\n\t"
"beq %1, 2f\n\t"
".subsection 2\n\t"
"2: br 1b\n\t"
".previous\n\t"
: "=&r" (original), // %0
"=&r" (modified) // %1
: "m" (storage), // %2
"r" (v) // %3
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
fence_after(order);
return original;
}
static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, modified;
fence_before(order);
__asm__ __volatile__
(
"1:\n\t"
"ldl_l %0, %2\n\t"
"xor %0, %3, %1\n\t"
"stl_c %1, %2\n\t"
"beq %1, 2f\n\t"
".subsection 2\n\t"
"2: br 1b\n\t"
".previous\n\t"
: "=&r" (original), // %0
"=&r" (modified) // %1
: "m" (storage), // %2
"r" (v) // %3
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
fence_after(order);
return original;
}
static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
return !!exchange(storage, (storage_type)1, order);
}
static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
store(storage, 0, order);
}
};
template< bool Interprocess >
struct core_arch_operations< 1u, false, Interprocess > :
public core_arch_operations< 4u, false, Interprocess >
{
typedef core_arch_operations< 4u, false, Interprocess > base_type;
typedef typename base_type::storage_type storage_type;
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, modified;
base_type::fence_before(order);
__asm__ __volatile__
(
"1:\n\t"
"ldl_l %0, %2\n\t"
"addl %0, %3, %1\n\t"
"zapnot %1, 1, %1\n\t"
"stl_c %1, %2\n\t"
"beq %1, 2f\n\t"
".subsection 2\n\t"
"2: br 1b\n\t"
".previous\n\t"
: "=&r" (original), // %0
"=&r" (modified) // %1
: "m" (storage), // %2
"r" (v) // %3
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
base_type::fence_after(order);
return original;
}
static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, modified;
base_type::fence_before(order);
__asm__ __volatile__
(
"1:\n\t"
"ldl_l %0, %2\n\t"
"subl %0, %3, %1\n\t"
"zapnot %1, 1, %1\n\t"
"stl_c %1, %2\n\t"
"beq %1, 2f\n\t"
".subsection 2\n\t"
"2: br 1b\n\t"
".previous\n\t"
: "=&r" (original), // %0
"=&r" (modified) // %1
: "m" (storage), // %2
"r" (v) // %3
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
base_type::fence_after(order);
return original;
}
};
template< bool Interprocess >
struct core_arch_operations< 1u, true, Interprocess > :
public core_arch_operations< 4u, true, Interprocess >
{
typedef core_arch_operations< 4u, true, Interprocess > base_type;
typedef typename base_type::storage_type storage_type;
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, modified;
base_type::fence_before(order);
__asm__ __volatile__
(
"1:\n\t"
"ldl_l %0, %2\n\t"
"addl %0, %3, %1\n\t"
"sextb %1, %1\n\t"
"stl_c %1, %2\n\t"
"beq %1, 2f\n\t"
".subsection 2\n\t"
"2: br 1b\n\t"
".previous\n\t"
: "=&r" (original), // %0
"=&r" (modified) // %1
: "m" (storage), // %2
"r" (v) // %3
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
base_type::fence_after(order);
return original;
}
static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, modified;
base_type::fence_before(order);
__asm__ __volatile__
(
"1:\n\t"
"ldl_l %0, %2\n\t"
"subl %0, %3, %1\n\t"
"sextb %1, %1\n\t"
"stl_c %1, %2\n\t"
"beq %1, 2f\n\t"
".subsection 2\n\t"
"2: br 1b\n\t"
".previous\n\t"
: "=&r" (original), // %0
"=&r" (modified) // %1
: "m" (storage), // %2
"r" (v) // %3
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
base_type::fence_after(order);
return original;
}
};
template< bool Interprocess >
struct core_arch_operations< 2u, false, Interprocess > :
public core_arch_operations< 4u, false, Interprocess >
{
typedef core_arch_operations< 4u, false, Interprocess > base_type;
typedef typename base_type::storage_type storage_type;
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, modified;
base_type::fence_before(order);
__asm__ __volatile__
(
"1:\n\t"
"ldl_l %0, %2\n\t"
"addl %0, %3, %1\n\t"
"zapnot %1, 3, %1\n\t"
"stl_c %1, %2\n\t"
"beq %1, 2f\n\t"
".subsection 2\n\t"
"2: br 1b\n\t"
".previous\n\t"
: "=&r" (original), // %0
"=&r" (modified) // %1
: "m" (storage), // %2
"r" (v) // %3
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
base_type::fence_after(order);
return original;
}
static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, modified;
base_type::fence_before(order);
__asm__ __volatile__
(
"1:\n\t"
"ldl_l %0, %2\n\t"
"subl %0, %3, %1\n\t"
"zapnot %1, 3, %1\n\t"
"stl_c %1, %2\n\t"
"beq %1, 2f\n\t"
".subsection 2\n\t"
"2: br 1b\n\t"
".previous\n\t"
: "=&r" (original), // %0
"=&r" (modified) // %1
: "m" (storage), // %2
"r" (v) // %3
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
base_type::fence_after(order);
return original;
}
};
template< bool Interprocess >
struct core_arch_operations< 2u, true, Interprocess > :
public core_arch_operations< 4u, true, Interprocess >
{
typedef core_arch_operations< 4u, true, Interprocess > base_type;
typedef typename base_type::storage_type storage_type;
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, modified;
base_type::fence_before(order);
__asm__ __volatile__
(
"1:\n\t"
"ldl_l %0, %2\n\t"
"addl %0, %3, %1\n\t"
"sextw %1, %1\n\t"
"stl_c %1, %2\n\t"
"beq %1, 2f\n\t"
".subsection 2\n\t"
"2: br 1b\n\t"
".previous\n\t"
: "=&r" (original), // %0
"=&r" (modified) // %1
: "m" (storage), // %2
"r" (v) // %3
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
base_type::fence_after(order);
return original;
}
static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, modified;
base_type::fence_before(order);
__asm__ __volatile__
(
"1:\n\t"
"ldl_l %0, %2\n\t"
"subl %0, %3, %1\n\t"
"sextw %1, %1\n\t"
"stl_c %1, %2\n\t"
"beq %1, 2f\n\t"
".subsection 2\n\t"
"2: br 1b\n\t"
".previous\n\t"
: "=&r" (original), // %0
"=&r" (modified) // %1
: "m" (storage), // %2
"r" (v) // %3
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
base_type::fence_after(order);
return original;
}
};
template< bool Signed, bool Interprocess >
struct core_arch_operations< 8u, Signed, Interprocess > :
public core_arch_operations_gcc_alpha_base
{
typedef typename storage_traits< 8u >::type storage_type;
static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 8u;
static BOOST_CONSTEXPR_OR_CONST std::size_t storage_alignment = 8u;
static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
static BOOST_CONSTEXPR_OR_CONST bool is_interprocess = Interprocess;
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
fence_before(order);
storage = v;
fence_after_store(order);
}
static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
{
storage_type v = storage;
fence_after(order);
return v;
}
static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, tmp;
fence_before(order);
__asm__ __volatile__
(
"1:\n\t"
"mov %3, %1\n\t"
"ldq_l %0, %2\n\t"
"stq_c %1, %2\n\t"
"beq %1, 2f\n\t"
".subsection 2\n\t"
"2: br 1b\n\t"
".previous\n\t"
: "=&r" (original), // %0
"=&r" (tmp) // %1
: "m" (storage), // %2
"r" (v) // %3
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
fence_after(order);
return original;
}
static BOOST_FORCEINLINE bool compare_exchange_weak(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
{
fence_before(success_order);
int success;
storage_type current;
__asm__ __volatile__
(
"1:\n\t"
"ldq_l %2, %4\n\t" // current = *(&storage)
"cmpeq %2, %0, %3\n\t" // success = current == expected
"mov %2, %0\n\t" // expected = current
"beq %3, 2f\n\t" // if (success == 0) goto end
"stq_c %1, %4\n\t" // storage = desired; desired = store succeeded
"mov %1, %3\n\t" // success = desired
"2:\n\t"
: "+r" (expected), // %0
"+r" (desired), // %1
"=&r" (current), // %2
"=&r" (success) // %3
: "m" (storage) // %4
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
if (success)
fence_after(success_order);
else
fence_after(failure_order);
return !!success;
}
static BOOST_FORCEINLINE bool compare_exchange_strong(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
{
int success;
storage_type current, tmp;
fence_before(success_order);
__asm__ __volatile__
(
"1:\n\t"
"mov %5, %1\n\t" // tmp = desired
"ldq_l %2, %4\n\t" // current = *(&storage)
"cmpeq %2, %0, %3\n\t" // success = current == expected
"mov %2, %0\n\t" // expected = current
"beq %3, 2f\n\t" // if (success == 0) goto end
"stq_c %1, %4\n\t" // storage = tmp; tmp = store succeeded
"beq %1, 3f\n\t" // if (tmp == 0) goto retry
"mov %1, %3\n\t" // success = tmp
"2:\n\t"
".subsection 2\n\t"
"3: br 1b\n\t"
".previous\n\t"
: "+r" (expected), // %0
"=&r" (tmp), // %1
"=&r" (current), // %2
"=&r" (success) // %3
: "m" (storage), // %4
"r" (desired) // %5
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
if (success)
fence_after(success_order);
else
fence_after(failure_order);
return !!success;
}
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, modified;
fence_before(order);
__asm__ __volatile__
(
"1:\n\t"
"ldq_l %0, %2\n\t"
"addq %0, %3, %1\n\t"
"stq_c %1, %2\n\t"
"beq %1, 2f\n\t"
".subsection 2\n\t"
"2: br 1b\n\t"
".previous\n\t"
: "=&r" (original), // %0
"=&r" (modified) // %1
: "m" (storage), // %2
"r" (v) // %3
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
fence_after(order);
return original;
}
static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, modified;
fence_before(order);
__asm__ __volatile__
(
"1:\n\t"
"ldq_l %0, %2\n\t"
"subq %0, %3, %1\n\t"
"stq_c %1, %2\n\t"
"beq %1, 2f\n\t"
".subsection 2\n\t"
"2: br 1b\n\t"
".previous\n\t"
: "=&r" (original), // %0
"=&r" (modified) // %1
: "m" (storage), // %2
"r" (v) // %3
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
fence_after(order);
return original;
}
static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, modified;
fence_before(order);
__asm__ __volatile__
(
"1:\n\t"
"ldq_l %0, %2\n\t"
"and %0, %3, %1\n\t"
"stq_c %1, %2\n\t"
"beq %1, 2f\n\t"
".subsection 2\n\t"
"2: br 1b\n\t"
".previous\n\t"
: "=&r" (original), // %0
"=&r" (modified) // %1
: "m" (storage), // %2
"r" (v) // %3
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
fence_after(order);
return original;
}
static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, modified;
fence_before(order);
__asm__ __volatile__
(
"1:\n\t"
"ldq_l %0, %2\n\t"
"bis %0, %3, %1\n\t"
"stq_c %1, %2\n\t"
"beq %1, 2f\n\t"
".subsection 2\n\t"
"2: br 1b\n\t"
".previous\n\t"
: "=&r" (original), // %0
"=&r" (modified) // %1
: "m" (storage), // %2
"r" (v) // %3
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
fence_after(order);
return original;
}
static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, modified;
fence_before(order);
__asm__ __volatile__
(
"1:\n\t"
"ldq_l %0, %2\n\t"
"xor %0, %3, %1\n\t"
"stq_c %1, %2\n\t"
"beq %1, 2f\n\t"
".subsection 2\n\t"
"2: br 1b\n\t"
".previous\n\t"
: "=&r" (original), // %0
"=&r" (modified) // %1
: "m" (storage), // %2
"r" (v) // %3
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
fence_after(order);
return original;
}
static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
return !!exchange(storage, (storage_type)1, order);
}
static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
store(storage, (storage_type)0, order);
}
};
} // namespace detail
} // namespace atomics
} // namespace boost
#include <boost/atomic/detail/footer.hpp>
#endif // BOOST_ATOMIC_DETAIL_CORE_ARCH_OPS_GCC_ALPHA_HPP_INCLUDED_
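
What every `ldl_l`/`stl_c` loop in this file computes, restated with portable C++ atomics. This is a semantic model only: real load-locked/store-conditional fails the store on any intervening write to the location, so it does not suffer the ABA window a CAS loop can have.

```
#include <atomic>
#include <cstdint>

std::uint32_t fetch_add_llsc_style(std::atomic< std::uint32_t >& storage, std::uint32_t v)
{
    std::uint32_t original = storage.load(std::memory_order_relaxed); // 'ldl_l'
    std::uint32_t modified;
    do
    {
        modified = original + v;                                      // 'addl'
    }
    while (!storage.compare_exchange_weak(                            // 'stl_c' + retry
        original, modified, std::memory_order_seq_cst, std::memory_order_relaxed));
    return original;
}
```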

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -0,0 +1,215 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2010 Helge Bahmann
* Copyright (c) 2013 Tim Blechmann
* Copyright (c) 2014 Andrey Semashev
*/
/*!
* \file atomic/detail/core_arch_ops_gcc_sparc.hpp
*
* This header contains implementation of the \c core_arch_operations template.
*/
#ifndef BOOST_ATOMIC_DETAIL_CORE_ARCH_OPS_GCC_SPARC_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_CORE_ARCH_OPS_GCC_SPARC_HPP_INCLUDED_
#include <cstddef>
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/storage_traits.hpp>
#include <boost/atomic/detail/core_arch_operations_fwd.hpp>
#include <boost/atomic/detail/core_ops_cas_based.hpp>
#include <boost/atomic/detail/cas_based_exchange.hpp>
#include <boost/atomic/detail/extending_cas_based_arithmetic.hpp>
#include <boost/atomic/detail/header.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
namespace boost {
namespace atomics {
namespace detail {
struct gcc_sparc_cas_base
{
static BOOST_CONSTEXPR_OR_CONST bool full_cas_based = true;
static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = true;
static BOOST_FORCEINLINE void fence_before(memory_order order) BOOST_NOEXCEPT
{
if (order == memory_order_seq_cst)
__asm__ __volatile__ ("membar #Sync" ::: "memory");
else if ((static_cast< unsigned int >(order) & static_cast< unsigned int >(memory_order_release)) != 0u)
__asm__ __volatile__ ("membar #StoreStore | #LoadStore" ::: "memory");
}
static BOOST_FORCEINLINE void fence_after(memory_order order) BOOST_NOEXCEPT
{
if (order == memory_order_seq_cst)
__asm__ __volatile__ ("membar #Sync" ::: "memory");
else if ((static_cast< unsigned int >(order) & (static_cast< unsigned int >(memory_order_consume) | static_cast< unsigned int >(memory_order_acquire))) != 0u)
__asm__ __volatile__ ("membar #StoreStore | #LoadStore" ::: "memory");
}
static BOOST_FORCEINLINE void fence_after_store(memory_order order) BOOST_NOEXCEPT
{
if (order == memory_order_seq_cst)
__asm__ __volatile__ ("membar #Sync" ::: "memory");
}
};
template< bool Signed, bool Interprocess >
struct gcc_sparc_cas32 :
public gcc_sparc_cas_base
{
typedef typename storage_traits< 4u >::type storage_type;
static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 4u;
static BOOST_CONSTEXPR_OR_CONST std::size_t storage_alignment = 4u;
static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
static BOOST_CONSTEXPR_OR_CONST bool is_interprocess = Interprocess;
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
fence_before(order);
storage = v;
fence_after_store(order);
}
static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
{
storage_type v = storage;
fence_after(order);
return v;
}
static BOOST_FORCEINLINE bool compare_exchange_strong(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
{
fence_before(success_order);
storage_type previous = expected;
__asm__ __volatile__
(
"cas [%1], %2, %0"
: "+r" (desired)
: "r" (&storage), "r" (previous)
: "memory"
);
const bool success = (desired == previous);
if (success)
fence_after(success_order);
else
fence_after(failure_order);
expected = desired;
return success;
}
static BOOST_FORCEINLINE bool compare_exchange_weak(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
{
return compare_exchange_strong(storage, expected, desired, success_order, failure_order);
}
static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
fence_before(order);
__asm__ __volatile__
(
"swap [%1], %0"
: "+r" (v)
: "r" (&storage)
: "memory"
);
fence_after(order);
return v;
}
};
template< bool Signed, bool Interprocess >
struct core_arch_operations< 4u, Signed, Interprocess > :
public core_operations_cas_based< gcc_sparc_cas32< Signed, Interprocess > >
{
};
template< bool Signed, bool Interprocess >
struct core_arch_operations< 1u, Signed, Interprocess > :
public extending_cas_based_arithmetic< core_arch_operations< 4u, Signed, Interprocess >, 1u, Signed >
{
};
template< bool Signed, bool Interprocess >
struct core_arch_operations< 2u, Signed, Interprocess > :
public extending_cas_based_arithmetic< core_arch_operations< 4u, Signed, Interprocess >, 2u, Signed >
{
};
template< bool Signed, bool Interprocess >
struct gcc_sparc_cas64 :
public gcc_sparc_cas_base
{
typedef typename storage_traits< 8u >::type storage_type;
static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 8u;
static BOOST_CONSTEXPR_OR_CONST std::size_t storage_alignment = 8u;
static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
static BOOST_CONSTEXPR_OR_CONST bool is_interprocess = Interprocess;
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
fence_before(order);
storage = v;
fence_after_store(order);
}
static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
{
storage_type v = storage;
fence_after(order);
return v;
}
static BOOST_FORCEINLINE bool compare_exchange_strong(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
{
fence_before(success_order);
storage_type previous = expected;
__asm__ __volatile__
(
"casx [%1], %2, %0"
: "+r" (desired)
: "r" (&storage), "r" (previous)
: "memory"
);
const bool success = (desired == previous);
if (success)
fence_after(success_order);
else
fence_after(failure_order);
expected = desired;
return success;
}
static BOOST_FORCEINLINE bool compare_exchange_weak(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
{
return compare_exchange_strong(storage, expected, desired, success_order, failure_order);
}
};
template< bool Signed, bool Interprocess >
struct core_arch_operations< 8u, Signed, Interprocess > :
public core_operations_cas_based< cas_based_exchange< gcc_sparc_cas64< Signed, Interprocess > > >
{
};
} // namespace detail
} // namespace atomics
} // namespace boost
#include <boost/atomic/detail/footer.hpp>
#endif // BOOST_ATOMIC_DETAIL_CORE_ARCH_OPS_GCC_SPARC_HPP_INCLUDED_
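
For readers unfamiliar with SPARC: `cas [addr], cmp, swap` compares the word at `addr` with register `cmp` and, on a match, stores `swap`; in either case the old memory value is written back into the `swap` register. A plain C++ model of the `compare_exchange_strong` above (not atomic, semantics only):

```
#include <cstdint>

struct cas_result
{
    std::uint32_t old_val;
    bool success;
};

cas_result sparc_cas_model(std::uint32_t& storage, std::uint32_t expected, std::uint32_t desired)
{
    std::uint32_t observed = storage;   // the hardware always reads the old value...
    if (observed == expected)
        storage = desired;              // ...and stores only on a match
    // The 'desired' register comes back holding 'observed', which is why the
    // code above saves 'previous' and compares it against 'desired' afterwards.
    return cas_result{ observed, observed == expected };
}
```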

File diff suppressed because it is too large

View File

@ -0,0 +1,828 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2009 Helge Bahmann
* Copyright (c) 2012 Tim Blechmann
* Copyright (c) 2014 Andrey Semashev
*/
/*!
* \file atomic/detail/core_arch_ops_msvc_arm.hpp
*
* This header contains implementation of the \c core_arch_operations template.
*/
#ifndef BOOST_ATOMIC_DETAIL_CORE_ARCH_OPS_MSVC_ARM_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_CORE_ARCH_OPS_MSVC_ARM_HPP_INCLUDED_
#include <cstddef>
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/interlocked.hpp>
#include <boost/atomic/detail/storage_traits.hpp>
#include <boost/atomic/detail/core_arch_operations_fwd.hpp>
#include <boost/atomic/detail/type_traits/make_signed.hpp>
#include <boost/atomic/detail/ops_msvc_common.hpp>
#include <boost/atomic/detail/fence_arch_operations.hpp>
#include <boost/atomic/detail/header.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
extern "C" {
__int8 __iso_volatile_load8(const volatile __int8*);
__int16 __iso_volatile_load16(const volatile __int16*);
__int32 __iso_volatile_load32(const volatile __int32*);
__int64 __iso_volatile_load64(const volatile __int64*);
void __iso_volatile_store8(volatile __int8*, __int8);
void __iso_volatile_store16(volatile __int16*, __int16);
void __iso_volatile_store32(volatile __int32*, __int32);
void __iso_volatile_store64(volatile __int64*, __int64);
}
#if defined(BOOST_MSVC)
#pragma intrinsic(__iso_volatile_load8)
#pragma intrinsic(__iso_volatile_load16)
#pragma intrinsic(__iso_volatile_load32)
#pragma intrinsic(__iso_volatile_load64)
#pragma intrinsic(__iso_volatile_store8)
#pragma intrinsic(__iso_volatile_store16)
#pragma intrinsic(__iso_volatile_store32)
#pragma intrinsic(__iso_volatile_store64)
#endif
#define BOOST_ATOMIC_DETAIL_ARM_LOAD8(p) __iso_volatile_load8((const volatile __int8*)(p))
#define BOOST_ATOMIC_DETAIL_ARM_LOAD16(p) __iso_volatile_load16((const volatile __int16*)(p))
#define BOOST_ATOMIC_DETAIL_ARM_LOAD32(p) __iso_volatile_load32((const volatile __int32*)(p))
#define BOOST_ATOMIC_DETAIL_ARM_LOAD64(p) __iso_volatile_load64((const volatile __int64*)(p))
#define BOOST_ATOMIC_DETAIL_ARM_STORE8(p, v) __iso_volatile_store8((volatile __int8*)(p), (__int8)(v))
#define BOOST_ATOMIC_DETAIL_ARM_STORE16(p, v) __iso_volatile_store16((volatile __int16*)(p), (__int16)(v))
#define BOOST_ATOMIC_DETAIL_ARM_STORE32(p, v) __iso_volatile_store32((volatile __int32*)(p), (__int32)(v))
#define BOOST_ATOMIC_DETAIL_ARM_STORE64(p, v) __iso_volatile_store64((volatile __int64*)(p), (__int64)(v))
namespace boost {
namespace atomics {
namespace detail {
// A note about memory_order_consume. Technically, this architecture allows avoiding an
// unnecessary memory barrier after a consume load, since it supports data dependency ordering.
// However, some compiler optimizations may break seemingly valid code that relies on data
// dependency tracking by injecting bogus branches to aid out-of-order execution.
// This may happen not only in Boost.Atomic code but also in the user's code, which we have no
// control over. See this thread: http://lists.boost.org/Archives/boost/2014/06/213890.php.
// For this reason we promote memory_order_consume to memory_order_acquire.
struct core_arch_operations_msvc_arm_base
{
static BOOST_CONSTEXPR_OR_CONST bool full_cas_based = false;
static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = true;
static BOOST_FORCEINLINE void fence_before_store(memory_order order) BOOST_NOEXCEPT
{
BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
if ((static_cast< unsigned int >(order) & static_cast< unsigned int >(memory_order_release)) != 0u)
fence_arch_operations::hardware_full_fence();
BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
}
static BOOST_FORCEINLINE void fence_after_store(memory_order order) BOOST_NOEXCEPT
{
BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
if (order == memory_order_seq_cst)
fence_arch_operations::hardware_full_fence();
BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
}
static BOOST_FORCEINLINE void fence_after_load(memory_order order) BOOST_NOEXCEPT
{
BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
if ((static_cast< unsigned int >(order) & (static_cast< unsigned int >(memory_order_consume) | static_cast< unsigned int >(memory_order_acquire))) != 0u)
fence_arch_operations::hardware_full_fence();
BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
}
static BOOST_FORCEINLINE BOOST_CONSTEXPR memory_order cas_common_order(memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
{
// Combine order flags together and promote memory_order_consume to memory_order_acquire
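// boost::memory_order is a bit mask (relaxed = 0, consume = 1, acquire = 2,
// release = 4, acq_rel = 6, seq_cst = 14): OR-ing both orders keeps the
// strongest requirements, and shifting the consume bit left by one position
// turns it into the acquire bit.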
return static_cast< memory_order >(((static_cast< unsigned int >(failure_order) | static_cast< unsigned int >(success_order)) & ~static_cast< unsigned int >(memory_order_consume))
| (((static_cast< unsigned int >(failure_order) | static_cast< unsigned int >(success_order)) & static_cast< unsigned int >(memory_order_consume)) << 1u));
}
};
template< std::size_t Size, bool Signed, bool Interprocess, typename Derived >
struct core_arch_operations_msvc_arm :
public core_arch_operations_msvc_arm_base
{
typedef typename storage_traits< Size >::type storage_type;
static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = Size;
static BOOST_CONSTEXPR_OR_CONST std::size_t storage_alignment = storage_traits< Size >::alignment;
static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
static BOOST_CONSTEXPR_OR_CONST bool is_interprocess = Interprocess;
static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
typedef typename boost::atomics::detail::make_signed< storage_type >::type signed_storage_type;
return Derived::fetch_add(storage, static_cast< storage_type >(-static_cast< signed_storage_type >(v)), order);
}
static BOOST_FORCEINLINE bool compare_exchange_weak(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
{
return Derived::compare_exchange_strong(storage, expected, desired, success_order, failure_order);
}
static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
return !!Derived::exchange(storage, (storage_type)1, order);
}
static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
Derived::store(storage, (storage_type)0, order);
}
};
template< bool Signed, bool Interprocess >
struct core_arch_operations< 1u, Signed, Interprocess > :
public core_arch_operations_msvc_arm< 1u, Signed, Interprocess, core_arch_operations< 1u, Signed, Interprocess > >
{
typedef core_arch_operations_msvc_arm< 1u, Signed, Interprocess, core_arch_operations< 1u, Signed, Interprocess > > base_type;
typedef typename base_type::storage_type storage_type;
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
base_type::fence_before_store(order);
BOOST_ATOMIC_DETAIL_ARM_STORE8(&storage, v);
base_type::fence_after_store(order);
}
static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
{
storage_type v = BOOST_ATOMIC_DETAIL_ARM_LOAD8(&storage);
base_type::fence_after_load(order);
return v;
}
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
switch (order)
{
case memory_order_relaxed:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD8_RELAXED(&storage, v));
break;
case memory_order_consume:
case memory_order_acquire:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD8_ACQUIRE(&storage, v));
break;
case memory_order_release:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD8_RELEASE(&storage, v));
break;
case memory_order_acq_rel:
case memory_order_seq_cst:
default:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD8(&storage, v));
break;
}
return v;
}
static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
switch (order)
{
case memory_order_relaxed:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE8_RELAXED(&storage, v));
break;
case memory_order_consume:
case memory_order_acquire:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE8_ACQUIRE(&storage, v));
break;
case memory_order_release:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE8_RELEASE(&storage, v));
break;
case memory_order_acq_rel:
case memory_order_seq_cst:
default:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE8(&storage, v));
break;
}
return v;
}
static BOOST_FORCEINLINE bool compare_exchange_strong(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
{
storage_type previous = expected, old_val;
switch (base_type::cas_common_order(success_order, failure_order))
{
case memory_order_relaxed:
old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8_RELAXED(&storage, desired, previous));
break;
case memory_order_consume:
case memory_order_acquire:
old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8_ACQUIRE(&storage, desired, previous));
break;
case memory_order_release:
old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8_RELEASE(&storage, desired, previous));
break;
case memory_order_acq_rel:
case memory_order_seq_cst:
default:
old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8(&storage, desired, previous));
break;
}
expected = old_val;
return (previous == old_val);
}
static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
switch (order)
{
case memory_order_relaxed:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND8_RELAXED(&storage, v));
break;
case memory_order_consume:
case memory_order_acquire:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND8_ACQUIRE(&storage, v));
break;
case memory_order_release:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND8_RELEASE(&storage, v));
break;
case memory_order_acq_rel:
case memory_order_seq_cst:
default:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND8(&storage, v));
break;
}
return v;
}
static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
switch (order)
{
case memory_order_relaxed:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR8_RELAXED(&storage, v));
break;
case memory_order_consume:
case memory_order_acquire:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR8_ACQUIRE(&storage, v));
break;
case memory_order_release:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR8_RELEASE(&storage, v));
break;
case memory_order_acq_rel:
case memory_order_seq_cst:
default:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR8(&storage, v));
break;
}
return v;
}
static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
switch (order)
{
case memory_order_relaxed:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR8_RELAXED(&storage, v));
break;
case memory_order_consume:
case memory_order_acquire:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR8_ACQUIRE(&storage, v));
break;
case memory_order_release:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR8_RELEASE(&storage, v));
break;
case memory_order_acq_rel:
case memory_order_seq_cst:
default:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR8(&storage, v));
break;
}
return v;
}
};
template< bool Signed, bool Interprocess >
struct core_arch_operations< 2u, Signed, Interprocess > :
public core_arch_operations_msvc_arm< 2u, Signed, Interprocess, core_arch_operations< 2u, Signed, Interprocess > >
{
typedef core_arch_operations_msvc_arm< 2u, Signed, Interprocess, core_arch_operations< 2u, Signed, Interprocess > > base_type;
typedef typename base_type::storage_type storage_type;
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
base_type::fence_before_store(order);
BOOST_ATOMIC_DETAIL_ARM_STORE16(&storage, v);
base_type::fence_after_store(order);
}
static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
{
storage_type v = BOOST_ATOMIC_DETAIL_ARM_LOAD16(&storage);
base_type::fence_after_load(order);
return v;
}
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
switch (order)
{
case memory_order_relaxed:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD16_RELAXED(&storage, v));
break;
case memory_order_consume:
case memory_order_acquire:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD16_ACQUIRE(&storage, v));
break;
case memory_order_release:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD16_RELEASE(&storage, v));
break;
case memory_order_acq_rel:
case memory_order_seq_cst:
default:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD16(&storage, v));
break;
}
return v;
}
static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
switch (order)
{
case memory_order_relaxed:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE16_RELAXED(&storage, v));
break;
case memory_order_consume:
case memory_order_acquire:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE16_ACQUIRE(&storage, v));
break;
case memory_order_release:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE16_RELEASE(&storage, v));
break;
case memory_order_acq_rel:
case memory_order_seq_cst:
default:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE16(&storage, v));
break;
}
return v;
}
static BOOST_FORCEINLINE bool compare_exchange_strong(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
{
storage_type previous = expected, old_val;
switch (base_type::cas_common_order(success_order, failure_order))
{
case memory_order_relaxed:
old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16_RELAXED(&storage, desired, previous));
break;
case memory_order_consume:
case memory_order_acquire:
old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16_ACQUIRE(&storage, desired, previous));
break;
case memory_order_release:
old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16_RELEASE(&storage, desired, previous));
break;
case memory_order_acq_rel:
case memory_order_seq_cst:
default:
old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16(&storage, desired, previous));
break;
}
expected = old_val;
return (previous == old_val);
}
static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
switch (order)
{
case memory_order_relaxed:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND16_RELAXED(&storage, v));
break;
case memory_order_consume:
case memory_order_acquire:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND16_ACQUIRE(&storage, v));
break;
case memory_order_release:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND16_RELEASE(&storage, v));
break;
case memory_order_acq_rel:
case memory_order_seq_cst:
default:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND16(&storage, v));
break;
}
return v;
}
static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
switch (order)
{
case memory_order_relaxed:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR16_RELAXED(&storage, v));
break;
case memory_order_consume:
case memory_order_acquire:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR16_ACQUIRE(&storage, v));
break;
case memory_order_release:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR16_RELEASE(&storage, v));
break;
case memory_order_acq_rel:
case memory_order_seq_cst:
default:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR16(&storage, v));
break;
}
return v;
}
static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
switch (order)
{
case memory_order_relaxed:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR16_RELAXED(&storage, v));
break;
case memory_order_consume:
case memory_order_acquire:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR16_ACQUIRE(&storage, v));
break;
case memory_order_release:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR16_RELEASE(&storage, v));
break;
case memory_order_acq_rel:
case memory_order_seq_cst:
default:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR16(&storage, v));
break;
}
return v;
}
};
template< bool Signed, bool Interprocess >
struct core_arch_operations< 4u, Signed, Interprocess > :
public core_arch_operations_msvc_arm< 4u, Signed, Interprocess, core_arch_operations< 4u, Signed, Interprocess > >
{
typedef core_arch_operations_msvc_arm< 4u, Signed, Interprocess, core_arch_operations< 4u, Signed, Interprocess > > base_type;
typedef typename base_type::storage_type storage_type;
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
base_type::fence_before_store(order);
BOOST_ATOMIC_DETAIL_ARM_STORE32(&storage, v);
base_type::fence_after_store(order);
}
static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
{
storage_type v = BOOST_ATOMIC_DETAIL_ARM_LOAD32(&storage);
base_type::fence_after_load(order);
return v;
}
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
switch (order)
{
case memory_order_relaxed:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_RELAXED(&storage, v));
break;
case memory_order_consume:
case memory_order_acquire:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_ACQUIRE(&storage, v));
break;
case memory_order_release:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_RELEASE(&storage, v));
break;
case memory_order_acq_rel:
case memory_order_seq_cst:
default:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(&storage, v));
break;
}
return v;
}
static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
switch (order)
{
case memory_order_relaxed:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_RELAXED(&storage, v));
break;
case memory_order_consume:
case memory_order_acquire:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ACQUIRE(&storage, v));
break;
case memory_order_release:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_RELEASE(&storage, v));
break;
case memory_order_acq_rel:
case memory_order_seq_cst:
default:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE(&storage, v));
break;
}
return v;
}
static BOOST_FORCEINLINE bool compare_exchange_strong(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
{
storage_type previous = expected, old_val;
switch (base_type::cas_common_order(success_order, failure_order))
{
case memory_order_relaxed:
old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_RELAXED(&storage, desired, previous));
break;
case memory_order_consume:
case memory_order_acquire:
old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_ACQUIRE(&storage, desired, previous));
break;
case memory_order_release:
old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_RELEASE(&storage, desired, previous));
break;
case memory_order_acq_rel:
case memory_order_seq_cst:
default:
old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(&storage, desired, previous));
break;
}
expected = old_val;
return (previous == old_val);
}
static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
switch (order)
{
case memory_order_relaxed:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND_RELAXED(&storage, v));
break;
case memory_order_consume:
case memory_order_acquire:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND_ACQUIRE(&storage, v));
break;
case memory_order_release:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND_RELEASE(&storage, v));
break;
case memory_order_acq_rel:
case memory_order_seq_cst:
default:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND(&storage, v));
break;
}
return v;
}
static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
switch (order)
{
case memory_order_relaxed:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR_RELAXED(&storage, v));
break;
case memory_order_consume:
case memory_order_acquire:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR_ACQUIRE(&storage, v));
break;
case memory_order_release:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR_RELEASE(&storage, v));
break;
case memory_order_acq_rel:
case memory_order_seq_cst:
default:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR(&storage, v));
break;
}
return v;
}
static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
switch (order)
{
case memory_order_relaxed:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR_RELAXED(&storage, v));
break;
case memory_order_consume:
case memory_order_acquire:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR_ACQUIRE(&storage, v));
break;
case memory_order_release:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR_RELEASE(&storage, v));
break;
case memory_order_acq_rel:
case memory_order_seq_cst:
default:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR(&storage, v));
break;
}
return v;
}
};
template< bool Signed, bool Interprocess >
struct core_arch_operations< 8u, Signed, Interprocess > :
public core_arch_operations_msvc_arm< 8u, Signed, Interprocess, core_arch_operations< 8u, Signed, Interprocess > >
{
typedef core_arch_operations_msvc_arm< 8u, Signed, Interprocess, core_arch_operations< 8u, Signed, Interprocess > > base_type;
typedef typename base_type::storage_type storage_type;
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
base_type::fence_before_store(order);
BOOST_ATOMIC_DETAIL_ARM_STORE64(&storage, v);
base_type::fence_after_store(order);
}
static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
{
storage_type v = BOOST_ATOMIC_DETAIL_ARM_LOAD64(&storage);
base_type::fence_after_load(order);
return v;
}
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
switch (order)
{
case memory_order_relaxed:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64_RELAXED(&storage, v));
break;
case memory_order_consume:
case memory_order_acquire:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64_ACQUIRE(&storage, v));
break;
case memory_order_release:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64_RELEASE(&storage, v));
break;
case memory_order_acq_rel:
case memory_order_seq_cst:
default:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64(&storage, v));
break;
}
return v;
}
static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
switch (order)
{
case memory_order_relaxed:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE64_RELAXED(&storage, v));
break;
case memory_order_consume:
case memory_order_acquire:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE64_ACQUIRE(&storage, v));
break;
case memory_order_release:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE64_RELEASE(&storage, v));
break;
case memory_order_acq_rel:
case memory_order_seq_cst:
default:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE64(&storage, v));
break;
}
return v;
}
static BOOST_FORCEINLINE bool compare_exchange_strong(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
{
storage_type previous = expected, old_val;
switch (base_type::cas_common_order(success_order, failure_order))
{
case memory_order_relaxed:
old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64_RELAXED(&storage, desired, previous));
break;
case memory_order_consume:
case memory_order_acquire:
old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64_ACQUIRE(&storage, desired, previous));
break;
case memory_order_release:
old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64_RELEASE(&storage, desired, previous));
break;
case memory_order_acq_rel:
case memory_order_seq_cst:
default:
old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64(&storage, desired, previous));
break;
}
expected = old_val;
return (previous == old_val);
}
static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
switch (order)
{
case memory_order_relaxed:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND64_RELAXED(&storage, v));
break;
case memory_order_consume:
case memory_order_acquire:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND64_ACQUIRE(&storage, v));
break;
case memory_order_release:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND64_RELEASE(&storage, v));
break;
case memory_order_acq_rel:
case memory_order_seq_cst:
default:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND64(&storage, v));
break;
}
return v;
}
static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
switch (order)
{
case memory_order_relaxed:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR64_RELAXED(&storage, v));
break;
case memory_order_consume:
case memory_order_acquire:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR64_ACQUIRE(&storage, v));
break;
case memory_order_release:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR64_RELEASE(&storage, v));
break;
case memory_order_acq_rel:
case memory_order_seq_cst:
default:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR64(&storage, v));
break;
}
return v;
}
static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
switch (order)
{
case memory_order_relaxed:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR64_RELAXED(&storage, v));
break;
case memory_order_consume:
case memory_order_acquire:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR64_ACQUIRE(&storage, v));
break;
case memory_order_release:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR64_RELEASE(&storage, v));
break;
case memory_order_acq_rel:
case memory_order_seq_cst:
default:
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR64(&storage, v));
break;
}
return v;
}
};
} // namespace detail
} // namespace atomics
} // namespace boost
#undef BOOST_ATOMIC_DETAIL_ARM_LOAD8
#undef BOOST_ATOMIC_DETAIL_ARM_LOAD16
#undef BOOST_ATOMIC_DETAIL_ARM_LOAD32
#undef BOOST_ATOMIC_DETAIL_ARM_LOAD64
#undef BOOST_ATOMIC_DETAIL_ARM_STORE8
#undef BOOST_ATOMIC_DETAIL_ARM_STORE16
#undef BOOST_ATOMIC_DETAIL_ARM_STORE32
#undef BOOST_ATOMIC_DETAIL_ARM_STORE64
#include <boost/atomic/detail/footer.hpp>
#endif // BOOST_ATOMIC_DETAIL_CORE_ARCH_OPS_MSVC_ARM_HPP_INCLUDED_

View File

@ -0,0 +1,903 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2009 Helge Bahmann
* Copyright (c) 2012 Tim Blechmann
* Copyright (c) 2014 Andrey Semashev
*/
/*!
* \file atomic/detail/core_arch_ops_msvc_x86.hpp
*
* This header contains implementation of the \c core_arch_operations template.
*/
#ifndef BOOST_ATOMIC_DETAIL_CORE_ARCH_OPS_MSVC_X86_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_CORE_ARCH_OPS_MSVC_X86_HPP_INCLUDED_
#include <cstddef>
#include <boost/cstdint.hpp>
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/intptr.hpp>
#include <boost/atomic/detail/interlocked.hpp>
#include <boost/atomic/detail/storage_traits.hpp>
#include <boost/atomic/detail/core_arch_operations_fwd.hpp>
#include <boost/atomic/detail/type_traits/make_signed.hpp>
#include <boost/atomic/detail/capabilities.hpp>
#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B) || defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B)
#include <boost/cstdint.hpp>
#include <boost/atomic/detail/cas_based_exchange.hpp>
#include <boost/atomic/detail/core_ops_cas_based.hpp>
#endif
#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B) && defined(__AVX__)
#include <emmintrin.h>
#include <boost/atomic/detail/string_ops.hpp>
#endif
#include <boost/atomic/detail/ops_msvc_common.hpp>
#if !defined(_M_IX86) && !(defined(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8) && defined(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16))
#include <boost/atomic/detail/extending_cas_based_arithmetic.hpp>
#endif
#include <boost/atomic/detail/header.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
namespace boost {
namespace atomics {
namespace detail {
/*
* Implementation note for asm blocks.
*
* http://msdn.microsoft.com/en-us/data/k1a8ss06%28v=vs.105%29
*
* Some SSE types require eight-byte stack alignment, forcing the compiler to emit dynamic stack-alignment code.
* To be able to access both the local variables and the function parameters after the alignment, the compiler
* maintains two frame pointers. If the compiler performs frame pointer omission (FPO), it will use EBP and ESP.
* If the compiler does not perform FPO, it will use EBX and EBP. To ensure code runs correctly, do not modify EBX
* in asm code if the function requires dynamic stack alignment as it could modify the frame pointer.
* Either move the eight-byte aligned types out of the function, or avoid using EBX.
*
* Since we have no way of knowing whether the compiler uses FPO, we always save and restore ebx
* whenever we have to clobber it (the cmpxchg8b blocks below stash it in a local variable and
* restore it on exit). Additionally, we disable warning C4731 in header.hpp so that the compiler
* doesn't complain about ebx use.
*/
struct core_arch_operations_msvc_x86_base
{
static BOOST_CONSTEXPR_OR_CONST bool full_cas_based = false;
static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = true;
static BOOST_FORCEINLINE void fence_before(memory_order) BOOST_NOEXCEPT
{
BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
}
static BOOST_FORCEINLINE void fence_after(memory_order) BOOST_NOEXCEPT
{
BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
}
static BOOST_FORCEINLINE void fence_after_load(memory_order) BOOST_NOEXCEPT
{
BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
// On x86 and x86_64 there is no need for a hardware barrier,
// even if seq_cst memory order is requested, because all
// seq_cst writes are implemented with lock-prefixed operations
// or xchg which has implied lock prefix. Therefore normal loads
// are already ordered with seq_cst stores on these architectures.
}
};
template< std::size_t Size, bool Signed, bool Interprocess, typename Derived >
struct core_arch_operations_msvc_x86 :
public core_arch_operations_msvc_x86_base
{
typedef typename storage_traits< Size >::type storage_type;
static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = Size;
static BOOST_CONSTEXPR_OR_CONST std::size_t storage_alignment = storage_traits< Size >::alignment;
static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
static BOOST_CONSTEXPR_OR_CONST bool is_interprocess = Interprocess;
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
if (order != memory_order_seq_cst)
{
fence_before(order);
storage = v;
fence_after(order);
}
else
{
Derived::exchange(storage, v, order);
}
}
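// The seq_cst store is routed through exchange() because on x86 xchg with a memory operand
// carries an implied lock prefix and acts as a full fence, whereas the plain mov store above
// is only ordered by the compiler barriers in fence_before/fence_after.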
static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
{
storage_type v = storage;
fence_after_load(order);
return v;
}
static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
typedef typename boost::atomics::detail::make_signed< storage_type >::type signed_storage_type;
return Derived::fetch_add(storage, static_cast< storage_type >(-static_cast< signed_storage_type >(v)), order);
}
static BOOST_FORCEINLINE bool compare_exchange_weak(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
{
return Derived::compare_exchange_strong(storage, expected, desired, success_order, failure_order);
}
static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
return !!Derived::exchange(storage, (storage_type)1, order);
}
static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
store(storage, (storage_type)0, order);
}
};
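// The template above is a CRTP base: Derived is expected to provide fetch_add, exchange and
// compare_exchange_strong, from which the seq_cst store, fetch_sub, compare_exchange_weak,
// test_and_set and clear are synthesized. The size-specific specializations below supply those
// primitives from the interlocked intrinsics or inline asm.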
template< bool Signed, bool Interprocess >
struct core_arch_operations< 4u, Signed, Interprocess > :
public core_arch_operations_msvc_x86< 4u, Signed, Interprocess, core_arch_operations< 4u, Signed, Interprocess > >
{
typedef core_arch_operations_msvc_x86< 4u, Signed, Interprocess, core_arch_operations< 4u, Signed, Interprocess > > base_type;
typedef typename base_type::storage_type storage_type;
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(&storage, v));
}
static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE(&storage, v));
}
static BOOST_FORCEINLINE bool compare_exchange_strong(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT
{
storage_type previous = expected;
storage_type old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(&storage, desired, previous));
expected = old_val;
return (previous == old_val);
}
#if defined(BOOST_ATOMIC_INTERLOCKED_AND)
static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND(&storage, v));
}
#else
static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type res = storage;
while (!compare_exchange_strong(storage, res, res & v, order, memory_order_relaxed)) {}
return res;
}
#endif
#if defined(BOOST_ATOMIC_INTERLOCKED_OR)
static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR(&storage, v));
}
#else
static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type res = storage;
while (!compare_exchange_strong(storage, res, res | v, order, memory_order_relaxed)) {}
return res;
}
#endif
#if defined(BOOST_ATOMIC_INTERLOCKED_XOR)
static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR(&storage, v));
}
#else
static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type res = storage;
while (!compare_exchange_strong(storage, res, res ^ v, order, memory_order_relaxed)) {}
return res;
}
#endif
};
#if defined(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8)
template< bool Signed, bool Interprocess >
struct core_arch_operations< 1u, Signed, Interprocess > :
public core_arch_operations_msvc_x86< 1u, Signed, Interprocess, core_arch_operations< 1u, Signed, Interprocess > >
{
typedef core_arch_operations_msvc_x86< 1u, Signed, Interprocess, core_arch_operations< 1u, Signed, Interprocess > > base_type;
typedef typename base_type::storage_type storage_type;
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD8(&storage, v));
}
static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE8(&storage, v));
}
static BOOST_FORCEINLINE bool compare_exchange_strong(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT
{
storage_type previous = expected;
storage_type old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8(&storage, desired, previous));
expected = old_val;
return (previous == old_val);
}
static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND8(&storage, v));
}
static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR8(&storage, v));
}
static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR8(&storage, v));
}
};
#elif defined(_M_IX86)
template< bool Signed, bool Interprocess >
struct core_arch_operations< 1u, Signed, Interprocess > :
public core_arch_operations_msvc_x86< 1u, Signed, Interprocess, core_arch_operations< 1u, Signed, Interprocess > >
{
typedef core_arch_operations_msvc_x86< 1u, Signed, Interprocess, core_arch_operations< 1u, Signed, Interprocess > > base_type;
typedef typename base_type::storage_type storage_type;
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
base_type::fence_before(order);
__asm
{
mov edx, storage
movzx eax, v
lock xadd byte ptr [edx], al
mov v, al
};
base_type::fence_after(order);
return v;
}
static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
base_type::fence_before(order);
__asm
{
mov edx, storage
movzx eax, v
xchg byte ptr [edx], al
mov v, al
};
base_type::fence_after(order);
return v;
}
static BOOST_FORCEINLINE bool compare_exchange_strong(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order) BOOST_NOEXCEPT
{
base_type::fence_before(success_order);
bool success;
__asm
{
mov esi, expected
mov edi, storage
movzx eax, byte ptr [esi]
movzx edx, desired
lock cmpxchg byte ptr [edi], dl
mov byte ptr [esi], al
sete success
};
// The success and failure fences are equivalent anyway
base_type::fence_after(success_order);
return success;
}
static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
base_type::fence_before(order);
__asm
{
mov edi, storage
movzx ecx, v
xor edx, edx
movzx eax, byte ptr [edi]
align 16
again:
mov dl, al
and dl, cl
lock cmpxchg byte ptr [edi], dl
jne again
mov v, al
};
base_type::fence_after(order);
return v;
}
static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
base_type::fence_before(order);
__asm
{
mov edi, storage
movzx ecx, v
xor edx, edx
movzx eax, byte ptr [edi]
align 16
again:
mov dl, al
or dl, cl
lock cmpxchg byte ptr [edi], dl
jne again
mov v, al
};
base_type::fence_after(order);
return v;
}
static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
base_type::fence_before(order);
__asm
{
mov edi, storage
movzx ecx, v
xor edx, edx
movzx eax, byte ptr [edi]
align 16
again:
mov dl, al
xor dl, cl
lock cmpxchg byte ptr [edi], dl
jne again
mov v, al
};
base_type::fence_after(order);
return v;
}
};
#else
template< bool Signed, bool Interprocess >
struct core_arch_operations< 1u, Signed, Interprocess > :
public extending_cas_based_arithmetic< core_arch_operations< 4u, Signed, Interprocess >, 1u, Signed >
{
};
#endif
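// When neither the 8-bit interlocked intrinsics nor 32-bit inline asm are available, the 1-byte
// (and, below, the 2-byte) atomics are emulated on top of the always-available 4-byte operations.
// Presumably extending_cas_based_arithmetic widens the value to the larger storage and runs a
// CAS loop on it, masking off the extra bits; see extending_cas_based_arithmetic.hpp.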
#if defined(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16)
template< bool Signed, bool Interprocess >
struct core_arch_operations< 2u, Signed, Interprocess > :
public core_arch_operations_msvc_x86< 2u, Signed, Interprocess, core_arch_operations< 2u, Signed, Interprocess > >
{
typedef core_arch_operations_msvc_x86< 2u, Signed, Interprocess, core_arch_operations< 2u, Signed, Interprocess > > base_type;
typedef typename base_type::storage_type storage_type;
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD16(&storage, v));
}
static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE16(&storage, v));
}
static BOOST_FORCEINLINE bool compare_exchange_strong(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT
{
storage_type previous = expected;
storage_type old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16(&storage, desired, previous));
expected = old_val;
return (previous == old_val);
}
static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND16(&storage, v));
}
static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR16(&storage, v));
}
static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR16(&storage, v));
}
};
#elif defined(_M_IX86)
template< bool Signed, bool Interprocess >
struct core_arch_operations< 2u, Signed, Interprocess > :
public core_arch_operations_msvc_x86< 2u, Signed, Interprocess, core_arch_operations< 2u, Signed, Interprocess > >
{
typedef core_arch_operations_msvc_x86< 2u, Signed, Interprocess, core_arch_operations< 2u, Signed, Interprocess > > base_type;
typedef typename base_type::storage_type storage_type;
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
base_type::fence_before(order);
__asm
{
mov edx, storage
movzx eax, v
lock xadd word ptr [edx], ax
mov v, ax
};
base_type::fence_after(order);
return v;
}
static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
base_type::fence_before(order);
__asm
{
mov edx, storage
movzx eax, v
xchg word ptr [edx], ax
mov v, ax
};
base_type::fence_after(order);
return v;
}
static BOOST_FORCEINLINE bool compare_exchange_strong(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order) BOOST_NOEXCEPT
{
base_type::fence_before(success_order);
bool success;
__asm
{
mov esi, expected
mov edi, storage
movzx eax, word ptr [esi]
movzx edx, desired
lock cmpxchg word ptr [edi], dx
mov word ptr [esi], ax
sete success
};
// The success and failure fences are equivalent anyway
base_type::fence_after(success_order);
return success;
}
static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
base_type::fence_before(order);
__asm
{
mov edi, storage
movzx ecx, v
xor edx, edx
movzx eax, word ptr [edi]
align 16
again:
mov dx, ax
and dx, cx
lock cmpxchg word ptr [edi], dx
jne again
mov v, ax
};
base_type::fence_after(order);
return v;
}
static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
base_type::fence_before(order);
__asm
{
mov edi, storage
movzx ecx, v
xor edx, edx
movzx eax, word ptr [edi]
align 16
again:
mov dx, ax
or dx, cx
lock cmpxchg word ptr [edi], dx
jne again
mov v, ax
};
base_type::fence_after(order);
return v;
}
static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
base_type::fence_before(order);
__asm
{
mov edi, storage
movzx ecx, v
xor edx, edx
movzx eax, word ptr [edi]
align 16
again:
mov dx, ax
xor dx, cx
lock cmpxchg word ptr [edi], dx
jne again
mov v, ax
};
base_type::fence_after(order);
return v;
}
};
#else
template< bool Signed, bool Interprocess >
struct core_arch_operations< 2u, Signed, Interprocess > :
public extending_cas_based_arithmetic< core_arch_operations< 4u, Signed, Interprocess >, 2u, Signed >
{
};
#endif
#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B)
template< bool Signed, bool Interprocess >
struct msvc_dcas_x86
{
typedef typename storage_traits< 8u >::type storage_type;
static BOOST_CONSTEXPR_OR_CONST bool is_interprocess = Interprocess;
static BOOST_CONSTEXPR_OR_CONST bool full_cas_based = true;
static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = true;
static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 8u;
static BOOST_CONSTEXPR_OR_CONST std::size_t storage_alignment = 8u;
static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
// Intel 64 and IA-32 Architectures Software Developer's Manual, Volume 3A, 8.1.1. Guaranteed Atomic Operations:
//
// The Pentium processor (and newer processors since) guarantees that the following additional memory operations will always be carried out atomically:
// * Reading or writing a quadword aligned on a 64-bit boundary
//
// Luckily, the memory is almost always 8-byte aligned in our case because atomic<> uses 64-bit native types for storage and dynamic memory allocations
// have at least 8-byte alignment. The only unfortunate case is when the atomic is placed on the stack and is not 8-byte aligned (as on 32-bit Windows).
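// Accordingly, the operations below test ((uintptr_t)p & 7u) == 0u and use a single 8-byte move
// (SSE2 movq or x87 fild/fistp) on the fast path, falling back to a lock cmpxchg8b loop for
// misaligned storage or seq_cst stores.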
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
storage_type volatile* p = &storage;
if (BOOST_LIKELY(order != memory_order_seq_cst && ((uintptr_t)p & 7u) == 0u))
{
#if defined(_M_IX86_FP) && _M_IX86_FP >= 2
#if defined(__AVX__)
__asm
{
mov edx, p
vmovq xmm4, v
vmovq qword ptr [edx], xmm4
};
#else
__asm
{
mov edx, p
movq xmm4, v
movq qword ptr [edx], xmm4
};
#endif
#else
__asm
{
mov edx, p
fild v
fistp qword ptr [edx]
};
#endif
}
else
{
uint32_t backup;
__asm
{
mov backup, ebx
mov edi, p
mov ebx, dword ptr [v]
mov ecx, dword ptr [v + 4]
mov eax, dword ptr [edi]
mov edx, dword ptr [edi + 4]
align 16
again:
lock cmpxchg8b qword ptr [edi]
jne again
mov ebx, backup
};
}
BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
}
static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order) BOOST_NOEXCEPT
{
BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
storage_type const volatile* p = &storage;
storage_type value;
if (BOOST_LIKELY(((uintptr_t)p & 7u) == 0u))
{
#if defined(_M_IX86_FP) && _M_IX86_FP >= 2
#if defined(__AVX__)
__asm
{
mov edx, p
vmovq xmm4, qword ptr [edx]
vmovq value, xmm4
};
#else
__asm
{
mov edx, p
movq xmm4, qword ptr [edx]
movq value, xmm4
};
#endif
#else
__asm
{
mov edx, p
fild qword ptr [edx]
fistp value
};
#endif
}
else
{
// We don't care about the comparison result here; the previous value will be stored into value anyway.
// Nor do we care what ebx and ecx hold, as long as they equal eax and edx before cmpxchg8b: if the
// comparison happens to succeed, cmpxchg8b stores the same value back, so the load is
// non-destructive either way.
__asm
{
mov edi, p
mov eax, ebx
mov edx, ecx
lock cmpxchg8b qword ptr [edi]
mov dword ptr [value], eax
mov dword ptr [value + 4], edx
};
}
BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
return value;
}
static BOOST_FORCEINLINE bool compare_exchange_strong(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT
{
// MSVC-11 in 32-bit mode sometimes generates messed up code without compiler barriers,
// even though the _InterlockedCompareExchange64 intrinsic already provides one.
BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
storage_type volatile* p = &storage;
#if defined(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64)
const storage_type old_val = (storage_type)BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64(p, desired, expected);
const bool result = (old_val == expected);
expected = old_val;
#else
bool result;
uint32_t backup;
__asm
{
mov backup, ebx
mov edi, p
mov esi, expected
mov ebx, dword ptr [desired]
mov ecx, dword ptr [desired + 4]
mov eax, dword ptr [esi]
mov edx, dword ptr [esi + 4]
lock cmpxchg8b qword ptr [edi]
mov dword ptr [esi], eax
mov dword ptr [esi + 4], edx
mov ebx, backup
sete result
};
#endif
BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
return result;
}
static BOOST_FORCEINLINE bool compare_exchange_weak(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
{
return compare_exchange_strong(storage, expected, desired, success_order, failure_order);
}
static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
storage_type volatile* p = &storage;
uint32_t backup;
__asm
{
mov backup, ebx
mov edi, p
mov ebx, dword ptr [v]
mov ecx, dword ptr [v + 4]
mov eax, dword ptr [edi]
mov edx, dword ptr [edi + 4]
align 16
again:
lock cmpxchg8b qword ptr [edi]
jne again
mov ebx, backup
mov dword ptr [v], eax
mov dword ptr [v + 4], edx
};
BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
return v;
}
};
template< bool Signed, bool Interprocess >
struct core_arch_operations< 8u, Signed, Interprocess > :
public core_operations_cas_based< msvc_dcas_x86< Signed, Interprocess > >
{
};
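// msvc_dcas_x86 only provides load, store, exchange and compare-exchange; wrapping it in
// core_operations_cas_based (defined in core_ops_cas_based.hpp, included above) synthesizes
// fetch_add/fetch_sub and the bitwise fetch operations from compare_exchange_weak loops.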
#elif defined(_M_AMD64)
template< bool Signed, bool Interprocess >
struct core_arch_operations< 8u, Signed, Interprocess > :
public core_arch_operations_msvc_x86< 8u, Signed, Interprocess, core_arch_operations< 8u, Signed, Interprocess > >
{
typedef core_arch_operations_msvc_x86< 8u, Signed, Interprocess, core_arch_operations< 8u, Signed, Interprocess > > base_type;
typedef typename base_type::storage_type storage_type;
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64(&storage, v));
}
static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE64(&storage, v));
}
static BOOST_FORCEINLINE bool compare_exchange_strong(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT
{
storage_type previous = expected;
storage_type old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64(&storage, desired, previous));
expected = old_val;
return (previous == old_val);
}
static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND64(&storage, v));
}
static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR64(&storage, v));
}
static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR64(&storage, v));
}
};
#endif
#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B)
template< bool Signed, bool Interprocess >
struct msvc_dcas_x86_64
{
typedef typename storage_traits< 16u >::type storage_type;
static BOOST_CONSTEXPR_OR_CONST bool is_interprocess = Interprocess;
static BOOST_CONSTEXPR_OR_CONST bool full_cas_based = true;
static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = true;
static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 16u;
static BOOST_CONSTEXPR_OR_CONST std::size_t storage_alignment = 16u;
static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
#if defined(__AVX__)
if (BOOST_LIKELY(order != memory_order_seq_cst && (((uintptr_t)&storage) & 15u) == 0u))
{
BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
__m128i value;
BOOST_ATOMIC_DETAIL_MEMCPY(&value, &v, sizeof(value));
_mm_store_si128(const_cast< __m128i* >(reinterpret_cast< volatile __m128i* >(&storage)), value);
BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
return;
}
#endif // defined(__AVX__)
storage_type value = const_cast< storage_type& >(storage);
while (!BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE128(&storage, v, &value)) {}
}
static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order) BOOST_NOEXCEPT
{
storage_type value;
#if defined(__AVX__)
if (BOOST_LIKELY((((uintptr_t)&storage) & 15u) == 0u))
{
__m128i v = _mm_load_si128(const_cast< const __m128i* >(reinterpret_cast< const volatile __m128i* >(&storage)));
BOOST_ATOMIC_DETAIL_MEMCPY(&value, &v, sizeof(value));
BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
return value;
}
#endif // defined(__AVX__)
value = storage_type();
BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE128(&storage, value, &value);
return value;
}
static BOOST_FORCEINLINE bool compare_exchange_strong(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT
{
return !!BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE128(&storage, desired, &expected);
}
static BOOST_FORCEINLINE bool compare_exchange_weak(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
{
return compare_exchange_strong(storage, expected, desired, success_order, failure_order);
}
};
template< bool Signed, bool Interprocess >
struct core_arch_operations< 16u, Signed, Interprocess > :
public core_operations_cas_based< cas_based_exchange< msvc_dcas_x86_64< Signed, Interprocess > > >
{
};
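// Unlike msvc_dcas_x86 above, msvc_dcas_x86_64 defines no exchange, so cas_based_exchange (see
// cas_based_exchange.hpp, included above) is layered in first to build exchange out of a
// compare_exchange_weak loop before core_operations_cas_based adds the fetch operations.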
#endif // defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B)
} // namespace detail
} // namespace atomics
} // namespace boost
#include <boost/atomic/detail/footer.hpp>
#endif // BOOST_ATOMIC_DETAIL_CORE_ARCH_OPS_MSVC_X86_HPP_INCLUDED_

View File

@ -0,0 +1,49 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2014 Andrey Semashev
*/
/*!
* \file atomic/detail/core_operations.hpp
*
* This header defines core atomic operations.
*/
#ifndef BOOST_ATOMIC_DETAIL_CORE_OPERATIONS_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_CORE_OPERATIONS_HPP_INCLUDED_
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/platform.hpp>
#include <boost/atomic/detail/core_arch_operations.hpp>
#include <boost/atomic/detail/core_operations_fwd.hpp>
#if defined(BOOST_ATOMIC_DETAIL_CORE_BACKEND_HEADER)
#include BOOST_ATOMIC_DETAIL_CORE_BACKEND_HEADER(boost/atomic/detail/core_ops_)
#endif
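// BOOST_ATOMIC_DETAIL_CORE_BACKEND_HEADER presumably completes the path prefix with the name of
// the backend selected in platform.hpp, e.g. boost/atomic/detail/core_ops_gcc_atomic.hpp; such a
// backend header defines core_operations specializations that take precedence over the
// architecture-specific fallback below.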
#include <boost/atomic/detail/header.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
namespace boost {
namespace atomics {
namespace detail {
//! Default specialization that falls back to architecture-specific implementation
template< std::size_t Size, bool Signed, bool Interprocess >
struct core_operations :
public core_arch_operations< Size, Signed, Interprocess >
{
};
} // namespace detail
} // namespace atomics
} // namespace boost
#include <boost/atomic/detail/footer.hpp>
#endif // BOOST_ATOMIC_DETAIL_CORE_OPERATIONS_HPP_INCLUDED_

View File

@ -0,0 +1,195 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2014, 2020 Andrey Semashev
*/
/*!
* \file atomic/detail/core_operations_emulated.hpp
*
* This header contains lock pool-based implementation of the core atomic operations.
*/
#ifndef BOOST_ATOMIC_DETAIL_CORE_OPERATIONS_EMULATED_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_CORE_OPERATIONS_EMULATED_HPP_INCLUDED_
#include <cstddef>
#include <boost/static_assert.hpp>
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/storage_traits.hpp>
#include <boost/atomic/detail/core_operations_emulated_fwd.hpp>
#include <boost/atomic/detail/lock_pool.hpp>
#include <boost/atomic/detail/header.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
namespace boost {
namespace atomics {
namespace detail {
template< std::size_t Size, std::size_t Alignment, bool = Alignment >= storage_traits< Size >::native_alignment >
struct core_operations_emulated_base
{
typedef typename storage_traits< Size >::type storage_type;
};
template< std::size_t Size, std::size_t Alignment >
struct core_operations_emulated_base< Size, Alignment, false >
{
typedef buffer_storage< Size, Alignment > storage_type;
};
//! Emulated implementation of core atomic operations
template< std::size_t Size, std::size_t Alignment, bool Signed, bool Interprocess >
struct core_operations_emulated :
public core_operations_emulated_base< Size, Alignment >
{
typedef core_operations_emulated_base< Size, Alignment > base_type;
// Define storage_type to have alignment not greater than Alignment. This will allow operations to work with value_types
// that possibly have weaker alignment requirements than storage_traits< Size >::type would. This is important for atomic_ref<>.
// atomic<> will allow higher alignment requirement than its value_type.
// Note that storage_type should be an integral type, if possible, so that arithmetic and bitwise operations are possible.
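// For example, core_operations_emulated< 8u, 4u, ... > (an 8-byte value that is only guaranteed
// 4-byte alignment, as can happen with atomic_ref<> on 32-bit targets) selects
// buffer_storage< 8u, 4u > via the base class above instead of the natively aligned integral storage.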
typedef typename base_type::storage_type storage_type;
static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = Size;
static BOOST_CONSTEXPR_OR_CONST std::size_t storage_alignment = Alignment >= storage_traits< Size >::alignment ? storage_traits< Size >::alignment : Alignment;
static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
static BOOST_CONSTEXPR_OR_CONST bool is_interprocess = Interprocess;
static BOOST_CONSTEXPR_OR_CONST bool full_cas_based = false;
static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = false;
typedef lock_pool::scoped_lock< storage_alignment > scoped_lock;
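// Every operation below takes a scoped_lock keyed on the storage address: the lock pool (see
// lock_pool.hpp) is a global table of locks indexed by a hash of that address, so distinct
// atomics may share a lock, but any two operations on the same object are always serialized.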
static void store(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
BOOST_STATIC_ASSERT_MSG(!is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
scoped_lock lock(&storage);
const_cast< storage_type& >(storage) = v;
}
static storage_type load(storage_type const volatile& storage, memory_order) BOOST_NOEXCEPT
{
BOOST_STATIC_ASSERT_MSG(!is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
scoped_lock lock(&storage);
return const_cast< storage_type const& >(storage);
}
static storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
BOOST_STATIC_ASSERT_MSG(!is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
storage_type& s = const_cast< storage_type& >(storage);
scoped_lock lock(&storage);
storage_type old_val = s;
s += v;
return old_val;
}
static storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
BOOST_STATIC_ASSERT_MSG(!is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
storage_type& s = const_cast< storage_type& >(storage);
scoped_lock lock(&storage);
storage_type old_val = s;
s -= v;
return old_val;
}
static storage_type exchange(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
BOOST_STATIC_ASSERT_MSG(!is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
storage_type& s = const_cast< storage_type& >(storage);
scoped_lock lock(&storage);
storage_type old_val = s;
s = v;
return old_val;
}
static bool compare_exchange_strong(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT
{
BOOST_STATIC_ASSERT_MSG(!is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
storage_type& s = const_cast< storage_type& >(storage);
scoped_lock lock(&storage);
storage_type old_val = s;
const bool res = old_val == expected;
if (res)
s = desired;
expected = old_val;
return res;
}
static bool compare_exchange_weak(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT
{
// Note: This function is an exact copy of compare_exchange_strong. The reason we're not simply
// forwarding the call is that MSVC-12 ICEs (crashes with an internal compiler error) in that case.
BOOST_STATIC_ASSERT_MSG(!is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
storage_type& s = const_cast< storage_type& >(storage);
scoped_lock lock(&storage);
storage_type old_val = s;
const bool res = old_val == expected;
if (res)
s = desired;
expected = old_val;
return res;
}
static storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
BOOST_STATIC_ASSERT_MSG(!is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
storage_type& s = const_cast< storage_type& >(storage);
scoped_lock lock(&storage);
storage_type old_val = s;
s &= v;
return old_val;
}
static storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
BOOST_STATIC_ASSERT_MSG(!is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
storage_type& s = const_cast< storage_type& >(storage);
scoped_lock lock(&storage);
storage_type old_val = s;
s |= v;
return old_val;
}
static storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
BOOST_STATIC_ASSERT_MSG(!is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
storage_type& s = const_cast< storage_type& >(storage);
scoped_lock lock(&storage);
storage_type old_val = s;
s ^= v;
return old_val;
}
static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
BOOST_STATIC_ASSERT_MSG(!is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
return !!exchange(storage, (storage_type)1, order);
}
static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
BOOST_STATIC_ASSERT_MSG(!is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
store(storage, (storage_type)0, order);
}
};
} // namespace detail
} // namespace atomics
} // namespace boost
#include <boost/atomic/detail/footer.hpp>
#endif // BOOST_ATOMIC_DETAIL_CORE_OPERATIONS_EMULATED_HPP_INCLUDED_

View File

@ -0,0 +1,38 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2020 Andrey Semashev
*/
/*!
* \file atomic/detail/core_operations_emulated_fwd.hpp
*
* This header forward-declares lock pool-based implementation of the core atomic operations.
*/
#ifndef BOOST_ATOMIC_DETAIL_CORE_OPERATIONS_EMULATED_FWD_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_CORE_OPERATIONS_EMULATED_FWD_HPP_INCLUDED_
#include <cstddef>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/header.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
namespace boost {
namespace atomics {
namespace detail {
template< std::size_t Size, std::size_t Alignment, bool Signed, bool Interprocess >
struct core_operations_emulated;
} // namespace detail
} // namespace atomics
} // namespace boost
#include <boost/atomic/detail/footer.hpp>
#endif // BOOST_ATOMIC_DETAIL_CORE_OPERATIONS_EMULATED_FWD_HPP_INCLUDED_

View File

@ -0,0 +1,38 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2014 Andrey Semashev
*/
/*!
* \file atomic/detail/core_operations_fwd.hpp
*
* This header contains forward declaration of the \c core_operations template.
*/
#ifndef BOOST_ATOMIC_DETAIL_CORE_OPERATIONS_FWD_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_CORE_OPERATIONS_FWD_HPP_INCLUDED_
#include <cstddef>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/header.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
namespace boost {
namespace atomics {
namespace detail {
template< std::size_t Size, bool Signed, bool Interprocess >
struct core_operations;
} // namespace detail
} // namespace atomics
} // namespace boost
#include <boost/atomic/detail/footer.hpp>
#endif // BOOST_ATOMIC_DETAIL_CORE_OPERATIONS_FWD_HPP_INCLUDED_

View File

@ -0,0 +1,94 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2014 Andrey Semashev
*/
/*!
* \file atomic/detail/core_ops_cas_based.hpp
*
* This header contains CAS-based implementation of core atomic operations.
*/
#ifndef BOOST_ATOMIC_DETAIL_CORE_OPS_CAS_BASED_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_CORE_OPS_CAS_BASED_HPP_INCLUDED_
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/header.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
namespace boost {
namespace atomics {
namespace detail {
template< typename Base >
struct core_operations_cas_based :
public Base
{
typedef typename Base::storage_type storage_type;
static BOOST_CONSTEXPR_OR_CONST bool full_cas_based = true;
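// All fetch operations below share one pattern: read an initial guess with a non-atomic load,
// then retry compare_exchange_weak until it succeeds. No reload is needed in the loop body
// because a failed compare_exchange_weak writes the currently stored value back into old_val,
// which is also the pre-operation value that is ultimately returned.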
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type old_val;
atomics::detail::non_atomic_load(storage, old_val);
while (!Base::compare_exchange_weak(storage, old_val, old_val + v, order, memory_order_relaxed)) {}
return old_val;
}
static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type old_val;
atomics::detail::non_atomic_load(storage, old_val);
while (!Base::compare_exchange_weak(storage, old_val, old_val - v, order, memory_order_relaxed)) {}
return old_val;
}
static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type old_val;
atomics::detail::non_atomic_load(storage, old_val);
while (!Base::compare_exchange_weak(storage, old_val, old_val & v, order, memory_order_relaxed)) {}
return old_val;
}
static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type old_val;
atomics::detail::non_atomic_load(storage, old_val);
while (!Base::compare_exchange_weak(storage, old_val, old_val | v, order, memory_order_relaxed)) {}
return old_val;
}
static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type old_val;
atomics::detail::non_atomic_load(storage, old_val);
while (!Base::compare_exchange_weak(storage, old_val, old_val ^ v, order, memory_order_relaxed)) {}
return old_val;
}
static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
return !!Base::exchange(storage, (storage_type)1, order);
}
static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
Base::store(storage, (storage_type)0, order);
}
};
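// A minimal usage sketch (my_cas_backend is a hypothetical backend providing only a
// compare-and-swap; cas_based_exchange is declared in cas_based_exchange.hpp):
//
//   typedef core_operations_cas_based< cas_based_exchange< my_cas_backend > > ops;
//   ops::storage_type s = 0u;
//   ops::fetch_add(s, 1u, memory_order_seq_cst); // loops on compare_exchange_weak
//
// Each fetch_* in core_operations_cas_based first loads the current value
// non-atomically and then retries compare_exchange_weak until it succeeds; the failure
// order is relaxed because the loop re-reads the value on the next iteration anyway.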
} // namespace detail
} // namespace atomics
} // namespace boost
#include <boost/atomic/detail/footer.hpp>
#endif // BOOST_ATOMIC_DETAIL_CORE_OPS_CAS_BASED_HPP_INCLUDED_

View File

@@ -0,0 +1,306 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2014, 2020 Andrey Semashev
*/
/*!
* \file atomic/detail/core_ops_gcc_atomic.hpp
*
* This header contains implementation of the \c core_operations template.
*/
#ifndef BOOST_ATOMIC_DETAIL_CORE_OPS_GCC_ATOMIC_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_CORE_OPS_GCC_ATOMIC_HPP_INCLUDED_
#include <cstddef>
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/storage_traits.hpp>
#include <boost/atomic/detail/core_operations_fwd.hpp>
#include <boost/atomic/detail/core_arch_operations.hpp>
#include <boost/atomic/detail/capabilities.hpp>
#include <boost/atomic/detail/gcc_atomic_memory_order_utils.hpp>
#if BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT8_LOCK_FREE < BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT16_LOCK_FREE || BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT16_LOCK_FREE < BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT32_LOCK_FREE ||\
BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT32_LOCK_FREE < BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT64_LOCK_FREE || BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT64_LOCK_FREE < BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT128_LOCK_FREE
// There are platforms where we need to use larger storage types
#include <boost/atomic/detail/int_sizes.hpp>
#include <boost/atomic/detail/extending_cas_based_arithmetic.hpp>
#endif
#include <boost/atomic/detail/header.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
#if defined(__INTEL_COMPILER)
// This is used to suppress warning #32013 described in gcc_atomic_memory_order_utils.hpp
// for Intel Compiler.
// In debug builds the compiler does not inline any functions, so basically
// every atomic function call results in this warning. I don't know any other
// way to selectively disable just this one warning.
#pragma system_header
#endif
namespace boost {
namespace atomics {
namespace detail {
template< std::size_t Size, bool Signed, bool Interprocess >
struct core_operations_gcc_atomic
{
typedef typename storage_traits< Size >::type storage_type;
static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = Size;
static BOOST_CONSTEXPR_OR_CONST std::size_t storage_alignment = storage_traits< Size >::alignment;
static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
static BOOST_CONSTEXPR_OR_CONST bool is_interprocess = Interprocess;
static BOOST_CONSTEXPR_OR_CONST bool full_cas_based = false;
// Note: In the current implementation, core_operations_gcc_atomic is used only when the __atomic
// intrinsics for the particular size are always lock-free (i.e. the corresponding LOCK_FREE macro is 2).
// Therefore it is safe to always set is_always_lock_free to true here.
static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = true;
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
#if defined(BOOST_GCC) && BOOST_GCC < 100100 && (defined(__x86_64__) || defined(__i386__))
// gcc up to 10.1 generates mov + mfence for seq_cst stores, which is slower than xchg
if (order != memory_order_seq_cst)
__atomic_store_n(&storage, v, atomics::detail::convert_memory_order_to_gcc(order));
else
__atomic_exchange_n(&storage, v, __ATOMIC_SEQ_CST);
#else
__atomic_store_n(&storage, v, atomics::detail::convert_memory_order_to_gcc(order));
#endif
}
static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
{
#if defined(BOOST_ATOMIC_DETAIL_AARCH64_HAS_RCPC)
// At least gcc 9.3 and clang 10 do not generate relaxed ldapr instructions that are available in ARMv8.3-RCPC extension.
// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=95751
typedef atomics::detail::core_arch_operations< storage_size, is_signed, is_interprocess > core_arch_operations;
return core_arch_operations::load(storage, order);
#else
return __atomic_load_n(&storage, atomics::detail::convert_memory_order_to_gcc(order));
#endif
}
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
return __atomic_fetch_add(&storage, v, atomics::detail::convert_memory_order_to_gcc(order));
}
static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
return __atomic_fetch_sub(&storage, v, atomics::detail::convert_memory_order_to_gcc(order));
}
static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
return __atomic_exchange_n(&storage, v, atomics::detail::convert_memory_order_to_gcc(order));
}
static BOOST_FORCEINLINE bool compare_exchange_strong(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
{
return __atomic_compare_exchange_n
(
&storage, &expected, desired, false,
atomics::detail::convert_memory_order_to_gcc(success_order),
atomics::detail::convert_memory_order_to_gcc(failure_order)
);
}
static BOOST_FORCEINLINE bool compare_exchange_weak(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
{
return __atomic_compare_exchange_n
(
&storage, &expected, desired, true,
atomics::detail::convert_memory_order_to_gcc(success_order),
atomics::detail::convert_memory_order_to_gcc(failure_order)
);
}
static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
return __atomic_fetch_and(&storage, v, atomics::detail::convert_memory_order_to_gcc(order));
}
static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
return __atomic_fetch_or(&storage, v, atomics::detail::convert_memory_order_to_gcc(order));
}
static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
return __atomic_fetch_xor(&storage, v, atomics::detail::convert_memory_order_to_gcc(order));
}
static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
return __atomic_test_and_set(&storage, atomics::detail::convert_memory_order_to_gcc(order));
}
static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
__atomic_clear(const_cast< storage_type* >(&storage), atomics::detail::convert_memory_order_to_gcc(order));
}
};
// We want to only enable __atomic* intrinsics when the corresponding BOOST_ATOMIC_DETAIL_GCC_ATOMIC_*_LOCK_FREE macro indicates
// the same or better lock-free guarantees as the BOOST_ATOMIC_*_LOCK_FREE macro. Otherwise, we want to leave core_operations
// unspecialized, so that core_arch_operations is used instead.
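// For example, on a hypothetical target where BOOST_ATOMIC_INT64_LOCK_FREE == 2 but
// BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT64_LOCK_FREE == 1 and
// BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT128_LOCK_FREE == 2, the 8-byte specialization below
// does not use core_operations_gcc_atomic< 8u > directly; it instead emulates 8-byte
// operations on top of the 16-byte __atomic intrinsics via extending_cas_based_arithmetic.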
#if BOOST_ATOMIC_INT128_LOCK_FREE > 0 && BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT128_LOCK_FREE >= BOOST_ATOMIC_INT128_LOCK_FREE
template< bool Signed, bool Interprocess >
struct core_operations< 16u, Signed, Interprocess > :
public core_operations_gcc_atomic< 16u, Signed, Interprocess >
{
};
#endif
#if BOOST_ATOMIC_INT64_LOCK_FREE > 0
#if BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT64_LOCK_FREE >= BOOST_ATOMIC_INT64_LOCK_FREE
template< bool Signed, bool Interprocess >
struct core_operations< 8u, Signed, Interprocess > :
public core_operations_gcc_atomic< 8u, Signed, Interprocess >
{
};
#elif BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT128_LOCK_FREE >= BOOST_ATOMIC_INT64_LOCK_FREE
template< bool Signed, bool Interprocess >
struct core_operations< 8u, Signed, Interprocess > :
public extending_cas_based_arithmetic< core_operations_gcc_atomic< 16u, Signed, Interprocess >, 8u, Signed >
{
};
#endif
#endif // BOOST_ATOMIC_INT64_LOCK_FREE > 0
#if BOOST_ATOMIC_INT32_LOCK_FREE > 0
#if BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT32_LOCK_FREE >= BOOST_ATOMIC_INT32_LOCK_FREE
template< bool Signed, bool Interprocess >
struct core_operations< 4u, Signed, Interprocess > :
public core_operations_gcc_atomic< 4u, Signed, Interprocess >
{
};
#elif BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT64_LOCK_FREE >= BOOST_ATOMIC_INT32_LOCK_FREE
template< bool Signed, bool Interprocess >
struct core_operations< 4u, Signed, Interprocess > :
public extending_cas_based_arithmetic< core_operations_gcc_atomic< 8u, Signed, Interprocess >, 4u, Signed >
{
};
#elif BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT128_LOCK_FREE >= BOOST_ATOMIC_INT32_LOCK_FREE
template< bool Signed, bool Interprocess >
struct core_operations< 4u, Signed, Interprocess > :
public extending_cas_based_arithmetic< core_operations_gcc_atomic< 16u, Signed, Interprocess >, 4u, Signed >
{
};
#endif
#endif // BOOST_ATOMIC_INT32_LOCK_FREE > 0
#if BOOST_ATOMIC_INT16_LOCK_FREE > 0
#if BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT16_LOCK_FREE >= BOOST_ATOMIC_INT16_LOCK_FREE
template< bool Signed, bool Interprocess >
struct core_operations< 2u, Signed, Interprocess > :
public core_operations_gcc_atomic< 2u, Signed, Interprocess >
{
};
#elif BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT32_LOCK_FREE >= BOOST_ATOMIC_INT16_LOCK_FREE
template< bool Signed, bool Interprocess >
struct core_operations< 2u, Signed, Interprocess > :
public extending_cas_based_arithmetic< core_operations_gcc_atomic< 4u, Signed, Interprocess >, 2u, Signed >
{
};
#elif BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT64_LOCK_FREE >= BOOST_ATOMIC_INT16_LOCK_FREE
template< bool Signed, bool Interprocess >
struct core_operations< 2u, Signed, Interprocess > :
public extending_cas_based_arithmetic< core_operations_gcc_atomic< 8u, Signed, Interprocess >, 2u, Signed >
{
};
#elif BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT128_LOCK_FREE >= BOOST_ATOMIC_INT16_LOCK_FREE
template< bool Signed, bool Interprocess >
struct core_operations< 2u, Signed, Interprocess > :
public extending_cas_based_arithmetic< core_operations_gcc_atomic< 16u, Signed, Interprocess >, 2u, Signed >
{
};
#endif
#endif // BOOST_ATOMIC_INT16_LOCK_FREE > 0
#if BOOST_ATOMIC_INT8_LOCK_FREE > 0
#if BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT8_LOCK_FREE >= BOOST_ATOMIC_INT8_LOCK_FREE
template< bool Signed, bool Interprocess >
struct core_operations< 1u, Signed, Interprocess > :
public core_operations_gcc_atomic< 1u, Signed, Interprocess >
{
};
#elif BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT16_LOCK_FREE >= BOOST_ATOMIC_INT8_LOCK_FREE
template< bool Signed, bool Interprocess >
struct core_operations< 1u, Signed, Interprocess > :
public extending_cas_based_arithmetic< core_operations_gcc_atomic< 2u, Signed, Interprocess >, 1u, Signed >
{
};
#elif BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT32_LOCK_FREE >= BOOST_ATOMIC_INT8_LOCK_FREE
template< bool Signed, bool Interprocess >
struct core_operations< 1u, Signed, Interprocess > :
public extending_cas_based_arithmetic< core_operations_gcc_atomic< 4u, Signed, Interprocess >, 1u, Signed >
{
};
#elif BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT64_LOCK_FREE >= BOOST_ATOMIC_INT8_LOCK_FREE
template< bool Signed, bool Interprocess >
struct core_operations< 1u, Signed, Interprocess > :
public extending_cas_based_arithmetic< core_operations_gcc_atomic< 8u, Signed, Interprocess >, 1u, Signed >
{
};
#elif BOOST_ATOMIC_DETAIL_GCC_ATOMIC_INT128_LOCK_FREE >= BOOST_ATOMIC_INT8_LOCK_FREE
template< bool Signed, bool Interprocess >
struct core_operations< 1u, Signed, Interprocess > :
public extending_cas_based_arithmetic< core_operations_gcc_atomic< 16u, Signed, Interprocess >, 1u, Signed >
{
};
#endif
#endif // BOOST_ATOMIC_INT8_LOCK_FREE > 0
} // namespace detail
} // namespace atomics
} // namespace boost
#include <boost/atomic/detail/footer.hpp>
#endif // BOOST_ATOMIC_DETAIL_CORE_OPS_GCC_ATOMIC_HPP_INCLUDED_

View File

@@ -0,0 +1,263 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2011 Helge Bahmann
* Copyright (c) 2013 Tim Blechmann
* Copyright (c) 2014 Andrey Semashev
*/
/*!
* \file atomic/detail/core_ops_gcc_sync.hpp
*
* This header contains implementation of the \c core_operations template.
*/
#ifndef BOOST_ATOMIC_DETAIL_CORE_OPS_GCC_SYNC_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_CORE_OPS_GCC_SYNC_HPP_INCLUDED_
#include <cstddef>
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/storage_traits.hpp>
#include <boost/atomic/detail/core_operations_fwd.hpp>
#include <boost/atomic/detail/extending_cas_based_arithmetic.hpp>
#include <boost/atomic/detail/type_traits/integral_constant.hpp>
#include <boost/atomic/detail/capabilities.hpp>
#include <boost/atomic/detail/header.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
namespace boost {
namespace atomics {
namespace detail {
struct core_operations_gcc_sync_base
{
static BOOST_CONSTEXPR_OR_CONST bool full_cas_based = false;
static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = true;
static BOOST_FORCEINLINE void fence_before_store(memory_order order) BOOST_NOEXCEPT
{
if ((static_cast< unsigned int >(order) & static_cast< unsigned int >(memory_order_release)) != 0u)
__sync_synchronize();
}
static BOOST_FORCEINLINE void fence_after_store(memory_order order) BOOST_NOEXCEPT
{
if (order == memory_order_seq_cst)
__sync_synchronize();
}
static BOOST_FORCEINLINE void fence_after_load(memory_order order) BOOST_NOEXCEPT
{
if ((static_cast< unsigned int >(order) & (static_cast< unsigned int >(memory_order_acquire) | static_cast< unsigned int >(memory_order_consume))) != 0u)
__sync_synchronize();
}
};
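// The helpers above map C++ memory orders onto full __sync_synchronize barriers: a
// release (or stronger) store is preceded by a barrier, a seq_cst store is additionally
// followed by one, and an acquire/consume load is followed by one. This is conservative
// (every barrier is a full fence), but full fences are all the legacy __sync interface
// provides.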
template< std::size_t Size, bool Signed, bool Interprocess >
struct core_operations_gcc_sync :
public core_operations_gcc_sync_base
{
typedef typename storage_traits< Size >::type storage_type;
static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = Size;
static BOOST_CONSTEXPR_OR_CONST std::size_t storage_alignment = storage_traits< storage_size >::alignment;
static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
static BOOST_CONSTEXPR_OR_CONST bool is_interprocess = Interprocess;
// In general, we cannot guarantee atomicity of plain loads and stores of anything larger than a single byte on
// an arbitrary CPU architecture. However, all modern architectures seem to guarantee atomic loads and stores of
// suitably aligned objects up to the size of a pointer. For larger objects we should probably use intrinsics to
// guarantee atomicity. If an architecture appears where this doesn't hold, this threshold needs to be updated (patches are welcome).
typedef atomics::detail::integral_constant< bool, storage_size <= sizeof(void*) > plain_stores_loads_are_atomic;
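// The constant above is used for tag dispatch: the true_type overloads of store/load
// below use plain (fenced) accesses, while the false_type overloads fall back to
// exchange and __sync_val_compare_and_swap.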
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
store(storage, v, order, plain_stores_loads_are_atomic());
}
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order, atomics::detail::true_type) BOOST_NOEXCEPT
{
fence_before_store(order);
storage = v;
fence_after_store(order);
}
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order, atomics::detail::false_type) BOOST_NOEXCEPT
{
exchange(storage, v, order);
}
static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
{
return load(storage, order, plain_stores_loads_are_atomic());
}
static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order, atomics::detail::true_type) BOOST_NOEXCEPT
{
storage_type v = storage;
fence_after_load(order);
return v;
}
static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order, atomics::detail::false_type) BOOST_NOEXCEPT
{
// Note: don't use fetch_add or other arithmetic operations here since storage_type may not be an arithmetic type.
storage_type expected = storage_type();
storage_type desired = expected;
// We don't care if CAS succeeds or not. If it does, it will just write the same value there was before.
return __sync_val_compare_and_swap(const_cast< storage_type volatile* >(&storage), expected, desired);
}
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
return __sync_fetch_and_add(&storage, v);
}
static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
return __sync_fetch_and_sub(&storage, v);
}
static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
// GCC docs mention that not all architectures may support full exchange semantics for this intrinsic. However, GCC's implementation of
// std::atomic<> uses this intrinsic unconditionally. We do so as well. If some architectures actually don't support this, we can always
// add a check here and fall back to a CAS loop.
if ((static_cast< unsigned int >(order) & static_cast< unsigned int >(memory_order_release)) != 0u)
__sync_synchronize();
return __sync_lock_test_and_set(&storage, v);
}
static BOOST_FORCEINLINE bool compare_exchange_strong(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT
{
storage_type expected2 = expected;
storage_type old_val = __sync_val_compare_and_swap(&storage, expected2, desired);
if (old_val == expected2)
{
return true;
}
else
{
expected = old_val;
return false;
}
}
static BOOST_FORCEINLINE bool compare_exchange_weak(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
{
return compare_exchange_strong(storage, expected, desired, success_order, failure_order);
}
static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
return __sync_fetch_and_and(&storage, v);
}
static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
return __sync_fetch_and_or(&storage, v);
}
static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
return __sync_fetch_and_xor(&storage, v);
}
static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
if ((static_cast< unsigned int >(order) & static_cast< unsigned int >(memory_order_release)) != 0u)
__sync_synchronize();
return !!__sync_lock_test_and_set(&storage, 1);
}
static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
__sync_lock_release(&storage);
if (order == memory_order_seq_cst)
__sync_synchronize();
}
};
#if BOOST_ATOMIC_INT8_LOCK_FREE > 0
template< bool Signed, bool Interprocess >
struct core_operations< 1u, Signed, Interprocess > :
#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1)
public core_operations_gcc_sync< 1u, Signed, Interprocess >
#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2)
public extending_cas_based_arithmetic< core_operations_gcc_sync< 2u, Signed, Interprocess >, 1u, Signed >
#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
public extending_cas_based_arithmetic< core_operations_gcc_sync< 4u, Signed, Interprocess >, 1u, Signed >
#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)
public extending_cas_based_arithmetic< core_operations_gcc_sync< 8u, Signed, Interprocess >, 1u, Signed >
#else
public extending_cas_based_arithmetic< core_operations_gcc_sync< 16u, Signed, Interprocess >, 1u, Signed >
#endif
{
};
#endif
#if BOOST_ATOMIC_INT16_LOCK_FREE > 0
template< bool Signed, bool Interprocess >
struct core_operations< 2u, Signed, Interprocess > :
#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2)
public core_operations_gcc_sync< 2u, Signed, Interprocess >
#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
public extending_cas_based_arithmetic< core_operations_gcc_sync< 4u, Signed, Interprocess >, 2u, Signed >
#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)
public extending_cas_based_arithmetic< core_operations_gcc_sync< 8u, Signed, Interprocess >, 2u, Signed >
#else
public extending_cas_based_arithmetic< core_operations_gcc_sync< 16u, Signed, Interprocess >, 2u, Signed >
#endif
{
};
#endif
#if BOOST_ATOMIC_INT32_LOCK_FREE > 0
template< bool Signed, bool Interprocess >
struct core_operations< 4u, Signed, Interprocess > :
#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
public core_operations_gcc_sync< 4u, Signed, Interprocess >
#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)
public extending_cas_based_arithmetic< core_operations_gcc_sync< 8u, Signed, Interprocess >, 4u, Signed >
#else
public extending_cas_based_arithmetic< core_operations_gcc_sync< 16u, Signed, Interprocess >, 4u, Signed >
#endif
{
};
#endif
#if BOOST_ATOMIC_INT64_LOCK_FREE > 0
template< bool Signed, bool Interprocess >
struct core_operations< 8u, Signed, Interprocess > :
#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)
public core_operations_gcc_sync< 8u, Signed, Interprocess >
#else
public extending_cas_based_arithmetic< core_operations_gcc_sync< 16u, Signed, Interprocess >, 8u, Signed >
#endif
{
};
#endif
#if BOOST_ATOMIC_INT128_LOCK_FREE > 0
template< bool Signed, bool Interprocess >
struct core_operations< 16u, Signed, Interprocess > :
public core_operations_gcc_sync< 16u, Signed, Interprocess >
{
};
#endif
} // namespace detail
} // namespace atomics
} // namespace boost
#include <boost/atomic/detail/footer.hpp>
#endif // BOOST_ATOMIC_DETAIL_CORE_OPS_GCC_SYNC_HPP_INCLUDED_

View File

@@ -0,0 +1,169 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2009, 2011 Helge Bahmann
* Copyright (c) 2009 Phil Endecott
* Copyright (c) 2013 Tim Blechmann
* Linux-specific code by Phil Endecott
* Copyright (c) 2014 Andrey Semashev
*/
/*!
* \file atomic/detail/core_ops_linux_arm.hpp
*
* This header contains implementation of the \c core_operations template.
*/
#ifndef BOOST_ATOMIC_DETAIL_CORE_OPS_LINUX_ARM_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_CORE_OPS_LINUX_ARM_HPP_INCLUDED_
#include <cstddef>
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/storage_traits.hpp>
#include <boost/atomic/detail/core_operations_fwd.hpp>
#include <boost/atomic/detail/core_ops_cas_based.hpp>
#include <boost/atomic/detail/cas_based_exchange.hpp>
#include <boost/atomic/detail/extending_cas_based_arithmetic.hpp>
#include <boost/atomic/detail/fence_operations.hpp>
#include <boost/atomic/detail/header.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
namespace boost {
namespace atomics {
namespace detail {
// Different ARM processors have different atomic instructions. In particular,
// architecture versions before v6 (which are still in widespread use, e.g. the
// Intel/Marvell XScale chips like the one in the NSLU2) have only atomic swap.
// On Linux the kernel provides some support that lets us abstract away from
// these differences: it provides emulated CAS and barrier functions at special
// addresses that are guaranteed not to be interrupted by the kernel. Using
// this facility is slightly slower than inline assembler would be, but much
// faster than a system call.
//
// https://lwn.net/Articles/314561/
//
// While this emulated CAS is "strong" in the sense that it does not fail
// "spuriously" (i.e.: it never fails to perform the exchange when the value
// found equals the value expected), it does not return the found value on
// failure. To satisfy the atomic API, compare_exchange_{weak|strong} must
// return the found value on failure, and we have to manually load this value
// after the emulated CAS reports failure. This in turn introduces a race
// between the CAS failing (due to the "wrong" value being found) and subsequently
// loading (which might turn up the "right" value). From an application's
// point of view this looks like "spurious failure", and therefore the
// emulated CAS is only good enough to provide compare_exchange_weak
// semantics.
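// The helper used below behaves roughly like the following declaration (see the
// kernel's Documentation/arm/kernel_user_helpers.txt; the name __kuser_cmpxchg is the
// kernel's, and its address is fixed):
//
//   // int __kuser_cmpxchg(int32_t oldval, int32_t newval, volatile int32_t* ptr);
//   // Returns 0 if *ptr contained oldval and was atomically set to newval, and
//   // non-zero otherwise. The helper includes the needed memory barriers itself.
//
// compare_exchange_weak casts the fixed address 0xffff0fc0 to such a function pointer
// and, on failure, reloads the storage to produce the "found" value, which is the
// source of the apparent spurious failures described above.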
struct linux_arm_cas_base
{
static BOOST_CONSTEXPR_OR_CONST bool full_cas_based = true;
static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = true;
static BOOST_FORCEINLINE void fence_before_store(memory_order order) BOOST_NOEXCEPT
{
if ((static_cast< unsigned int >(order) & static_cast< unsigned int >(memory_order_release)) != 0u)
fence_operations::hardware_full_fence();
}
static BOOST_FORCEINLINE void fence_after_store(memory_order order) BOOST_NOEXCEPT
{
if (order == memory_order_seq_cst)
fence_operations::hardware_full_fence();
}
static BOOST_FORCEINLINE void fence_after_load(memory_order order) BOOST_NOEXCEPT
{
if ((static_cast< unsigned int >(order) & (static_cast< unsigned int >(memory_order_consume) | static_cast< unsigned int >(memory_order_acquire))) != 0u)
fence_operations::hardware_full_fence();
}
};
template< bool Signed, bool Interprocess >
struct linux_arm_cas :
public linux_arm_cas_base
{
typedef typename storage_traits< 4u >::type storage_type;
static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 4u;
static BOOST_CONSTEXPR_OR_CONST std::size_t storage_alignment = 4u;
static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
static BOOST_CONSTEXPR_OR_CONST bool is_interprocess = Interprocess;
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
fence_before_store(order);
storage = v;
fence_after_store(order);
}
static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
{
storage_type v = storage;
fence_after_load(order);
return v;
}
static BOOST_FORCEINLINE bool compare_exchange_strong(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
{
while (true)
{
storage_type tmp = expected;
if (compare_exchange_weak(storage, tmp, desired, success_order, failure_order))
return true;
if (tmp != expected)
{
expected = tmp;
return false;
}
}
}
static BOOST_FORCEINLINE bool compare_exchange_weak(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT
{
typedef storage_type (*kernel_cmpxchg32_t)(storage_type oldval, storage_type newval, volatile storage_type* ptr);
if (((kernel_cmpxchg32_t)0xffff0fc0)(expected, desired, &storage) == 0)
{
return true;
}
else
{
expected = storage;
return false;
}
}
};
template< bool Signed, bool Interprocess >
struct core_operations< 1u, Signed, Interprocess > :
public extending_cas_based_arithmetic< core_operations_cas_based< cas_based_exchange< linux_arm_cas< Signed, Interprocess > > >, 1u, Signed >
{
};
template< bool Signed, bool Interprocess >
struct core_operations< 2u, Signed, Interprocess > :
public extending_cas_based_arithmetic< core_operations_cas_based< cas_based_exchange< linux_arm_cas< Signed, Interprocess > > >, 2u, Signed >
{
};
template< bool Signed, bool Interprocess >
struct core_operations< 4u, Signed, Interprocess > :
public core_operations_cas_based< cas_based_exchange< linux_arm_cas< Signed, Interprocess > > >
{
};
} // namespace detail
} // namespace atomics
} // namespace boost
#include <boost/atomic/detail/footer.hpp>
#endif // BOOST_ATOMIC_DETAIL_CORE_OPS_LINUX_ARM_HPP_INCLUDED_

View File

@@ -0,0 +1,201 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2009 Helge Bahmann
* Copyright (c) 2012 Tim Blechmann
* Copyright (c) 2014 Andrey Semashev
*/
/*!
* \file atomic/detail/core_ops_windows.hpp
*
* This header contains implementation of the \c core_operations template.
*
* This implementation is the most basic version for Windows. It should
* work for any non-MSVC-like compiler as long as the Interlocked WinAPI
* functions are available. This version is also used for WinCE.
*
* Notably, this implementation is not as efficient as other
* versions based on compiler intrinsics.
*/
#ifndef BOOST_ATOMIC_DETAIL_CORE_OPS_WINDOWS_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_CORE_OPS_WINDOWS_HPP_INCLUDED_
#include <cstddef>
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/interlocked.hpp>
#include <boost/atomic/detail/storage_traits.hpp>
#include <boost/atomic/detail/core_operations_fwd.hpp>
#include <boost/atomic/detail/type_traits/make_signed.hpp>
#include <boost/atomic/detail/ops_msvc_common.hpp>
#include <boost/atomic/detail/extending_cas_based_arithmetic.hpp>
#include <boost/atomic/detail/header.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
namespace boost {
namespace atomics {
namespace detail {
struct core_operations_windows_base
{
static BOOST_CONSTEXPR_OR_CONST bool full_cas_based = false;
static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = true;
static BOOST_FORCEINLINE void fence_before(memory_order) BOOST_NOEXCEPT
{
BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
}
static BOOST_FORCEINLINE void fence_after(memory_order) BOOST_NOEXCEPT
{
BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
}
};
template< std::size_t Size, bool Signed, bool Interprocess, typename Derived >
struct core_operations_windows :
public core_operations_windows_base
{
typedef typename storage_traits< Size >::type storage_type;
static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = Size;
static BOOST_CONSTEXPR_OR_CONST std::size_t storage_alignment = storage_traits< Size >::alignment;
static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
static BOOST_CONSTEXPR_OR_CONST bool is_interprocess = Interprocess;
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
Derived::exchange(storage, v, order);
}
static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
{
return Derived::fetch_add(const_cast< storage_type volatile& >(storage), (storage_type)0, order);
}
static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
typedef typename boost::atomics::detail::make_signed< storage_type >::type signed_storage_type;
return Derived::fetch_add(storage, static_cast< storage_type >(-static_cast< signed_storage_type >(v)), order);
}
static BOOST_FORCEINLINE bool compare_exchange_weak(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
{
return Derived::compare_exchange_strong(storage, expected, desired, success_order, failure_order);
}
static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
return !!Derived::exchange(storage, (storage_type)1, order);
}
static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
store(storage, (storage_type)0, order);
}
};
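// core_operations_windows is a CRTP base: Derived supplies exchange, fetch_add and
// compare_exchange_strong via the Interlocked API, and the base synthesizes the rest
// from them. In particular, load is implemented as fetch_add of 0 (a full RMW), which
// is suboptimal but portable to any compiler that only exposes the Interlocked
// functions, as noted in the header comment.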
template< bool Signed, bool Interprocess >
struct core_operations< 4u, Signed, Interprocess > :
public core_operations_windows< 4u, Signed, Interprocess, core_operations< 4u, Signed, Interprocess > >
{
typedef core_operations_windows< 4u, Signed, Interprocess, core_operations< 4u, Signed, Interprocess > > base_type;
typedef typename base_type::storage_type storage_type;
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
base_type::fence_before(order);
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(&storage, v));
base_type::fence_after(order);
return v;
}
static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
base_type::fence_before(order);
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE(&storage, v));
base_type::fence_after(order);
return v;
}
static BOOST_FORCEINLINE bool compare_exchange_strong(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
{
storage_type previous = expected;
base_type::fence_before(success_order);
storage_type old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(&storage, desired, previous));
expected = old_val;
// The success and failure fences are the same anyway
base_type::fence_after(success_order);
return (previous == old_val);
}
static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
#if defined(BOOST_ATOMIC_INTERLOCKED_AND)
base_type::fence_before(order);
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND(&storage, v));
base_type::fence_after(order);
return v;
#else
storage_type res = storage;
while (!compare_exchange_strong(storage, res, res & v, order, memory_order_relaxed)) {}
return res;
#endif
}
static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
#if defined(BOOST_ATOMIC_INTERLOCKED_OR)
base_type::fence_before(order);
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR(&storage, v));
base_type::fence_after(order);
return v;
#else
storage_type res = storage;
while (!compare_exchange_strong(storage, res, res | v, order, memory_order_relaxed)) {}
return res;
#endif
}
static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
#if defined(BOOST_ATOMIC_INTERLOCKED_XOR)
base_type::fence_before(order);
v = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR(&storage, v));
base_type::fence_after(order);
return v;
#else
storage_type res = storage;
while (!compare_exchange_strong(storage, res, res ^ v, order, memory_order_relaxed)) {}
return res;
#endif
}
};
template< bool Signed, bool Interprocess >
struct core_operations< 1u, Signed, Interprocess > :
public extending_cas_based_arithmetic< core_operations< 4u, Signed, Interprocess >, 1u, Signed >
{
};
template< bool Signed, bool Interprocess >
struct core_operations< 2u, Signed, Interprocess > :
public extending_cas_based_arithmetic< core_operations< 4u, Signed, Interprocess >, 2u, Signed >
{
};
} // namespace detail
} // namespace atomics
} // namespace boost
#include <boost/atomic/detail/footer.hpp>
#endif // BOOST_ATOMIC_DETAIL_CORE_OPS_WINDOWS_HPP_INCLUDED_

View File

@@ -0,0 +1,72 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2014 Andrey Semashev
*/
/*!
* \file atomic/detail/extending_cas_based_arithmetic.hpp
*
* This header contains boilerplate for core atomic operations that require sign/zero extension in arithmetic operations.
*/
#ifndef BOOST_ATOMIC_DETAIL_EXTENDING_CAS_BASED_ARITHMETIC_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_EXTENDING_CAS_BASED_ARITHMETIC_HPP_INCLUDED_
#include <cstddef>
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/storage_traits.hpp>
#include <boost/atomic/detail/integral_conversions.hpp>
#include <boost/atomic/detail/header.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
namespace boost {
namespace atomics {
namespace detail {
template< typename Base, std::size_t Size, bool Signed >
struct extending_cas_based_arithmetic :
public Base
{
typedef typename Base::storage_type storage_type;
typedef typename storage_traits< Size >::type emulated_storage_type;
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type old_val;
atomics::detail::non_atomic_load(storage, old_val);
storage_type new_val;
do
{
new_val = atomics::detail::integral_extend< Signed, storage_type >(static_cast< emulated_storage_type >(old_val + v));
}
while (!Base::compare_exchange_weak(storage, old_val, new_val, order, memory_order_relaxed));
return old_val;
}
static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type old_val;
atomics::detail::non_atomic_load(storage, old_val);
storage_type new_val;
do
{
new_val = atomics::detail::integral_extend< Signed, storage_type >(static_cast< emulated_storage_type >(old_val - v));
}
while (!Base::compare_exchange_weak(storage, old_val, new_val, order, memory_order_relaxed));
return old_val;
}
};
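// A worked example of the extension (assuming Size == 1u within 4-byte storage and
// Signed == true): fetch_add(0x0000007F, 1) computes 0x80 in the 1-byte emulated type,
// and integral_extend sign-extends it back to the full storage width, storing
// 0xFFFFFF80 rather than 0x00000080. The upper bits thus stay consistent with what a
// genuinely 1-byte signed atomic would produce.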
} // namespace detail
} // namespace atomics
} // namespace boost
#include <boost/atomic/detail/footer.hpp>
#endif // BOOST_ATOMIC_DETAIL_EXTENDING_CAS_BASED_ARITHMETIC_HPP_INCLUDED_

View File

@@ -0,0 +1,28 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2018 Andrey Semashev
*/
/*!
* \file atomic/detail/extra_fp_operations.hpp
*
* This header defines extra floating point atomic operations, including the generic version.
*/
#ifndef BOOST_ATOMIC_DETAIL_EXTRA_FP_OPERATIONS_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_EXTRA_FP_OPERATIONS_HPP_INCLUDED_
#include <boost/atomic/detail/extra_fp_ops_generic.hpp>
#include <boost/atomic/detail/extra_fp_ops_emulated.hpp>
#if !defined(BOOST_ATOMIC_DETAIL_EXTRA_FP_BACKEND_GENERIC)
#include BOOST_ATOMIC_DETAIL_EXTRA_FP_BACKEND_HEADER(boost/atomic/detail/extra_fp_ops_)
#endif
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
#endif // BOOST_ATOMIC_DETAIL_EXTRA_FP_OPERATIONS_HPP_INCLUDED_

View File

@@ -0,0 +1,38 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2018 Andrey Semashev
*/
/*!
* \file atomic/detail/extra_fp_operations_fwd.hpp
*
* This header contains forward declaration of the \c extra_fp_operations template.
*/
#ifndef BOOST_ATOMIC_DETAIL_EXTRA_FP_OPERATIONS_FWD_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_EXTRA_FP_OPERATIONS_FWD_HPP_INCLUDED_
#include <cstddef>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/header.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
namespace boost {
namespace atomics {
namespace detail {
template< typename Base, typename Value = typename Base::value_type, std::size_t Size = sizeof(typename Base::storage_type), bool = Base::is_always_lock_free >
struct extra_fp_operations;
} // namespace detail
} // namespace atomics
} // namespace boost
#include <boost/atomic/detail/footer.hpp>
#endif // BOOST_ATOMIC_DETAIL_EXTRA_FP_OPERATIONS_FWD_HPP_INCLUDED_

View File

@@ -0,0 +1,118 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2018 Andrey Semashev
*/
/*!
* \file atomic/detail/extra_fp_ops_emulated.hpp
*
* This header contains emulated (lock-based) implementation of the extra floating point atomic operations.
*/
#ifndef BOOST_ATOMIC_DETAIL_EXTRA_FP_OPS_EMULATED_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_EXTRA_FP_OPS_EMULATED_HPP_INCLUDED_
#include <cstddef>
#include <boost/static_assert.hpp>
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/bitwise_fp_cast.hpp>
#include <boost/atomic/detail/extra_fp_operations_fwd.hpp>
#include <boost/atomic/detail/header.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
namespace boost {
namespace atomics {
namespace detail {
//! Emulated implementation of extra floating point operations
template< typename Base, typename Value, std::size_t Size >
struct extra_fp_operations_emulated :
public Base
{
typedef Base base_type;
typedef typename base_type::storage_type storage_type;
typedef Value value_type;
typedef typename base_type::scoped_lock scoped_lock;
static value_type fetch_negate(storage_type volatile& storage, memory_order) BOOST_NOEXCEPT
{
BOOST_STATIC_ASSERT_MSG(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
storage_type& s = const_cast< storage_type& >(storage);
scoped_lock lock(&storage);
value_type old_val = atomics::detail::bitwise_fp_cast< value_type >(s);
value_type new_val = -old_val;
s = atomics::detail::bitwise_fp_cast< storage_type >(new_val);
return old_val;
}
static value_type negate(storage_type volatile& storage, memory_order) BOOST_NOEXCEPT
{
BOOST_STATIC_ASSERT_MSG(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
storage_type& s = const_cast< storage_type& >(storage);
scoped_lock lock(&storage);
value_type old_val = atomics::detail::bitwise_fp_cast< value_type >(s);
value_type new_val = -old_val;
s = atomics::detail::bitwise_fp_cast< storage_type >(new_val);
return new_val;
}
static value_type add(storage_type volatile& storage, value_type v, memory_order) BOOST_NOEXCEPT
{
BOOST_STATIC_ASSERT_MSG(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
storage_type& s = const_cast< storage_type& >(storage);
scoped_lock lock(&storage);
value_type old_val = atomics::detail::bitwise_fp_cast< value_type >(s);
value_type new_val = old_val + v;
s = atomics::detail::bitwise_fp_cast< storage_type >(new_val);
return new_val;
}
static value_type sub(storage_type volatile& storage, value_type v, memory_order) BOOST_NOEXCEPT
{
BOOST_STATIC_ASSERT_MSG(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
storage_type& s = const_cast< storage_type& >(storage);
scoped_lock lock(&storage);
value_type old_val = atomics::detail::bitwise_fp_cast< value_type >(s);
value_type new_val = old_val - v;
s = atomics::detail::bitwise_fp_cast< storage_type >(new_val);
return new_val;
}
static BOOST_FORCEINLINE void opaque_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
BOOST_STATIC_ASSERT_MSG(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
fetch_negate(storage, order);
}
static BOOST_FORCEINLINE void opaque_add(storage_type volatile& storage, value_type v, memory_order order) BOOST_NOEXCEPT
{
BOOST_STATIC_ASSERT_MSG(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
base_type::fetch_add(storage, v, order);
}
static BOOST_FORCEINLINE void opaque_sub(storage_type volatile& storage, value_type v, memory_order order) BOOST_NOEXCEPT
{
BOOST_STATIC_ASSERT_MSG(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
base_type::fetch_sub(storage, v, order);
}
};
template< typename Base, typename Value, std::size_t Size >
struct extra_fp_operations< Base, Value, Size, false > :
public extra_fp_operations_emulated< Base, Value, Size >
{
};
} // namespace detail
} // namespace atomics
} // namespace boost
#include <boost/atomic/detail/footer.hpp>
#endif // BOOST_ATOMIC_DETAIL_EXTRA_FP_OPS_EMULATED_HPP_INCLUDED_

View File

@@ -0,0 +1,192 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2018 Andrey Semashev
*/
/*!
* \file atomic/detail/extra_fp_ops_generic.hpp
*
* This header contains generic implementation of the extra floating point atomic operations.
*/
#ifndef BOOST_ATOMIC_DETAIL_EXTRA_FP_OPS_GENERIC_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_EXTRA_FP_OPS_GENERIC_HPP_INCLUDED_
#include <cstddef>
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/bitwise_fp_cast.hpp>
#include <boost/atomic/detail/storage_traits.hpp>
#include <boost/atomic/detail/extra_fp_operations_fwd.hpp>
#include <boost/atomic/detail/type_traits/is_iec559.hpp>
#include <boost/atomic/detail/type_traits/is_integral.hpp>
#include <boost/atomic/detail/header.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
#if defined(BOOST_GCC) && BOOST_GCC >= 60000
#pragma GCC diagnostic push
// ignoring attributes on template argument X - this warning is because we need to pass storage_type as a template argument; no problem in this case
#pragma GCC diagnostic ignored "-Wignored-attributes"
#endif
namespace boost {
namespace atomics {
namespace detail {
//! Negate implementation
template<
typename Base,
typename Value,
std::size_t Size
#if defined(BOOST_ATOMIC_DETAIL_INT_FP_ENDIAN_MATCH)
, bool = atomics::detail::is_iec559< Value >::value && atomics::detail::is_integral< typename Base::storage_type >::value
#endif
>
struct extra_fp_negate_generic :
public Base
{
typedef Base base_type;
typedef typename base_type::storage_type storage_type;
typedef Value value_type;
static BOOST_FORCEINLINE value_type fetch_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
storage_type old_storage, new_storage;
value_type old_val, new_val;
atomics::detail::non_atomic_load(storage, old_storage);
do
{
old_val = atomics::detail::bitwise_fp_cast< value_type >(old_storage);
new_val = -old_val;
new_storage = atomics::detail::bitwise_fp_cast< storage_type >(new_val);
}
while (!base_type::compare_exchange_weak(storage, old_storage, new_storage, order, memory_order_relaxed));
return old_val;
}
static BOOST_FORCEINLINE value_type negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
storage_type old_storage, new_storage;
value_type old_val, new_val;
atomics::detail::non_atomic_load(storage, old_storage);
do
{
old_val = atomics::detail::bitwise_fp_cast< value_type >(old_storage);
new_val = -old_val;
new_storage = atomics::detail::bitwise_fp_cast< storage_type >(new_val);
}
while (!base_type::compare_exchange_weak(storage, old_storage, new_storage, order, memory_order_relaxed));
return new_val;
}
static BOOST_FORCEINLINE void opaque_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
fetch_negate(storage, order);
}
};
#if defined(BOOST_ATOMIC_DETAIL_INT_FP_ENDIAN_MATCH)
//! Negate implementation for IEEE 754 / IEC 559 floating point types. We leverage the fact that the sign bit is the most significant bit in the value.
template< typename Base, typename Value, std::size_t Size >
struct extra_fp_negate_generic< Base, Value, Size, true > :
public Base
{
typedef Base base_type;
typedef typename base_type::storage_type storage_type;
typedef Value value_type;
//! The mask with only the sign bit set to 1
static BOOST_CONSTEXPR_OR_CONST storage_type sign_mask = static_cast< storage_type >(1u) << (atomics::detail::value_size_of< value_type >::value * 8u - 1u);
static BOOST_FORCEINLINE value_type fetch_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
return atomics::detail::bitwise_fp_cast< value_type >(base_type::fetch_xor(storage, sign_mask, order));
}
static BOOST_FORCEINLINE value_type negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
return atomics::detail::bitwise_fp_cast< value_type >(base_type::bitwise_xor(storage, sign_mask, order));
}
static BOOST_FORCEINLINE void opaque_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
base_type::opaque_xor(storage, sign_mask, order);
}
};
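// Worked example (IEEE 754 binary32): sign_mask == 0x80000000, and negating 2.0f
// (bit pattern 0x40000000) via fetch_xor yields 0xC0000000, i.e. -2.0f, with a single
// atomic XOR and no CAS loop. Since IEEE negation is defined as flipping the sign bit,
// this is also correct for zeros, infinities and NaNs.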
#endif // defined(BOOST_ATOMIC_DETAIL_INT_FP_ENDIAN_MATCH)
//! Generic implementation of floating point operations
template< typename Base, typename Value, std::size_t Size >
struct extra_fp_operations_generic :
public extra_fp_negate_generic< Base, Value, Size >
{
typedef extra_fp_negate_generic< Base, Value, Size > base_type;
typedef typename base_type::storage_type storage_type;
typedef Value value_type;
static BOOST_FORCEINLINE value_type add(storage_type volatile& storage, value_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type old_storage, new_storage;
value_type old_val, new_val;
atomics::detail::non_atomic_load(storage, old_storage);
do
{
old_val = atomics::detail::bitwise_fp_cast< value_type >(old_storage);
new_val = old_val + v;
new_storage = atomics::detail::bitwise_fp_cast< storage_type >(new_val);
}
while (!base_type::compare_exchange_weak(storage, old_storage, new_storage, order, memory_order_relaxed));
return new_val;
}
static BOOST_FORCEINLINE value_type sub(storage_type volatile& storage, value_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type old_storage, new_storage;
value_type old_val, new_val;
atomics::detail::non_atomic_load(storage, old_storage);
do
{
old_val = atomics::detail::bitwise_fp_cast< value_type >(old_storage);
new_val = old_val - v;
new_storage = atomics::detail::bitwise_fp_cast< storage_type >(new_val);
}
while (!base_type::compare_exchange_weak(storage, old_storage, new_storage, order, memory_order_relaxed));
return new_val;
}
static BOOST_FORCEINLINE void opaque_add(storage_type volatile& storage, value_type v, memory_order order) BOOST_NOEXCEPT
{
base_type::fetch_add(storage, v, order);
}
static BOOST_FORCEINLINE void opaque_sub(storage_type volatile& storage, value_type v, memory_order order) BOOST_NOEXCEPT
{
base_type::fetch_sub(storage, v, order);
}
};
// Default extra_fp_operations template definition will be used unless specialized for a specific platform
template< typename Base, typename Value, std::size_t Size >
struct extra_fp_operations< Base, Value, Size, true > :
public extra_fp_operations_generic< Base, Value, Size >
{
};
} // namespace detail
} // namespace atomics
} // namespace boost
#if defined(BOOST_GCC) && BOOST_GCC >= 60000
#pragma GCC diagnostic pop
#endif
#include <boost/atomic/detail/footer.hpp>
#endif // BOOST_ATOMIC_DETAIL_EXTRA_FP_OPS_GENERIC_HPP_INCLUDED_

View File

@@ -0,0 +1,28 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2017 Andrey Semashev
*/
/*!
* \file atomic/detail/extra_operations.hpp
*
* This header defines extra atomic operations, including the generic version.
*/
#ifndef BOOST_ATOMIC_DETAIL_EXTRA_OPERATIONS_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_EXTRA_OPERATIONS_HPP_INCLUDED_
#include <boost/atomic/detail/extra_ops_generic.hpp>
#include <boost/atomic/detail/extra_ops_emulated.hpp>
#if !defined(BOOST_ATOMIC_DETAIL_EXTRA_BACKEND_GENERIC)
#include BOOST_ATOMIC_DETAIL_EXTRA_BACKEND_HEADER(boost/atomic/detail/extra_ops_)
#endif
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
#endif // BOOST_ATOMIC_DETAIL_EXTRA_OPERATIONS_HPP_INCLUDED_

View File

@@ -0,0 +1,38 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2017 Andrey Semashev
*/
/*!
* \file atomic/detail/extra_operations_fwd.hpp
*
* This header contains forward declaration of the \c extra_operations template.
*/
#ifndef BOOST_ATOMIC_DETAIL_EXTRA_OPERATIONS_FWD_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_EXTRA_OPERATIONS_FWD_HPP_INCLUDED_
#include <cstddef>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/header.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
namespace boost {
namespace atomics {
namespace detail {
template< typename Base, std::size_t Size = sizeof(typename Base::storage_type), bool Signed = Base::is_signed, bool = Base::is_always_lock_free >
struct extra_operations;
} // namespace detail
} // namespace atomics
} // namespace boost
#include <boost/atomic/detail/footer.hpp>
#endif // BOOST_ATOMIC_DETAIL_EXTRA_OPERATIONS_FWD_HPP_INCLUDED_

View File

@@ -0,0 +1,258 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2018 Andrey Semashev
*/
/*!
* \file atomic/detail/extra_ops_emulated.hpp
*
* This header contains emulated (lock-based) implementation of the extra atomic operations.
*/
#ifndef BOOST_ATOMIC_DETAIL_EXTRA_OPS_EMULATED_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_EXTRA_OPS_EMULATED_HPP_INCLUDED_
#include <cstddef>
#include <boost/static_assert.hpp>
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/storage_traits.hpp>
#include <boost/atomic/detail/extra_operations_fwd.hpp>
#include <boost/atomic/detail/header.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
namespace boost {
namespace atomics {
namespace detail {
//! Emulated implementation of extra operations
template< typename Base, std::size_t Size, bool Signed >
struct extra_operations_emulated :
public Base
{
typedef Base base_type;
typedef typename base_type::storage_type storage_type;
typedef typename base_type::scoped_lock scoped_lock;
static storage_type fetch_negate(storage_type volatile& storage, memory_order) BOOST_NOEXCEPT
{
BOOST_STATIC_ASSERT_MSG(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
storage_type& s = const_cast< storage_type& >(storage);
scoped_lock lock(&storage);
storage_type old_val = s;
s = static_cast< storage_type >(-old_val);
return old_val;
}
static storage_type negate(storage_type volatile& storage, memory_order) BOOST_NOEXCEPT
{
BOOST_STATIC_ASSERT_MSG(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
storage_type& s = const_cast< storage_type& >(storage);
scoped_lock lock(&storage);
storage_type new_val = static_cast< storage_type >(-s);
s = new_val;
return new_val;
}
static storage_type add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
BOOST_STATIC_ASSERT_MSG(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
storage_type& s = const_cast< storage_type& >(storage);
scoped_lock lock(&storage);
storage_type new_val = s;
new_val += v;
s = new_val;
return new_val;
}
static storage_type sub(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
BOOST_STATIC_ASSERT_MSG(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
storage_type& s = const_cast< storage_type& >(storage);
scoped_lock lock(&storage);
storage_type new_val = s;
new_val -= v;
s = new_val;
return new_val;
}
static storage_type bitwise_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
BOOST_STATIC_ASSERT_MSG(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
storage_type& s = const_cast< storage_type& >(storage);
scoped_lock lock(&storage);
storage_type new_val = s;
new_val &= v;
s = new_val;
return new_val;
}
static storage_type bitwise_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
BOOST_STATIC_ASSERT_MSG(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
storage_type& s = const_cast< storage_type& >(storage);
scoped_lock lock(&storage);
storage_type new_val = s;
new_val |= v;
s = new_val;
return new_val;
}
static storage_type bitwise_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
BOOST_STATIC_ASSERT_MSG(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
storage_type& s = const_cast< storage_type& >(storage);
scoped_lock lock(&storage);
storage_type new_val = s;
new_val ^= v;
s = new_val;
return new_val;
}
static storage_type fetch_complement(storage_type volatile& storage, memory_order) BOOST_NOEXCEPT
{
BOOST_STATIC_ASSERT_MSG(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
storage_type& s = const_cast< storage_type& >(storage);
scoped_lock lock(&storage);
storage_type old_val = s;
s = static_cast< storage_type >(~old_val);
return old_val;
}
static storage_type bitwise_complement(storage_type volatile& storage, memory_order) BOOST_NOEXCEPT
{
BOOST_STATIC_ASSERT_MSG(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
storage_type& s = const_cast< storage_type& >(storage);
scoped_lock lock(&storage);
storage_type new_val = static_cast< storage_type >(~s);
s = new_val;
return new_val;
}
static BOOST_FORCEINLINE void opaque_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
BOOST_STATIC_ASSERT_MSG(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
base_type::fetch_add(storage, v, order);
}
static BOOST_FORCEINLINE void opaque_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
BOOST_STATIC_ASSERT_MSG(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
base_type::fetch_sub(storage, v, order);
}
static BOOST_FORCEINLINE void opaque_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
BOOST_STATIC_ASSERT_MSG(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
fetch_negate(storage, order);
}
static BOOST_FORCEINLINE void opaque_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
BOOST_STATIC_ASSERT_MSG(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
base_type::fetch_and(storage, v, order);
}
static BOOST_FORCEINLINE void opaque_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
BOOST_STATIC_ASSERT_MSG(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
base_type::fetch_or(storage, v, order);
}
static BOOST_FORCEINLINE void opaque_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
BOOST_STATIC_ASSERT_MSG(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
base_type::fetch_xor(storage, v, order);
}
static BOOST_FORCEINLINE void opaque_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
BOOST_STATIC_ASSERT_MSG(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
fetch_complement(storage, order);
}
static BOOST_FORCEINLINE bool add_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
BOOST_STATIC_ASSERT_MSG(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
return !!add(storage, v, order);
}
static BOOST_FORCEINLINE bool sub_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
BOOST_STATIC_ASSERT_MSG(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
return !!sub(storage, v, order);
}
static BOOST_FORCEINLINE bool negate_and_test(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
BOOST_STATIC_ASSERT_MSG(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
return !!negate(storage, order);
}
static BOOST_FORCEINLINE bool and_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
BOOST_STATIC_ASSERT_MSG(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
return !!bitwise_and(storage, v, order);
}
static BOOST_FORCEINLINE bool or_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
BOOST_STATIC_ASSERT_MSG(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
return !!bitwise_or(storage, v, order);
}
static BOOST_FORCEINLINE bool xor_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
BOOST_STATIC_ASSERT_MSG(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
return !!bitwise_xor(storage, v, order);
}
static BOOST_FORCEINLINE bool complement_and_test(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
BOOST_STATIC_ASSERT_MSG(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
return !!bitwise_complement(storage, order);
}
static BOOST_FORCEINLINE bool bit_test_and_set(storage_type volatile& storage, unsigned int bit_number, memory_order order) BOOST_NOEXCEPT
{
BOOST_STATIC_ASSERT_MSG(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
storage_type mask = static_cast< storage_type >(static_cast< storage_type >(1u) << bit_number);
storage_type old_val = base_type::fetch_or(storage, mask, order);
return !!(old_val & mask);
}
static BOOST_FORCEINLINE bool bit_test_and_reset(storage_type volatile& storage, unsigned int bit_number, memory_order order) BOOST_NOEXCEPT
{
BOOST_STATIC_ASSERT_MSG(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
storage_type mask = static_cast< storage_type >(static_cast< storage_type >(1u) << bit_number);
storage_type old_val = base_type::fetch_and(storage, ~mask, order);
return !!(old_val & mask);
}
static BOOST_FORCEINLINE bool bit_test_and_complement(storage_type volatile& storage, unsigned int bit_number, memory_order order) BOOST_NOEXCEPT
{
BOOST_STATIC_ASSERT_MSG(!base_type::is_interprocess, "Boost.Atomic: operation invoked on a non-lock-free inter-process atomic object");
storage_type mask = static_cast< storage_type >(static_cast< storage_type >(1u) << bit_number);
storage_type old_val = base_type::fetch_xor(storage, mask, order);
return !!(old_val & mask);
}
};
template< typename Base, std::size_t Size, bool Signed >
struct extra_operations< Base, Size, Signed, false > :
public extra_operations_emulated< Base, Size, Signed >
{
};
} // namespace detail
} // namespace atomics
} // namespace boost
#include <boost/atomic/detail/footer.hpp>
#endif // BOOST_ATOMIC_DETAIL_EXTRA_OPS_EMULATED_HPP_INCLUDED_
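For orientation, here is a minimal sketch (not part of the diff) of how the extra operations above surface through the public `boost::atomic` extension API. On targets without lock-free support these members are assumed to dispatch to the lock-based backend in this header; the snippet itself relies only on documented public calls.

```
// Minimal sketch: exercising the extra-operations extension API of
// boost::atomic. On non-lock-free targets these calls are serviced by the
// lock-based (emulated) backend defined above.
#include <boost/atomic.hpp>
#include <cassert>

int main()
{
    boost::atomic<int> a(5);

    int old = a.fetch_negate();       // returns 5, stores -5
    assert(old == 5 && a.load() == -5);

    a.opaque_add(7);                  // like fetch_add, but discards the result
    assert(a.load() == 2);

    bool nonzero = a.sub_and_test(2); // per the code above: true iff the result is non-zero
    assert(!nonzero && a.load() == 0);
    return 0;
}
```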

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -0,0 +1,844 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2017 - 2018 Andrey Semashev
*/
/*!
* \file atomic/detail/extra_ops_gcc_ppc.hpp
*
* This header contains implementation of the extra atomic operations for PowerPC.
*/
#ifndef BOOST_ATOMIC_DETAIL_EXTRA_OPS_GCC_PPC_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_EXTRA_OPS_GCC_PPC_HPP_INCLUDED_
#include <cstddef>
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/storage_traits.hpp>
#include <boost/atomic/detail/extra_operations_fwd.hpp>
#include <boost/atomic/detail/extra_ops_generic.hpp>
#include <boost/atomic/detail/ops_gcc_ppc_common.hpp>
#include <boost/atomic/detail/gcc_ppc_asm_common.hpp>
#include <boost/atomic/detail/capabilities.hpp>
#include <boost/atomic/detail/header.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
namespace boost {
namespace atomics {
namespace detail {
template< typename Base >
struct extra_operations_gcc_ppc_common :
public Base
{
typedef Base base_type;
typedef typename base_type::storage_type storage_type;
static BOOST_FORCEINLINE void opaque_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
base_type::fetch_negate(storage, order);
}
static BOOST_FORCEINLINE void opaque_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
base_type::fetch_complement(storage, order);
}
static BOOST_FORCEINLINE bool negate_and_test(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
return !!base_type::negate(storage, order);
}
static BOOST_FORCEINLINE bool add_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
return !!base_type::add(storage, v, order);
}
static BOOST_FORCEINLINE bool sub_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
return !!base_type::sub(storage, v, order);
}
static BOOST_FORCEINLINE bool and_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
return !!base_type::bitwise_and(storage, v, order);
}
static BOOST_FORCEINLINE bool or_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
return !!base_type::bitwise_or(storage, v, order);
}
static BOOST_FORCEINLINE bool xor_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
return !!base_type::bitwise_xor(storage, v, order);
}
static BOOST_FORCEINLINE bool complement_and_test(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
return !!base_type::bitwise_complement(storage, order);
}
};
template< typename Base, std::size_t Size, bool Signed >
struct extra_operations_gcc_ppc;
#if defined(BOOST_ATOMIC_DETAIL_PPC_HAS_LBARX_STBCX)
template< typename Base, bool Signed >
struct extra_operations_gcc_ppc< Base, 1u, Signed > :
public extra_operations_generic< Base, 1u, Signed >
{
typedef extra_operations_generic< Base, 1u, Signed > base_type;
typedef typename base_type::storage_type storage_type;
static BOOST_FORCEINLINE storage_type fetch_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
core_arch_operations_gcc_ppc_base::fence_before(order);
storage_type original, result;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"lbarx %0,%y2\n\t"
"neg %1,%0\n\t"
"stbcx. %1,%y2\n\t"
BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
:
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
core_arch_operations_gcc_ppc_base::fence_after(order);
return original;
}
static BOOST_FORCEINLINE storage_type negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
core_arch_operations_gcc_ppc_base::fence_before(order);
storage_type original, result;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"lbarx %0,%y2\n\t"
"neg %1,%0\n\t"
"stbcx. %1,%y2\n\t"
BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
:
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
core_arch_operations_gcc_ppc_base::fence_after(order);
return result;
}
static BOOST_FORCEINLINE storage_type add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, result;
core_arch_operations_gcc_ppc_base::fence_before(order);
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"lbarx %0,%y2\n\t"
"add %1,%0,%3\n\t"
"stbcx. %1,%y2\n\t"
BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
core_arch_operations_gcc_ppc_base::fence_after(order);
return result;
}
static BOOST_FORCEINLINE storage_type sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, result;
core_arch_operations_gcc_ppc_base::fence_before(order);
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"lbarx %0,%y2\n\t"
"sub %1,%0,%3\n\t"
"stbcx. %1,%y2\n\t"
BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
core_arch_operations_gcc_ppc_base::fence_after(order);
return result;
}
static BOOST_FORCEINLINE storage_type bitwise_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, result;
core_arch_operations_gcc_ppc_base::fence_before(order);
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"lbarx %0,%y2\n\t"
"and %1,%0,%3\n\t"
"stbcx. %1,%y2\n\t"
BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
core_arch_operations_gcc_ppc_base::fence_after(order);
return result;
}
static BOOST_FORCEINLINE storage_type bitwise_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, result;
core_arch_operations_gcc_ppc_base::fence_before(order);
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"lbarx %0,%y2\n\t"
"or %1,%0,%3\n\t"
"stbcx. %1,%y2\n\t"
BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
core_arch_operations_gcc_ppc_base::fence_after(order);
return result;
}
static BOOST_FORCEINLINE storage_type bitwise_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, result;
core_arch_operations_gcc_ppc_base::fence_before(order);
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"lbarx %0,%y2\n\t"
"xor %1,%0,%3\n\t"
"stbcx. %1,%y2\n\t"
BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
core_arch_operations_gcc_ppc_base::fence_after(order);
return result;
}
static BOOST_FORCEINLINE storage_type fetch_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
core_arch_operations_gcc_ppc_base::fence_before(order);
storage_type original, result;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"lbarx %0,%y2\n\t"
"nor %1,%0,%0\n\t"
"stbcx. %1,%y2\n\t"
BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
:
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
core_arch_operations_gcc_ppc_base::fence_after(order);
return original;
}
static BOOST_FORCEINLINE storage_type bitwise_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
core_arch_operations_gcc_ppc_base::fence_before(order);
storage_type original, result;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"lbarx %0,%y2\n\t"
"nor %1,%0,%0\n\t"
"stbcx. %1,%y2\n\t"
BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
:
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
core_arch_operations_gcc_ppc_base::fence_after(order);
return result;
}
};
template< typename Base, bool Signed >
struct extra_operations< Base, 1u, Signed, true > :
public extra_operations_gcc_ppc_common< extra_operations_gcc_ppc< Base, 1u, Signed > >
{
};
#endif // defined(BOOST_ATOMIC_DETAIL_PPC_HAS_LBARX_STBCX)
#if defined(BOOST_ATOMIC_DETAIL_PPC_HAS_LHARX_STHCX)
template< typename Base, bool Signed >
struct extra_operations_gcc_ppc< Base, 2u, Signed > :
public extra_operations_generic< Base, 2u, Signed >
{
typedef extra_operations_generic< Base, 2u, Signed > base_type;
typedef typename base_type::storage_type storage_type;
static BOOST_FORCEINLINE storage_type fetch_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
core_arch_operations_gcc_ppc_base::fence_before(order);
storage_type original, result;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"lharx %0,%y2\n\t"
"neg %1,%0\n\t"
"sthcx. %1,%y2\n\t"
BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
:
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
core_arch_operations_gcc_ppc_base::fence_after(order);
return original;
}
static BOOST_FORCEINLINE storage_type negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
core_arch_operations_gcc_ppc_base::fence_before(order);
storage_type original, result;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"lharx %0,%y2\n\t"
"neg %1,%0\n\t"
"sthcx. %1,%y2\n\t"
BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
:
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
core_arch_operations_gcc_ppc_base::fence_after(order);
return result;
}
static BOOST_FORCEINLINE storage_type add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, result;
core_arch_operations_gcc_ppc_base::fence_before(order);
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"lharx %0,%y2\n\t"
"add %1,%0,%3\n\t"
"sthcx. %1,%y2\n\t"
BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
core_arch_operations_gcc_ppc_base::fence_after(order);
return result;
}
static BOOST_FORCEINLINE storage_type sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, result;
core_arch_operations_gcc_ppc_base::fence_before(order);
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"lharx %0,%y2\n\t"
"sub %1,%0,%3\n\t"
"sthcx. %1,%y2\n\t"
BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
core_arch_operations_gcc_ppc_base::fence_after(order);
return result;
}
static BOOST_FORCEINLINE storage_type bitwise_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, result;
core_arch_operations_gcc_ppc_base::fence_before(order);
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"lharx %0,%y2\n\t"
"and %1,%0,%3\n\t"
"sthcx. %1,%y2\n\t"
BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
core_arch_operations_gcc_ppc_base::fence_after(order);
return result;
}
static BOOST_FORCEINLINE storage_type bitwise_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, result;
core_arch_operations_gcc_ppc_base::fence_before(order);
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"lharx %0,%y2\n\t"
"or %1,%0,%3\n\t"
"sthcx. %1,%y2\n\t"
BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
core_arch_operations_gcc_ppc_base::fence_after(order);
return result;
}
static BOOST_FORCEINLINE storage_type bitwise_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, result;
core_arch_operations_gcc_ppc_base::fence_before(order);
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"lharx %0,%y2\n\t"
"xor %1,%0,%3\n\t"
"sthcx. %1,%y2\n\t"
BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
core_arch_operations_gcc_ppc_base::fence_after(order);
return result;
}
static BOOST_FORCEINLINE storage_type fetch_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
core_arch_operations_gcc_ppc_base::fence_before(order);
storage_type original, result;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"lharx %0,%y2\n\t"
"nor %1,%0,%0\n\t"
"sthcx. %1,%y2\n\t"
BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
:
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
core_arch_operations_gcc_ppc_base::fence_after(order);
return original;
}
static BOOST_FORCEINLINE storage_type bitwise_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
core_arch_operations_gcc_ppc_base::fence_before(order);
storage_type original, result;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"lharx %0,%y2\n\t"
"nor %1,%0,%0\n\t"
"sthcx. %1,%y2\n\t"
BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
:
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
core_arch_operations_gcc_ppc_base::fence_after(order);
return result;
}
};
#endif // defined(BOOST_ATOMIC_DETAIL_PPC_HAS_LHARX_STHCX)
template< typename Base, bool Signed >
struct extra_operations_gcc_ppc< Base, 4u, Signed > :
public extra_operations_generic< Base, 4u, Signed >
{
typedef extra_operations_generic< Base, 4u, Signed > base_type;
typedef typename base_type::storage_type storage_type;
static BOOST_FORCEINLINE storage_type fetch_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
core_arch_operations_gcc_ppc_base::fence_before(order);
storage_type original, result;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"lwarx %0,%y2\n\t"
"neg %1,%0\n\t"
"stwcx. %1,%y2\n\t"
BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
:
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
core_arch_operations_gcc_ppc_base::fence_after(order);
return original;
}
static BOOST_FORCEINLINE storage_type negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
core_arch_operations_gcc_ppc_base::fence_before(order);
storage_type original, result;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"lwarx %0,%y2\n\t"
"neg %1,%0\n\t"
"stwcx. %1,%y2\n\t"
BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
:
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
core_arch_operations_gcc_ppc_base::fence_after(order);
return result;
}
static BOOST_FORCEINLINE storage_type add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, result;
core_arch_operations_gcc_ppc_base::fence_before(order);
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"lwarx %0,%y2\n\t"
"add %1,%0,%3\n\t"
"stwcx. %1,%y2\n\t"
BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
core_arch_operations_gcc_ppc_base::fence_after(order);
return result;
}
static BOOST_FORCEINLINE storage_type sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, result;
core_arch_operations_gcc_ppc_base::fence_before(order);
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"lwarx %0,%y2\n\t"
"sub %1,%0,%3\n\t"
"stwcx. %1,%y2\n\t"
BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
core_arch_operations_gcc_ppc_base::fence_after(order);
return result;
}
static BOOST_FORCEINLINE storage_type bitwise_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, result;
core_arch_operations_gcc_ppc_base::fence_before(order);
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"lwarx %0,%y2\n\t"
"and %1,%0,%3\n\t"
"stwcx. %1,%y2\n\t"
BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
core_arch_operations_gcc_ppc_base::fence_after(order);
return result;
}
static BOOST_FORCEINLINE storage_type bitwise_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, result;
core_arch_operations_gcc_ppc_base::fence_before(order);
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"lwarx %0,%y2\n\t"
"or %1,%0,%3\n\t"
"stwcx. %1,%y2\n\t"
BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
core_arch_operations_gcc_ppc_base::fence_after(order);
return result;
}
static BOOST_FORCEINLINE storage_type bitwise_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, result;
core_arch_operations_gcc_ppc_base::fence_before(order);
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"lwarx %0,%y2\n\t"
"xor %1,%0,%3\n\t"
"stwcx. %1,%y2\n\t"
BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
core_arch_operations_gcc_ppc_base::fence_after(order);
return result;
}
static BOOST_FORCEINLINE storage_type fetch_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
core_arch_operations_gcc_ppc_base::fence_before(order);
storage_type original, result;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"lwarx %0,%y2\n\t"
"nor %1,%0,%0\n\t"
"stwcx. %1,%y2\n\t"
BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
:
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
core_arch_operations_gcc_ppc_base::fence_after(order);
return original;
}
static BOOST_FORCEINLINE storage_type bitwise_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
core_arch_operations_gcc_ppc_base::fence_before(order);
storage_type original, result;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"lwarx %0,%y2\n\t"
"nor %1,%0,%0\n\t"
"stwcx. %1,%y2\n\t"
BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
:
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
core_arch_operations_gcc_ppc_base::fence_after(order);
return result;
}
};
template< typename Base, bool Signed >
struct extra_operations< Base, 4u, Signed, true > :
public extra_operations_gcc_ppc_common< extra_operations_gcc_ppc< Base, 4u, Signed > >
{
};
#if defined(BOOST_ATOMIC_DETAIL_PPC_HAS_LDARX_STDCX)
template< typename Base, bool Signed >
struct extra_operations_gcc_ppc< Base, 8u, Signed > :
public extra_operations_generic< Base, 8u, Signed >
{
typedef extra_operations_generic< Base, 8u, Signed > base_type;
typedef typename base_type::storage_type storage_type;
static BOOST_FORCEINLINE storage_type fetch_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
core_arch_operations_gcc_ppc_base::fence_before(order);
storage_type original, result;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"ldarx %0,%y2\n\t"
"neg %1,%0\n\t"
"stdcx. %1,%y2\n\t"
BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
:
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
core_arch_operations_gcc_ppc_base::fence_after(order);
return original;
}
static BOOST_FORCEINLINE storage_type negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
core_arch_operations_gcc_ppc_base::fence_before(order);
storage_type original, result;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"ldarx %0,%y2\n\t"
"neg %1,%0\n\t"
"stdcx. %1,%y2\n\t"
BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
:
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
core_arch_operations_gcc_ppc_base::fence_after(order);
return result;
}
static BOOST_FORCEINLINE storage_type add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, result;
core_arch_operations_gcc_ppc_base::fence_before(order);
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"ldarx %0,%y2\n\t"
"add %1,%0,%3\n\t"
"stdcx. %1,%y2\n\t"
BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
core_arch_operations_gcc_ppc_base::fence_after(order);
return result;
}
static BOOST_FORCEINLINE storage_type sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, result;
core_arch_operations_gcc_ppc_base::fence_before(order);
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"ldarx %0,%y2\n\t"
"sub %1,%0,%3\n\t"
"stdcx. %1,%y2\n\t"
BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
core_arch_operations_gcc_ppc_base::fence_after(order);
return result;
}
static BOOST_FORCEINLINE storage_type bitwise_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, result;
core_arch_operations_gcc_ppc_base::fence_before(order);
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"ldarx %0,%y2\n\t"
"and %1,%0,%3\n\t"
"stdcx. %1,%y2\n\t"
BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
core_arch_operations_gcc_ppc_base::fence_after(order);
return result;
}
static BOOST_FORCEINLINE storage_type bitwise_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, result;
core_arch_operations_gcc_ppc_base::fence_before(order);
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"ldarx %0,%y2\n\t"
"or %1,%0,%3\n\t"
"stdcx. %1,%y2\n\t"
BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
core_arch_operations_gcc_ppc_base::fence_after(order);
return result;
}
static BOOST_FORCEINLINE storage_type bitwise_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type original, result;
core_arch_operations_gcc_ppc_base::fence_before(order);
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"ldarx %0,%y2\n\t"
"xor %1,%0,%3\n\t"
"stdcx. %1,%y2\n\t"
BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
core_arch_operations_gcc_ppc_base::fence_after(order);
return result;
}
static BOOST_FORCEINLINE storage_type fetch_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
core_arch_operations_gcc_ppc_base::fence_before(order);
storage_type original, result;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"ldarx %0,%y2\n\t"
"nor %1,%0,%0\n\t"
"stdcx. %1,%y2\n\t"
BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
:
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
core_arch_operations_gcc_ppc_base::fence_after(order);
return original;
}
static BOOST_FORCEINLINE storage_type bitwise_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
core_arch_operations_gcc_ppc_base::fence_before(order);
storage_type original, result;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_PPC_ASM_LABEL("1")
"ldarx %0,%y2\n\t"
"nor %1,%0,%0\n\t"
"stdcx. %1,%y2\n\t"
BOOST_ATOMIC_DETAIL_PPC_ASM_JUMP("bne-", "1b", "-12")
: "=&b" (original), "=&b" (result), "+Z" (storage)
:
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
core_arch_operations_gcc_ppc_base::fence_after(order);
return result;
}
};
template< typename Base, bool Signed >
struct extra_operations< Base, 8u, Signed, true > :
public extra_operations_gcc_ppc_common< extra_operations_gcc_ppc< Base, 8u, Signed > >
{
};
#endif // defined(BOOST_ATOMIC_DETAIL_PPC_HAS_LDARX_STDCX)
} // namespace detail
} // namespace atomics
} // namespace boost
#include <boost/atomic/detail/footer.hpp>
#endif // BOOST_ATOMIC_DETAIL_EXTRA_OPS_GCC_PPC_HPP_INCLUDED_
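As an illustration of the lwarx/stwcx.-style retry loops used throughout this header, the following portable sketch restates what one of them (fetch_negate) computes, using a weak compare-exchange loop. The inline assembly above remains the actual implementation; the mapping to CAS is an interpretive assumption, not how the PPC backend is built.

```
// Illustrative sketch only: the semantics of each lwarx/stwcx. retry loop
// above, restated as a portable compare-exchange loop.
#include <boost/atomic.hpp>

template< typename T >
T fetch_negate_equivalent(boost::atomic<T>& a)
{
    T old_val = a.load(boost::memory_order_relaxed);
    // A failed stbcx./sthcx./stwcx./stdcx. corresponds to a failed weak CAS:
    // old_val is refreshed and the loop retries.
    while (!a.compare_exchange_weak(old_val, static_cast<T>(-old_val),
                                    boost::memory_order_seq_cst,
                                    boost::memory_order_relaxed))
    {
    }
    return old_val; // the "original" output operand of the asm block
}
```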

File diff suppressed because it is too large

View File

@@ -0,0 +1,394 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2015 Andrey Semashev
*/
/*!
* \file atomic/detail/extra_ops_generic.hpp
*
* This header contains generic implementation of the extra atomic operations.
*/
#ifndef BOOST_ATOMIC_DETAIL_EXTRA_OPS_GENERIC_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_EXTRA_OPS_GENERIC_HPP_INCLUDED_
#include <cstddef>
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/storage_traits.hpp>
#include <boost/atomic/detail/integral_conversions.hpp>
#include <boost/atomic/detail/extra_operations_fwd.hpp>
#include <boost/atomic/detail/header.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
namespace boost {
namespace atomics {
namespace detail {
//! Generic implementation of extra operations
template< typename Base, std::size_t Size, bool Signed, bool = Base::full_cas_based >
struct extra_operations_generic :
public Base
{
typedef Base base_type;
typedef typename base_type::storage_type storage_type;
typedef typename storage_traits< Size >::type emulated_storage_type;
static BOOST_FORCEINLINE storage_type fetch_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
storage_type old_val;
atomics::detail::non_atomic_load(storage, old_val);
while (!base_type::compare_exchange_weak(storage, old_val, atomics::detail::integral_extend< Signed, storage_type >(static_cast< emulated_storage_type >(-old_val)), order, memory_order_relaxed)) {}
return old_val;
}
static BOOST_FORCEINLINE storage_type negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
storage_type old_val, new_val;
atomics::detail::non_atomic_load(storage, old_val);
do
{
new_val = atomics::detail::integral_extend< Signed, storage_type >(static_cast< emulated_storage_type >(-old_val));
}
while (!base_type::compare_exchange_weak(storage, old_val, new_val, order, memory_order_relaxed));
return new_val;
}
static BOOST_FORCEINLINE storage_type add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
return base_type::fetch_add(storage, v, order) + v;
}
static BOOST_FORCEINLINE storage_type sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
return base_type::fetch_sub(storage, v, order) - v;
}
static BOOST_FORCEINLINE storage_type bitwise_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
return base_type::fetch_and(storage, v, order) & v;
}
static BOOST_FORCEINLINE storage_type bitwise_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
return base_type::fetch_or(storage, v, order) | v;
}
static BOOST_FORCEINLINE storage_type bitwise_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
return base_type::fetch_xor(storage, v, order) ^ v;
}
static BOOST_FORCEINLINE storage_type fetch_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
return base_type::fetch_xor(storage, atomics::detail::integral_extend< Signed, storage_type >(static_cast< emulated_storage_type >(~static_cast< emulated_storage_type >(0u))), order);
}
static BOOST_FORCEINLINE storage_type bitwise_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
const storage_type mask = atomics::detail::integral_extend< Signed, storage_type >(static_cast< emulated_storage_type >(~static_cast< emulated_storage_type >(0u)));
return base_type::fetch_xor(storage, mask, order) ^ mask;
}
static BOOST_FORCEINLINE void opaque_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
base_type::fetch_add(storage, v, order);
}
static BOOST_FORCEINLINE void opaque_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
base_type::fetch_sub(storage, v, order);
}
static BOOST_FORCEINLINE void opaque_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
fetch_negate(storage, order);
}
static BOOST_FORCEINLINE void opaque_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
base_type::fetch_and(storage, v, order);
}
static BOOST_FORCEINLINE void opaque_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
base_type::fetch_or(storage, v, order);
}
static BOOST_FORCEINLINE void opaque_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
base_type::fetch_xor(storage, v, order);
}
static BOOST_FORCEINLINE void opaque_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
fetch_complement(storage, order);
}
static BOOST_FORCEINLINE bool add_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
return !!static_cast< emulated_storage_type >(add(storage, v, order));
}
static BOOST_FORCEINLINE bool sub_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
return !!static_cast< emulated_storage_type >(sub(storage, v, order));
}
static BOOST_FORCEINLINE bool negate_and_test(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
return !!negate(storage, order);
}
static BOOST_FORCEINLINE bool and_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
return !!bitwise_and(storage, v, order);
}
static BOOST_FORCEINLINE bool or_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
return !!bitwise_or(storage, v, order);
}
static BOOST_FORCEINLINE bool xor_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
return !!bitwise_xor(storage, v, order);
}
static BOOST_FORCEINLINE bool complement_and_test(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
return !!static_cast< emulated_storage_type >(bitwise_complement(storage, order));
}
static BOOST_FORCEINLINE bool bit_test_and_set(storage_type volatile& storage, unsigned int bit_number, memory_order order) BOOST_NOEXCEPT
{
const storage_type mask = atomics::detail::integral_extend< Signed, storage_type >(static_cast< emulated_storage_type >(static_cast< emulated_storage_type >(1u) << bit_number));
storage_type old_val = base_type::fetch_or(storage, mask, order);
return !!(old_val & mask);
}
static BOOST_FORCEINLINE bool bit_test_and_reset(storage_type volatile& storage, unsigned int bit_number, memory_order order) BOOST_NOEXCEPT
{
const storage_type mask = atomics::detail::integral_extend< Signed, storage_type >(static_cast< emulated_storage_type >(static_cast< emulated_storage_type >(1u) << bit_number));
storage_type old_val = base_type::fetch_and(storage, ~mask, order);
return !!(old_val & mask);
}
static BOOST_FORCEINLINE bool bit_test_and_complement(storage_type volatile& storage, unsigned int bit_number, memory_order order) BOOST_NOEXCEPT
{
const storage_type mask = atomics::detail::integral_extend< Signed, storage_type >(static_cast< emulated_storage_type >(static_cast< emulated_storage_type >(1u) << bit_number));
storage_type old_val = base_type::fetch_xor(storage, mask, order);
return !!(old_val & mask);
}
};
//! Specialization for cases when the platform only natively supports CAS
template< typename Base, std::size_t Size, bool Signed >
struct extra_operations_generic< Base, Size, Signed, true > :
public Base
{
typedef Base base_type;
typedef typename base_type::storage_type storage_type;
typedef typename storage_traits< Size >::type emulated_storage_type;
static BOOST_FORCEINLINE storage_type fetch_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
storage_type old_val;
atomics::detail::non_atomic_load(storage, old_val);
while (!base_type::compare_exchange_weak(storage, old_val, atomics::detail::integral_extend< Signed, storage_type >(static_cast< emulated_storage_type >(-old_val)), order, memory_order_relaxed)) {}
return old_val;
}
static BOOST_FORCEINLINE storage_type negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
storage_type old_val, new_val;
atomics::detail::non_atomic_load(storage, old_val);
do
{
new_val = atomics::detail::integral_extend< Signed, storage_type >(static_cast< emulated_storage_type >(-old_val));
}
while (!base_type::compare_exchange_weak(storage, old_val, new_val, order, memory_order_relaxed));
return new_val;
}
static BOOST_FORCEINLINE storage_type add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type old_val, new_val;
atomics::detail::non_atomic_load(storage, old_val);
do
{
new_val = atomics::detail::integral_extend< Signed, storage_type >(static_cast< emulated_storage_type >(old_val + v));
}
while (!base_type::compare_exchange_weak(storage, old_val, new_val, order, memory_order_relaxed));
return new_val;
}
static BOOST_FORCEINLINE storage_type sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type old_val, new_val;
atomics::detail::non_atomic_load(storage, old_val);
do
{
new_val = atomics::detail::integral_extend< Signed, storage_type >(static_cast< emulated_storage_type >(old_val - v));
}
while (!base_type::compare_exchange_weak(storage, old_val, new_val, order, memory_order_relaxed));
return new_val;
}
static BOOST_FORCEINLINE storage_type bitwise_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type old_val, new_val;
atomics::detail::non_atomic_load(storage, old_val);
do
{
new_val = atomics::detail::integral_extend< Signed, storage_type >(static_cast< emulated_storage_type >(old_val & v));
}
while (!base_type::compare_exchange_weak(storage, old_val, new_val, order, memory_order_relaxed));
return new_val;
}
static BOOST_FORCEINLINE storage_type bitwise_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type old_val, new_val;
atomics::detail::non_atomic_load(storage, old_val);
do
{
new_val = atomics::detail::integral_extend< Signed, storage_type >(static_cast< emulated_storage_type >(old_val | v));
}
while (!base_type::compare_exchange_weak(storage, old_val, new_val, order, memory_order_relaxed));
return new_val;
}
static BOOST_FORCEINLINE storage_type bitwise_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type old_val, new_val;
atomics::detail::non_atomic_load(storage, old_val);
do
{
new_val = atomics::detail::integral_extend< Signed, storage_type >(static_cast< emulated_storage_type >(old_val ^ v));
}
while (!base_type::compare_exchange_weak(storage, old_val, new_val, order, memory_order_relaxed));
return new_val;
}
static BOOST_FORCEINLINE storage_type fetch_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
return base_type::fetch_xor(storage, atomics::detail::integral_extend< Signed, storage_type >(static_cast< emulated_storage_type >(~static_cast< emulated_storage_type >(0u))), order);
}
static BOOST_FORCEINLINE storage_type bitwise_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
return bitwise_xor(storage, atomics::detail::integral_extend< Signed, storage_type >(static_cast< emulated_storage_type >(~static_cast< emulated_storage_type >(0u))), order);
}
static BOOST_FORCEINLINE void opaque_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
base_type::fetch_add(storage, v, order);
}
static BOOST_FORCEINLINE void opaque_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
base_type::fetch_sub(storage, v, order);
}
static BOOST_FORCEINLINE void opaque_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
fetch_negate(storage, order);
}
static BOOST_FORCEINLINE void opaque_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
base_type::fetch_and(storage, v, order);
}
static BOOST_FORCEINLINE void opaque_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
base_type::fetch_or(storage, v, order);
}
static BOOST_FORCEINLINE void opaque_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
base_type::fetch_xor(storage, v, order);
}
static BOOST_FORCEINLINE void opaque_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
fetch_complement(storage, order);
}
static BOOST_FORCEINLINE bool add_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
return !!static_cast< emulated_storage_type >(add(storage, v, order));
}
static BOOST_FORCEINLINE bool sub_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
return !!static_cast< emulated_storage_type >(sub(storage, v, order));
}
static BOOST_FORCEINLINE bool negate_and_test(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
return !!negate(storage, order);
}
static BOOST_FORCEINLINE bool and_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
return !!bitwise_and(storage, v, order);
}
static BOOST_FORCEINLINE bool or_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
return !!bitwise_or(storage, v, order);
}
static BOOST_FORCEINLINE bool xor_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
return !!bitwise_xor(storage, v, order);
}
static BOOST_FORCEINLINE bool complement_and_test(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
return !!static_cast< emulated_storage_type >(bitwise_complement(storage, order));
}
static BOOST_FORCEINLINE bool bit_test_and_set(storage_type volatile& storage, unsigned int bit_number, memory_order order) BOOST_NOEXCEPT
{
const storage_type mask = atomics::detail::integral_extend< Signed, storage_type >(static_cast< emulated_storage_type >(static_cast< emulated_storage_type >(1u) << bit_number));
storage_type old_val = base_type::fetch_or(storage, mask, order);
return !!(old_val & mask);
}
static BOOST_FORCEINLINE bool bit_test_and_reset(storage_type volatile& storage, unsigned int bit_number, memory_order order) BOOST_NOEXCEPT
{
const storage_type mask = atomics::detail::integral_extend< Signed, storage_type >(static_cast< emulated_storage_type >(static_cast< emulated_storage_type >(1u) << bit_number));
storage_type old_val = base_type::fetch_and(storage, ~mask, order);
return !!(old_val & mask);
}
static BOOST_FORCEINLINE bool bit_test_and_complement(storage_type volatile& storage, unsigned int bit_number, memory_order order) BOOST_NOEXCEPT
{
const storage_type mask = atomics::detail::integral_extend< Signed, storage_type >(static_cast< emulated_storage_type >(static_cast< emulated_storage_type >(1u) << bit_number));
storage_type old_val = base_type::fetch_xor(storage, mask, order);
return !!(old_val & mask);
}
};
// Default extra_operations template definition will be used unless specialized for a specific platform
template< typename Base, std::size_t Size, bool Signed >
struct extra_operations< Base, Size, Signed, true > :
public extra_operations_generic< Base, Size, Signed >
{
};
} // namespace detail
} // namespace atomics
} // namespace boost
#include <boost/atomic/detail/footer.hpp>
#endif // BOOST_ATOMIC_DETAIL_EXTRA_OPS_GENERIC_HPP_INCLUDED_
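To make the bit_test_and_* contract above concrete, here is a small usage sketch through the public API. Bit numbers count from the least significant bit, and the mask construction mirrors the `1u << bit_number` shifts in the code above.

```
// Sketch of the bit_test_and_* semantics implemented above, via the public API.
#include <boost/atomic.hpp>
#include <cassert>

int main()
{
    boost::atomic<unsigned int> flags(0u);

    bool was_set = flags.bit_test_and_set(3u);         // fetch_or with mask 1u << 3
    assert(!was_set && flags.load() == 8u);

    bool was_set2 = flags.bit_test_and_complement(3u); // fetch_xor with the same mask
    assert(was_set2 && flags.load() == 0u);
    return 0;
}
```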

View File

@@ -0,0 +1,108 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2017 Andrey Semashev
*/
/*!
* \file atomic/detail/extra_ops_msvc_arm.hpp
*
* This header contains implementation of the extra atomic operations for ARM.
*/
#ifndef BOOST_ATOMIC_DETAIL_EXTRA_OPS_MSVC_ARM_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_EXTRA_OPS_MSVC_ARM_HPP_INCLUDED_
#include <cstddef>
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/interlocked.hpp>
#include <boost/atomic/detail/storage_traits.hpp>
#include <boost/atomic/detail/extra_operations_fwd.hpp>
#include <boost/atomic/detail/extra_ops_generic.hpp>
#include <boost/atomic/detail/header.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
namespace boost {
namespace atomics {
namespace detail {
#if defined(BOOST_ATOMIC_INTERLOCKED_BTS) && defined(BOOST_ATOMIC_INTERLOCKED_BTR)
template< typename Base, bool Signed >
struct extra_operations< Base, 4u, Signed, true > :
public extra_operations_generic< Base, 4u, Signed >
{
typedef extra_operations_generic< Base, 4u, Signed > base_type;
typedef typename base_type::storage_type storage_type;
static BOOST_FORCEINLINE bool bit_test_and_set(storage_type volatile& storage, unsigned int bit_number, memory_order order) BOOST_NOEXCEPT
{
#if defined(BOOST_ATOMIC_INTERLOCKED_BTS_RELAXED) && defined(BOOST_ATOMIC_INTERLOCKED_BTS_ACQUIRE) && defined(BOOST_ATOMIC_INTERLOCKED_BTS_RELEASE)
bool result;
switch (order)
{
case memory_order_relaxed:
result = !!BOOST_ATOMIC_INTERLOCKED_BTS_RELAXED(&storage, bit_number);
break;
case memory_order_consume:
case memory_order_acquire:
result = !!BOOST_ATOMIC_INTERLOCKED_BTS_ACQUIRE(&storage, bit_number);
break;
case memory_order_release:
result = !!BOOST_ATOMIC_INTERLOCKED_BTS_RELEASE(&storage, bit_number);
break;
case memory_order_acq_rel:
case memory_order_seq_cst:
default:
result = !!BOOST_ATOMIC_INTERLOCKED_BTS(&storage, bit_number);
break;
}
return result;
#else
return !!BOOST_ATOMIC_INTERLOCKED_BTS(&storage, bit_number);
#endif
}
static BOOST_FORCEINLINE bool bit_test_and_reset(storage_type volatile& storage, unsigned int bit_number, memory_order order) BOOST_NOEXCEPT
{
#if defined(BOOST_ATOMIC_INTERLOCKED_BTR_RELAXED) && defined(BOOST_ATOMIC_INTERLOCKED_BTR_ACQUIRE) && defined(BOOST_ATOMIC_INTERLOCKED_BTR_RELEASE)
bool result;
switch (order)
{
case memory_order_relaxed:
result = !!BOOST_ATOMIC_INTERLOCKED_BTR_RELAXED(&storage, bit_number);
break;
case memory_order_consume:
case memory_order_acquire:
result = !!BOOST_ATOMIC_INTERLOCKED_BTR_ACQUIRE(&storage, bit_number);
break;
case memory_order_release:
result = !!BOOST_ATOMIC_INTERLOCKED_BTR_RELEASE(&storage, bit_number);
break;
case memory_order_acq_rel:
case memory_order_seq_cst:
default:
result = !!BOOST_ATOMIC_INTERLOCKED_BTR(&storage, bit_number);
break;
}
return result;
#else
return !!BOOST_ATOMIC_INTERLOCKED_BTR(&storage, bit_number);
#endif
}
};
#endif // defined(BOOST_ATOMIC_INTERLOCKED_BTS) && defined(BOOST_ATOMIC_INTERLOCKED_BTR)
} // namespace detail
} // namespace atomics
} // namespace boost
#include <boost/atomic/detail/footer.hpp>
#endif // BOOST_ATOMIC_DETAIL_EXTRA_OPS_MSVC_ARM_HPP_INCLUDED_
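The switch statements above pick the weakest interlocked variant that still satisfies the requested memory_order. Below is a hedged sketch of the same dispatch pattern, written directly against the MSVC ARM intrinsics that the BOOST_ATOMIC_INTERLOCKED_BTS* macros are assumed to wrap; it compiles only with MSVC targeting ARM.

```
// Hedged sketch of the memory_order -> interlocked-variant dispatch above.
// The _nf/_acq/_rel intrinsic names are MSVC ARM intrinsics assumed to back
// the BOOST_ATOMIC_INTERLOCKED_BTS* macros.
#include <intrin.h>
#include <boost/memory_order.hpp>

inline bool bts_dispatch(long volatile* p, long bit, boost::memory_order order)
{
    switch (order)
    {
    case boost::memory_order_relaxed:
        return !!_interlockedbittestandset_nf(p, bit);  // no fence
    case boost::memory_order_consume:
    case boost::memory_order_acquire:
        return !!_interlockedbittestandset_acq(p, bit); // acquire fence
    case boost::memory_order_release:
        return !!_interlockedbittestandset_rel(p, bit); // release fence
    default: // acq_rel, seq_cst
        return !!_interlockedbittestandset(p, bit);     // full fence
    }
}
```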

File diff suppressed because it is too large

View File

@@ -0,0 +1,41 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2020 Andrey Semashev
*/
/*!
* \file atomic/detail/fence_arch_operations.hpp
*
* This header defines architecture-specific fence atomic operations.
*/
#ifndef BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPERATIONS_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPERATIONS_HPP_INCLUDED_
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/platform.hpp>
#if defined(BOOST_ATOMIC_DETAIL_CORE_ARCH_BACKEND_HEADER)
#include BOOST_ATOMIC_DETAIL_CORE_ARCH_BACKEND_HEADER(boost/atomic/detail/fence_arch_ops_)
#else
#include <boost/atomic/detail/fence_operations_emulated.hpp>
namespace boost {
namespace atomics {
namespace detail {
typedef fence_operations_emulated fence_arch_operations;
} // namespace detail
} // namespace atomics
} // namespace boost
#endif
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
#endif // BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPERATIONS_HPP_INCLUDED_
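These per-architecture backends ultimately implement the public fence functions. A short usage sketch follows; the instructions named in the comments come from the AArch32/AArch64 backends defined below, and other architectures emit different barriers.

```
// Usage sketch: the fence backend selected by this header services the
// public boost::atomic_thread_fence / atomic_signal_fence calls.
#include <boost/atomic/fences.hpp>

void publish(int* slot, int value)
{
    *slot = value;
    boost::atomic_thread_fence(boost::memory_order_release); // e.g. "dmb ish" on AArch32/64
}

void observe()
{
    boost::atomic_thread_fence(boost::memory_order_acquire); // e.g. "dmb ishld"
    boost::atomic_signal_fence(boost::memory_order_acquire); // compiler barrier only
}
```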

View File

@@ -0,0 +1,60 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2020 Andrey Semashev
*/
/*!
* \file atomic/detail/fence_arch_ops_gcc_aarch32.hpp
*
* This header contains implementation of the \c fence_arch_operations struct.
*/
#ifndef BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPS_GCC_AARCH32_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPS_GCC_AARCH32_HPP_INCLUDED_
#include <boost/cstdint.hpp>
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/capabilities.hpp>
#include <boost/atomic/detail/header.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
namespace boost {
namespace atomics {
namespace detail {
//! Fence operations for AArch32
struct fence_arch_operations_gcc_aarch32
{
static BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
{
if (order != memory_order_relaxed)
{
if (order == memory_order_consume || order == memory_order_acquire)
__asm__ __volatile__ ("dmb ishld\n\t" ::: "memory");
else
__asm__ __volatile__ ("dmb ish\n\t" ::: "memory");
}
}
static BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT
{
if (order != memory_order_relaxed)
__asm__ __volatile__ ("" ::: "memory");
}
};
typedef fence_arch_operations_gcc_aarch32 fence_arch_operations;
} // namespace detail
} // namespace atomics
} // namespace boost
#include <boost/atomic/detail/footer.hpp>
#endif // BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPS_GCC_AARCH32_HPP_INCLUDED_

View File

@@ -0,0 +1,58 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2020 Andrey Semashev
*/
/*!
* \file atomic/detail/fence_arch_ops_gcc_aarch64.hpp
*
* This header contains implementation of the \c fence_arch_operations struct.
*/
#ifndef BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPS_GCC_AARCH64_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPS_GCC_AARCH64_HPP_INCLUDED_
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/header.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
namespace boost {
namespace atomics {
namespace detail {
//! Fence operations for AArch64
struct fence_arch_operations_gcc_aarch64
{
static BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
{
if (order != memory_order_relaxed)
{
if (order == memory_order_consume || order == memory_order_acquire)
__asm__ __volatile__ ("dmb ishld\n\t" ::: "memory");
else
__asm__ __volatile__ ("dmb ish\n\t" ::: "memory");
}
}
static BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT
{
if (order != memory_order_relaxed)
__asm__ __volatile__ ("" ::: "memory");
}
};
typedef fence_arch_operations_gcc_aarch64 fence_arch_operations;
} // namespace detail
} // namespace atomics
} // namespace boost
#include <boost/atomic/detail/footer.hpp>
#endif // BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPS_GCC_AARCH64_HPP_INCLUDED_

View File

@@ -0,0 +1,53 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2020 Andrey Semashev
*/
/*!
* \file atomic/detail/fence_arch_ops_gcc_alpha.hpp
*
* This header contains implementation of the \c fence_arch_operations struct.
*/
#ifndef BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPS_GCC_ALPHA_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPS_GCC_ALPHA_HPP_INCLUDED_
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/header.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
namespace boost {
namespace atomics {
namespace detail {
//! Fence operations for Alpha
struct fence_arch_operations_gcc_alpha
{
static BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
{
if (order != memory_order_relaxed)
__asm__ __volatile__ ("mb" ::: "memory");
}
static BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT
{
if (order != memory_order_relaxed)
__asm__ __volatile__ ("" ::: "memory");
}
};
typedef fence_arch_operations_gcc_alpha fence_arch_operations;
} // namespace detail
} // namespace atomics
} // namespace boost
#include <boost/atomic/detail/footer.hpp>
#endif // BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPS_GCC_ALPHA_HPP_INCLUDED_

View File

@@ -0,0 +1,90 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2020 Andrey Semashev
*/
/*!
* \file atomic/detail/fence_arch_ops_gcc_arm.hpp
*
* This header contains implementation of the \c fence_arch_operations struct.
*/
#ifndef BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPS_GCC_ARM_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPS_GCC_ARM_HPP_INCLUDED_
#include <boost/cstdint.hpp>
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/capabilities.hpp>
#include <boost/atomic/detail/gcc_arm_asm_common.hpp>
#include <boost/atomic/detail/header.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
namespace boost {
namespace atomics {
namespace detail {
//! Fence operations for legacy ARM
struct fence_arch_operations_gcc_arm
{
static BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
{
if (order != memory_order_relaxed)
hardware_full_fence();
}
static BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT
{
if (order != memory_order_relaxed)
__asm__ __volatile__ ("" ::: "memory");
}
static BOOST_FORCEINLINE void hardware_full_fence() BOOST_NOEXCEPT
{
// A memory barrier is effected using a "co-processor 15" instruction,
// though a separate assembler mnemonic is available for it in v7.
#if defined(BOOST_ATOMIC_DETAIL_ARM_HAS_DMB)
// Older binutils (supposedly, older than 2.21.1) didn't support symbolic or numeric arguments of the "dmb" instruction such as "ish" or "#11".
// As a workaround we have to inject encoded bytes of the instruction. There are two encodings for the instruction: ARM and Thumb. See ARM Architecture Reference Manual, A8.8.43.
// Since we cannot detect binutils version at compile time, we'll have to always use this hack.
__asm__ __volatile__
(
#if defined(__thumb2__)
".short 0xF3BF, 0x8F5B\n\t" // dmb ish
#else
".word 0xF57FF05B\n\t" // dmb ish
#endif
:
:
: "memory"
);
#else
uint32_t tmp;
__asm__ __volatile__
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%0)
"mcr p15, 0, r0, c7, c10, 5\n\t"
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%0)
: "=&l" (tmp)
:
: "memory"
);
#endif
}
};
typedef fence_arch_operations_gcc_arm fence_arch_operations;
} // namespace detail
} // namespace atomics
} // namespace boost
#include <boost/atomic/detail/footer.hpp>
#endif // BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPS_GCC_ARM_HPP_INCLUDED_

View File

@ -0,0 +1,68 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2020 Andrey Semashev
*/
/*!
* \file atomic/detail/fence_arch_ops_gcc_ppc.hpp
*
* This header contains implementation of the \c fence_arch_operations struct.
*/
#ifndef BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPS_GCC_PPC_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPS_GCC_PPC_HPP_INCLUDED_
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/header.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
namespace boost {
namespace atomics {
namespace detail {
//! Fence operations for PowerPC
struct fence_arch_operations_gcc_ppc
{
static BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
{
if (order != memory_order_relaxed)
{
#if defined(__powerpc64__) || defined(__PPC64__)
if (order != memory_order_seq_cst)
__asm__ __volatile__ ("lwsync" ::: "memory");
else
__asm__ __volatile__ ("sync" ::: "memory");
#else
__asm__ __volatile__ ("sync" ::: "memory");
#endif
}
}
static BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT
{
if (order != memory_order_relaxed)
{
#if defined(__ibmxl__) || defined(__IBMCPP__)
__fence();
#else
__asm__ __volatile__ ("" ::: "memory");
#endif
}
}
};
typedef fence_arch_operations_gcc_ppc fence_arch_operations;
} // namespace detail
} // namespace atomics
} // namespace boost
#include <boost/atomic/detail/footer.hpp>
#endif // BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPS_GCC_PPC_HPP_INCLUDED_

View File

@ -0,0 +1,70 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2020 Andrey Semashev
*/
/*!
* \file atomic/detail/fence_arch_ops_gcc_sparc.hpp
*
* This header contains implementation of the \c fence_arch_operations struct.
*/
#ifndef BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPS_GCC_SPARC_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPS_GCC_SPARC_HPP_INCLUDED_
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/header.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
namespace boost {
namespace atomics {
namespace detail {
//! Fence operations for SPARC
struct fence_arch_operations_gcc_sparc
{
static BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
{
switch (order)
{
case memory_order_release:
__asm__ __volatile__ ("membar #StoreStore | #LoadStore" ::: "memory");
break;
case memory_order_consume:
case memory_order_acquire:
__asm__ __volatile__ ("membar #LoadLoad | #LoadStore" ::: "memory");
break;
case memory_order_acq_rel:
__asm__ __volatile__ ("membar #LoadLoad | #LoadStore | #StoreStore" ::: "memory");
break;
case memory_order_seq_cst:
__asm__ __volatile__ ("membar #Sync" ::: "memory");
break;
case memory_order_relaxed:
default:
break;
}
}
static BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT
{
if (order != memory_order_relaxed)
__asm__ __volatile__ ("" ::: "memory");
}
};
typedef fence_arch_operations_gcc_sparc fence_arch_operations;
} // namespace detail
} // namespace atomics
} // namespace boost
#include <boost/atomic/detail/footer.hpp>
#endif // BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPS_GCC_SPARC_HPP_INCLUDED_

View File

@ -0,0 +1,69 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2020 Andrey Semashev
*/
/*!
* \file atomic/detail/fence_arch_ops_gcc_x86.hpp
*
* This header contains implementation of the \c fence_arch_operations struct.
*/
#ifndef BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPS_GCC_X86_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPS_GCC_X86_HPP_INCLUDED_
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/header.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
namespace boost {
namespace atomics {
namespace detail {
//! Fence operations for x86
struct fence_arch_operations_gcc_x86
{
static BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
{
if (order == memory_order_seq_cst)
{
// We could generate mfence for a seq_cst fence here, but a dummy lock-prefixed instruction is enough
// and is faster than mfence on most modern x86 CPUs (as of 2020).
// Note that we want to apply the atomic operation on any location so that:
// - It is not shared with other threads. A variable on the stack suits this well.
// - It is likely in cache. Being close to the top of the stack fits this well.
// - It does not alias existing data on the stack, so that we don't introduce a false data dependency.
// See some performance data here: https://shipilev.net/blog/2014/on-the-fence-with-dependencies/
// Unfortunately, to make tools like valgrind happy, we have to initialize the dummy, which is
// otherwise not needed.
unsigned char dummy = 0u;
__asm__ __volatile__ ("lock; notb %0" : "+m" (dummy) : : "memory");
}
else if ((static_cast< unsigned int >(order) & (static_cast< unsigned int >(memory_order_acquire) | static_cast< unsigned int >(memory_order_release))) != 0u)
{
__asm__ __volatile__ ("" ::: "memory");
}
}
static BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT
{
if (order != memory_order_relaxed)
__asm__ __volatile__ ("" ::: "memory");
}
};
typedef fence_arch_operations_gcc_x86 fence_arch_operations;
} // namespace detail
} // namespace atomics
} // namespace boost
#include <boost/atomic/detail/footer.hpp>
#endif // BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPS_GCC_X86_HPP_INCLUDED_
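The mfence-versus-locked-instruction trade-off described in the comment above can be seen in isolation in the following sketch (GCC-style inline assembly, x86/x86-64 only; illustrative, not part of Boost):

```
// Two ways to obtain a sequentially consistent fence on x86 (illustrative).
inline void fence_via_mfence()
{
    __asm__ __volatile__ ("mfence" ::: "memory");
}

inline void fence_via_locked_op()
{
    // A lock-prefixed read-modify-write on a private stack byte drains the
    // store buffer just like mfence, and is faster on most modern CPUs.
    unsigned char dummy = 0u;
    __asm__ __volatile__ ("lock; notb %0" : "+m" (dummy) : : "memory");
}
```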

View File

@ -0,0 +1,66 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2020 Andrey Semashev
*/
/*!
* \file atomic/detail/fence_arch_ops_msvc_arm.hpp
*
* This header contains implementation of the \c fence_arch_operations struct.
*/
#ifndef BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPS_MSVC_ARM_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPS_MSVC_ARM_HPP_INCLUDED_
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/ops_msvc_common.hpp>
#include <boost/atomic/detail/header.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
extern "C" void __dmb(unsigned int);
#if defined(BOOST_MSVC)
#pragma intrinsic(__dmb)
#endif
namespace boost {
namespace atomics {
namespace detail {
//! Fence operations for ARM
struct fence_arch_operations_msvc_arm
{
static BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
{
BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
if (order != memory_order_relaxed)
hardware_full_fence();
BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
}
static BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT
{
if (order != memory_order_relaxed)
BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
}
static BOOST_FORCEINLINE void hardware_full_fence() BOOST_NOEXCEPT
{
__dmb(0xB); // _ARM_BARRIER_ISH, see armintr.h from MSVC 11 and later
}
};
typedef fence_arch_operations_msvc_arm fence_arch_operations;
} // namespace detail
} // namespace atomics
} // namespace boost
#include <boost/atomic/detail/footer.hpp>
#endif // BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPS_MSVC_ARM_HPP_INCLUDED_

View File

@ -0,0 +1,66 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2020 Andrey Semashev
*/
/*!
* \file atomic/detail/fence_arch_ops_msvc_x86.hpp
*
* This header contains implementation of the \c fence_arch_operations struct.
*/
#ifndef BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPS_MSVC_X86_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPS_MSVC_X86_HPP_INCLUDED_
#include <boost/cstdint.hpp>
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/interlocked.hpp>
#include <boost/atomic/detail/ops_msvc_common.hpp>
#include <boost/atomic/detail/header.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
namespace boost {
namespace atomics {
namespace detail {
//! Fence operations for x86
struct fence_arch_operations_msvc_x86
{
static BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
{
if (order == memory_order_seq_cst)
{
// See the comment in fence_ops_gcc_x86.hpp as to why we're not using mfence here.
// We're not using __faststorefence() here because it generates an atomic operation
// on [rsp]/[esp] location, which may alias valid data and cause false data dependency.
boost::uint32_t dummy;
BOOST_ATOMIC_INTERLOCKED_INCREMENT(&dummy);
}
else if (order != memory_order_relaxed)
{
BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
}
}
static BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT
{
if (order != memory_order_relaxed)
BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
}
};
typedef fence_arch_operations_msvc_x86 fence_arch_operations;
} // namespace detail
} // namespace atomics
} // namespace boost
#include <boost/atomic/detail/footer.hpp>
#endif // BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPS_MSVC_X86_HPP_INCLUDED_

View File

@ -0,0 +1,41 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2020 Andrey Semashev
*/
/*!
* \file atomic/detail/fence_operations.hpp
*
* This header defines fence atomic operations.
*/
#ifndef BOOST_ATOMIC_DETAIL_FENCE_OPERATIONS_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_FENCE_OPERATIONS_HPP_INCLUDED_
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/platform.hpp>
#if defined(BOOST_ATOMIC_DETAIL_CORE_BACKEND_HEADER)
#include BOOST_ATOMIC_DETAIL_CORE_BACKEND_HEADER(boost/atomic/detail/fence_ops_)
#else
#include <boost/atomic/detail/fence_arch_operations.hpp>
namespace boost {
namespace atomics {
namespace detail {
typedef fence_arch_operations fence_operations;
} // namespace detail
} // namespace atomics
} // namespace boost
#endif
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
#endif // BOOST_ATOMIC_DETAIL_FENCE_OPERATIONS_HPP_INCLUDED_
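The `BOOST_ATOMIC_DETAIL_CORE_BACKEND_HEADER` dispatch above works by pasting the configured backend name onto the given path prefix and stringizing the result into an include path. A simplified stand-alone sketch of that mechanism (the `MY_*` macro names are hypothetical stand-ins, not Boost's actual internals):

```
// Simplified sketch of backend header dispatch (names are illustrative).
#define MY_CORE_BACKEND gcc_atomic

#define MY_STRINGIZE_IMPL(x) #x
#define MY_STRINGIZE(x) MY_STRINGIZE_IMPL(x)
#define MY_CONCAT_IMPL(a, b) a##b
#define MY_CONCAT(a, b) MY_CONCAT_IMPL(a, b)

// MY_BACKEND_HEADER(boost/atomic/detail/fence_ops_) expands to
// "boost/atomic/detail/fence_ops_gcc_atomic.hpp", so one can write:
//   #include MY_BACKEND_HEADER(boost/atomic/detail/fence_ops_)
#define MY_BACKEND_HEADER(prefix) MY_STRINGIZE(MY_CONCAT(prefix, MY_CORE_BACKEND).hpp)
```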

View File

@ -0,0 +1,50 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2020 Andrey Semashev
*/
/*!
* \file atomic/detail/fence_operations_emulated.hpp
*
* This header contains implementation of the \c fence_operations struct.
*/
#ifndef BOOST_ATOMIC_DETAIL_FENCE_OPERATIONS_EMULATED_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_FENCE_OPERATIONS_EMULATED_HPP_INCLUDED_
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/lock_pool.hpp>
#include <boost/atomic/detail/header.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
namespace boost {
namespace atomics {
namespace detail {
//! Fence operations based on lock pool
struct fence_operations_emulated
{
static BOOST_FORCEINLINE void thread_fence(memory_order) BOOST_NOEXCEPT
{
atomics::detail::lock_pool::thread_fence();
}
static BOOST_FORCEINLINE void signal_fence(memory_order) BOOST_NOEXCEPT
{
atomics::detail::lock_pool::signal_fence();
}
};
} // namespace detail
} // namespace atomics
} // namespace boost
#include <boost/atomic/detail/footer.hpp>
#endif // BOOST_ATOMIC_DETAIL_FENCE_OPERATIONS_EMULATED_HPP_INCLUDED_

View File

@ -0,0 +1,75 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2020 Andrey Semashev
*/
/*!
* \file atomic/detail/fence_ops_gcc_atomic.hpp
*
* This header contains implementation of the \c fence_operations struct.
*/
#ifndef BOOST_ATOMIC_DETAIL_FENCE_OPS_GCC_ATOMIC_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_FENCE_OPS_GCC_ATOMIC_HPP_INCLUDED_
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/fence_arch_operations.hpp>
#include <boost/atomic/detail/gcc_atomic_memory_order_utils.hpp>
#include <boost/atomic/detail/header.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
#if defined(__INTEL_COMPILER)
// This is used to suppress warning #32013 described in gcc_atomic_memory_order_utils.hpp
// for Intel Compiler.
// In debug builds the compiler does not inline any functions, so basically
// every atomic function call results in this warning. I don't know any other
// way to selectively disable just this one warning.
#pragma system_header
#endif
namespace boost {
namespace atomics {
namespace detail {
//! Fence operations based on gcc __atomic* intrinsics
struct fence_operations_gcc_atomic
{
static BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
{
#if defined(__x86_64__) || defined(__i386__)
if (order != memory_order_seq_cst)
{
__atomic_thread_fence(atomics::detail::convert_memory_order_to_gcc(order));
}
else
{
// gcc, clang, icc and probably other compilers generate mfence for a seq_cst fence,
// while a dummy lock-prefixed instruction would be enough and faster. See the comment in fence_ops_gcc_x86.hpp.
fence_arch_operations::thread_fence(order);
}
#else
__atomic_thread_fence(atomics::detail::convert_memory_order_to_gcc(order));
#endif
}
static BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT
{
__atomic_signal_fence(atomics::detail::convert_memory_order_to_gcc(order));
}
};
typedef fence_operations_gcc_atomic fence_operations;
} // namespace detail
} // namespace atomics
} // namespace boost
#include <boost/atomic/detail/footer.hpp>
#endif // BOOST_ATOMIC_DETAIL_FENCE_OPS_GCC_ATOMIC_HPP_INCLUDED_
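For reference, the raw builtins the struct above wraps behave as follows with GCC/Clang on x86-64 (illustrative; behaviour on other targets differs):

```
void demo_gcc_fences()
{
    __atomic_thread_fence(__ATOMIC_ACQUIRE); // compiler barrier only on x86
    __atomic_thread_fence(__ATOMIC_RELEASE); // compiler barrier only on x86
    __atomic_thread_fence(__ATOMIC_SEQ_CST); // emits mfence on x86, hence the
                                             // fence_arch_operations detour above
}
```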

View File

@ -0,0 +1,53 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2020 Andrey Semashev
*/
/*!
* \file atomic/detail/fence_ops_gcc_sync.hpp
*
* This header contains implementation of the \c fence_operations struct.
*/
#ifndef BOOST_ATOMIC_DETAIL_FENCE_OPS_GCC_SYNC_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_FENCE_OPS_GCC_SYNC_HPP_INCLUDED_
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/header.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
namespace boost {
namespace atomics {
namespace detail {
//! Fence operations based on gcc __sync* intrinsics
struct fence_operations_gcc_sync
{
static BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
{
if (order != memory_order_relaxed)
__sync_synchronize();
}
static BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT
{
if (order != memory_order_relaxed)
__asm__ __volatile__ ("" ::: "memory");
}
};
typedef fence_operations_gcc_sync fence_operations;
} // namespace detail
} // namespace atomics
} // namespace boost
#include <boost/atomic/detail/footer.hpp>
#endif // BOOST_ATOMIC_DETAIL_FENCE_OPS_GCC_SYNC_HPP_INCLUDED_

View File

@ -0,0 +1,64 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2009, 2011 Helge Bahmann
* Copyright (c) 2009 Phil Endecott
* Copyright (c) 2013 Tim Blechmann
* Linux-specific code by Phil Endecott
* Copyright (c) 2014 Andrey Semashev
*/
/*!
* \file atomic/detail/fence_ops_linux_arm.hpp
*
* This header contains implementation of the \c fence_operations struct.
*/
#ifndef BOOST_ATOMIC_DETAIL_FENCE_OPS_LINUX_ARM_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_FENCE_OPS_LINUX_ARM_HPP_INCLUDED_
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/header.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
namespace boost {
namespace atomics {
namespace detail {
//! Fence operations based on Linux-specific system routines
struct fence_operations_linux_arm
{
static BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
{
if (order != memory_order_relaxed)
hardware_full_fence();
}
static BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT
{
if (order != memory_order_relaxed)
__asm__ __volatile__ ("" ::: "memory");
}
static BOOST_FORCEINLINE void hardware_full_fence() BOOST_NOEXCEPT
{
// See the comment in core_ops_linux_arm.hpp regarding the function pointer below
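        // (0xffff0fa0 is the kernel-provided __kuser_memory_barrier helper in the ARM vector page.)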
typedef void (*kernel_dmb_t)(void);
((kernel_dmb_t)0xffff0fa0)();
}
};
typedef fence_operations_linux_arm fence_operations;
} // namespace detail
} // namespace atomics
} // namespace boost
#include <boost/atomic/detail/footer.hpp>
#endif // BOOST_ATOMIC_DETAIL_FENCE_OPS_LINUX_ARM_HPP_INCLUDED_

View File

@ -0,0 +1,67 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2020 Andrey Semashev
*/
/*!
* \file atomic/detail/fence_ops_windows.hpp
*
* This header contains implementation of the \c fence_operations struct.
*/
#ifndef BOOST_ATOMIC_DETAIL_FENCE_OPS_WINDOWS_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_FENCE_OPS_WINDOWS_HPP_INCLUDED_
#include <boost/cstdint.hpp>
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/interlocked.hpp>
#include <boost/atomic/detail/ops_msvc_common.hpp>
#include <boost/atomic/detail/header.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
namespace boost {
namespace atomics {
namespace detail {
//! Fence operations based on Windows-specific system calls or intrinsics
struct fence_operations_windows
{
static BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
{
if (order != memory_order_relaxed)
{
BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
if (order == memory_order_seq_cst)
hardware_full_fence();
BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
}
}
static BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT
{
if (order != memory_order_relaxed)
BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
}
static BOOST_FORCEINLINE void hardware_full_fence() BOOST_NOEXCEPT
{
boost::uint32_t tmp;
BOOST_ATOMIC_INTERLOCKED_INCREMENT(&tmp);
}
};
typedef fence_operations_windows fence_operations;
} // namespace detail
} // namespace atomics
} // namespace boost
#include <boost/atomic/detail/footer.hpp>
#endif // BOOST_ATOMIC_DETAIL_FENCE_OPS_WINDOWS_HPP_INCLUDED_

View File

@ -0,0 +1,142 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2018 Andrey Semashev
*/
/*!
* \file atomic/detail/float_sizes.hpp
*
 * This header defines macros for testing builtin floating point type sizes.

*/
#ifndef BOOST_ATOMIC_DETAIL_FLOAT_SIZES_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_FLOAT_SIZES_HPP_INCLUDED_
#include <float.h>
#include <boost/atomic/detail/config.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
// Detect value sizes of the different floating point types. The value sizes may be less than the corresponding type sizes
// if the type contains padding bits. This is typical e.g. with 80-bit extended float types, which are often represented as 128-bit types.
// See: https://en.wikipedia.org/wiki/IEEE_754
// For Intel x87 extended double see: https://en.wikipedia.org/wiki/Extended_precision#x86_Architecture_Extended_Precision_Format
// For IBM extended double (a.k.a. double-double) see: https://en.wikipedia.org/wiki/Long_double#Implementations, https://gcc.gnu.org/wiki/Ieee128PowerPC
#if (FLT_RADIX+0) == 2
#if ((FLT_MANT_DIG+0) == 11) && ((FLT_MAX_EXP+0) == 16) // IEEE 754 binary16
#define BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT_VALUE 2
#elif ((FLT_MANT_DIG+0) == 24) && ((FLT_MAX_EXP+0) == 128) // IEEE 754 binary32
#define BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT_VALUE 4
#elif ((FLT_MANT_DIG+0) == 53) && ((FLT_MAX_EXP+0) == 1024) // IEEE 754 binary64
#define BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT_VALUE 8
#elif ((FLT_MANT_DIG+0) == 64) && ((FLT_MAX_EXP+0) == 16384) // x87 extended double
#define BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT_VALUE 10
#elif ((FLT_MANT_DIG+0) == 106) && ((FLT_MAX_EXP+0) == 1024) // IBM extended double
#define BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT_VALUE 16
#elif ((FLT_MANT_DIG+0) == 113) && ((FLT_MAX_EXP+0) == 16384) // IEEE 754 binary128
#define BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT_VALUE 16
#elif ((FLT_MANT_DIG+0) == 237) && ((FLT_MAX_EXP+0) == 262144) // IEEE 754 binary256
#define BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT_VALUE 32
#endif
#if ((DBL_MANT_DIG+0) == 11) && ((DBL_MAX_EXP+0) == 16) // IEEE 754 binary16
#define BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE_VALUE 2
#elif ((DBL_MANT_DIG+0) == 24) && ((DBL_MAX_EXP+0) == 128) // IEEE 754 binary32
#define BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE_VALUE 4
#elif ((DBL_MANT_DIG+0) == 53) && ((DBL_MAX_EXP+0) == 1024) // IEEE 754 binary64
#define BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE_VALUE 8
#elif ((DBL_MANT_DIG+0) == 64) && ((DBL_MAX_EXP+0) == 16384) // x87 extended double
#define BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE_VALUE 10
#elif ((DBL_MANT_DIG+0) == 106) && ((DBL_MAX_EXP+0) == 1024) // IBM extended double
#define BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE_VALUE 16
#elif ((DBL_MANT_DIG+0) == 113) && ((DBL_MAX_EXP+0) == 16384) // IEEE 754 binary128
#define BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE_VALUE 16
#elif ((DBL_MANT_DIG+0) == 237) && ((DBL_MAX_EXP+0) == 262144) // IEEE 754 binary256
#define BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE_VALUE 32
#endif
#if ((LDBL_MANT_DIG+0) == 11) && ((LDBL_MAX_EXP+0) == 16) // IEEE 754 binary16
#define BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE_VALUE 2
#elif ((LDBL_MANT_DIG+0) == 24) && ((LDBL_MAX_EXP+0) == 128) // IEEE 754 binary32
#define BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE_VALUE 4
#elif ((LDBL_MANT_DIG+0) == 53) && ((LDBL_MAX_EXP+0) == 1024) // IEEE 754 binary64
#define BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE_VALUE 8
#elif ((LDBL_MANT_DIG+0) == 64) && ((LDBL_MAX_EXP+0) == 16384) // x87 extended double
#define BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE_VALUE 10
#elif ((LDBL_MANT_DIG+0) == 106) && ((LDBL_MAX_EXP+0) == 1024) // IBM extended double
#define BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE_VALUE 16
#elif ((LDBL_MANT_DIG+0) == 113) && ((LDBL_MAX_EXP+0) == 16384) // IEEE 754 binary128
#define BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE_VALUE 16
#elif ((LDBL_MANT_DIG+0) == 237) && ((LDBL_MAX_EXP+0) == 262144) // IEEE 754 binary256
#define BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE_VALUE 32
#endif
#elif (FLT_RADIX+0) == 10
#if ((FLT_MANT_DIG+0) == 7) && ((FLT_MAX_EXP+0) == 97) // IEEE 754 decimal32
#define BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT_VALUE 4
#elif ((FLT_MANT_DIG+0) == 16) && ((FLT_MAX_EXP+0) == 385) // IEEE 754 decimal64
#define BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT_VALUE 8
#elif ((FLT_MANT_DIG+0) == 34) && ((FLT_MAX_EXP+0) == 6145) // IEEE 754 decimal128
#define BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT_VALUE 16
#endif
#if ((DBL_MANT_DIG+0) == 7) && ((DBL_MAX_EXP+0) == 97) // IEEE 754 decimal32
#define BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE_VALUE 4
#elif ((DBL_MANT_DIG+0) == 16) && ((DBL_MAX_EXP+0) == 385) // IEEE 754 decimal64
#define BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE_VALUE 8
#elif ((DBL_MANT_DIG+0) == 34) && ((DBL_MAX_EXP+0) == 6145) // IEEE 754 decimal128
#define BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE_VALUE 16
#endif
#if ((LDBL_MANT_DIG+0) == 7) && ((LDBL_MAX_EXP+0) == 97) // IEEE 754 decimal32
#define BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE_VALUE 4
#elif ((LDBL_MANT_DIG+0) == 16) && ((LDBL_MAX_EXP+0) == 385) // IEEE 754 decimal64
#define BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE_VALUE 8
#elif ((LDBL_MANT_DIG+0) == 34) && ((LDBL_MAX_EXP+0) == 6145) // IEEE 754 decimal128
#define BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE_VALUE 16
#endif
#endif
// GCC and compatible compilers define internal macros with builtin type traits
#if defined(__SIZEOF_FLOAT__)
#define BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT __SIZEOF_FLOAT__
#endif
#if defined(__SIZEOF_DOUBLE__)
#define BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE __SIZEOF_DOUBLE__
#endif
#if defined(__SIZEOF_LONG_DOUBLE__)
#define BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE __SIZEOF_LONG_DOUBLE__
#endif
#if !defined(BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT) || !defined(BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE) || !defined(BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE)
#define BOOST_ATOMIC_DETAIL_ALIGN_SIZE_TO_POWER_OF_2(x)\
((x) == 1u ? 1u : ((x) == 2u ? 2u : ((x) <= 4u ? 4u : ((x) <= 8u ? 8u : ((x) <= 16u ? 16u : ((x) <= 32u ? 32u : (x)))))))
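// For example, the 10-byte x87 extended double value size rounds up to 16.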
// Make our best guess. These sizes may not be accurate, but they are good enough to estimate the size of the storage required to hold these types.
#if !defined(BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT) && defined(BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT_VALUE)
#define BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT BOOST_ATOMIC_DETAIL_ALIGN_SIZE_TO_POWER_OF_2(BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT_VALUE)
#endif
#if !defined(BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE) && defined(BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE_VALUE)
#define BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE BOOST_ATOMIC_DETAIL_ALIGN_SIZE_TO_POWER_OF_2(BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE_VALUE)
#endif
#if !defined(BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE) && defined(BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE_VALUE)
#define BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE BOOST_ATOMIC_DETAIL_ALIGN_SIZE_TO_POWER_OF_2(BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE_VALUE)
#endif
#endif // !defined(BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT) || !defined(BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE) || !defined(BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE)
#if !defined(BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT_VALUE) || !defined(BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT) ||\
!defined(BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE_VALUE) || !defined(BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE) ||\
!defined(BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE_VALUE) || !defined(BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE)
#error Boost.Atomic: Failed to determine builtin floating point type sizes, the target platform is not supported. Please, report to the developers (patches are welcome).
#endif
#endif // BOOST_ATOMIC_DETAIL_FLOAT_SIZES_HPP_INCLUDED_
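A quick way to see the classification above in action (illustrative stand-alone program, not part of the header): on x86-64 Linux, `long double` is the x87 80-bit format, so `LDBL_MANT_DIG == 64` and `LDBL_MAX_EXP == 16384` select the 10-byte value size, which the power-of-2 rounding then widens to the 16 bytes actually occupied by `sizeof(long double)`.

```
#include <cfloat>
#include <cstdio>

int main()
{
    // On x86-64 Linux this typically prints:
    // radix=2 mant_dig=64 max_exp=16384 sizeof=16
    std::printf("radix=%d mant_dig=%d max_exp=%d sizeof=%u\n",
                (int)FLT_RADIX, (int)LDBL_MANT_DIG, (int)LDBL_MAX_EXP,
                (unsigned)sizeof(long double));
    return 0;
}
```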

Some files were not shown because too many files have changed in this diff.