aboutsummaryrefslogtreecommitdiffstats
path: root/vespalib
diff options
context:
space:
mode:
authorTor Brede Vekterli <vekterli@yahooinc.com>2022-02-16 14:38:08 +0000
committerTor Brede Vekterli <vekterli@yahooinc.com>2022-02-16 14:38:08 +0000
commitb1486b7d40bd9853bc77ffb1fd18a9e3d6c90d96 (patch)
treeaaf3003470583690d7523f7e853b67433bcf4277 /vespalib
parent82a4197d541fe4f9a8aa58fdf187a6f7f5111c2a (diff)
Use standard feature test for atomic_ref support instead of ad-hoc macro
Diffstat (limited to 'vespalib')
-rw-r--r--vespalib/src/vespa/vespalib/util/atomic.h18
1 file changed, 7 insertions, 11 deletions
diff --git a/vespalib/src/vespa/vespalib/util/atomic.h b/vespalib/src/vespa/vespalib/util/atomic.h
index ecf2f4abfbf..747a18005aa 100644
--- a/vespalib/src/vespa/vespalib/util/atomic.h
+++ b/vespalib/src/vespa/vespalib/util/atomic.h
@@ -3,6 +3,7 @@
#include <atomic>
#include <type_traits>
+#include <version>
/**
* Utility functions for single value atomic memory accesses.
@@ -23,11 +24,6 @@ namespace vespalib::atomic {
// std::atomic_ref<T> helpers
//
-// No atomic_ref on clang on darwin (for now)
-#if defined(__clang__) && defined(__apple_build_version__)
-# define VESPA_NO_ATOMIC_REF_SUPPORT
-#endif
-
namespace detail {
template <typename T> struct is_std_atomic : std::false_type {};
template <typename T> struct is_std_atomic<std::atomic<T>> : std::true_type {};
@@ -39,7 +35,7 @@ template <typename T> inline constexpr bool is_std_atomic_v = is_std_atomic<T>::
template <typename T1, typename T2>
constexpr void store_ref_relaxed(T1& lhs, T2&& v) noexcept {
static_assert(!detail::is_std_atomic_v<T1>, "atomic ref function invoked with a std::atomic, probably not intended");
-#ifndef VESPA_NO_ATOMIC_REF_SUPPORT
+#if __cpp_lib_atomic_ref
static_assert(std::atomic_ref<T1>::is_always_lock_free);
std::atomic_ref<T1>(lhs).store(std::forward<T2>(v), std::memory_order_relaxed);
#else
@@ -51,7 +47,7 @@ constexpr void store_ref_relaxed(T1& lhs, T2&& v) noexcept {
template <typename T1, typename T2>
constexpr void store_ref_release(T1& lhs, T2&& v) noexcept {
static_assert(!detail::is_std_atomic_v<T1>, "atomic ref function invoked with a std::atomic, probably not intended");
-#ifndef VESPA_NO_ATOMIC_REF_SUPPORT
+#if __cpp_lib_atomic_ref
static_assert(std::atomic_ref<T1>::is_always_lock_free);
std::atomic_ref<T1>(lhs).store(std::forward<T2>(v), std::memory_order_release);
#else
@@ -64,7 +60,7 @@ constexpr void store_ref_release(T1& lhs, T2&& v) noexcept {
template <typename T1, typename T2>
constexpr void store_ref_seq_cst(T1& lhs, T2&& v) noexcept {
static_assert(!detail::is_std_atomic_v<T1>, "atomic ref function invoked with a std::atomic, probably not intended");
-#ifndef VESPA_NO_ATOMIC_REF_SUPPORT
+#if __cpp_lib_atomic_ref
static_assert(std::atomic_ref<T1>::is_always_lock_free);
std::atomic_ref<T1>(lhs).store(std::forward<T2>(v), std::memory_order_seq_cst);
#else
@@ -77,7 +73,7 @@ constexpr void store_ref_seq_cst(T1& lhs, T2&& v) noexcept {
template <typename T>
[[nodiscard]] constexpr T load_ref_relaxed(const T& a) noexcept {
static_assert(!detail::is_std_atomic_v<T>, "atomic ref function invoked with a std::atomic, probably not intended");
-#ifndef VESPA_NO_ATOMIC_REF_SUPPORT
+#if __cpp_lib_atomic_ref
static_assert(std::atomic_ref<const T>::is_always_lock_free);
return std::atomic_ref<const T>(a).load(std::memory_order_relaxed);
#else
@@ -89,7 +85,7 @@ template <typename T>
template <typename T>
[[nodiscard]] constexpr T load_ref_acquire(const T& a) noexcept {
static_assert(!detail::is_std_atomic_v<T>, "atomic ref function invoked with a std::atomic, probably not intended");
-#ifndef VESPA_NO_ATOMIC_REF_SUPPORT
+#if __cpp_lib_atomic_ref
static_assert(std::atomic_ref<const T>::is_always_lock_free);
return std::atomic_ref<const T>(a).load(std::memory_order_acquire);
#else
@@ -102,7 +98,7 @@ template <typename T>
template <typename T>
[[nodiscard]] constexpr T load_ref_seq_cst(const T& a) noexcept {
static_assert(!detail::is_std_atomic_v<T>, "atomic ref function invoked with a std::atomic, probably not intended");
-#ifndef VESPA_NO_ATOMIC_REF_SUPPORT
+#if __cpp_lib_atomic_ref
static_assert(std::atomic_ref<const T>::is_always_lock_free);
return std::atomic_ref<const T>(a).load(std::memory_order_acquire);
#else