author    Tor Brede Vekterli <vekterli@yahooinc.com>  2022-02-16 13:05:20 +0000
committer Tor Brede Vekterli <vekterli@yahooinc.com>  2022-02-16 13:56:41 +0000
commit    82a4197d541fe4f9a8aa58fdf187a6f7f5111c2a (patch)
tree      aaae153be7a2cc73121b66fc999cf82520043e50 /vespalib
parent    6f99bd502132cd378124a40060ac1d74d54f5e92 (diff)
Add vespalib utility functions for atomic memory access
Adds the following utilities:

 * Atomic reference wrapper functions for accessing single memory locations
   as if they were `std::atomic` instances.
 * Wrappers for less verbose `std::atomic` loads/stores that also sanity-check
   that accesses are always lock-free.
Diffstat (limited to 'vespalib')
-rw-r--r--  vespalib/src/tests/spin_lock/spin_lock_test.cpp   17
-rw-r--r--  vespalib/src/vespa/vespalib/util/atomic.h         155
2 files changed, 160 insertions, 12 deletions
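
Below is a minimal usage sketch of the new helpers (an illustration added for this writeup, not part of the commit; the Counters struct and function names are made up). Plain members are accessed through the *_ref_* wrappers, while std::atomic members use the shorter non-ref wrappers, which additionally static_assert that the type is always lock-free.

#include <vespa/vespalib/util/atomic.h>
#include <atomic>
#include <cstddef>

using namespace vespalib::atomic;

struct Counters {
    size_t            hits = 0;      // plain member; every racing access goes through the *_ref_* helpers
    std::atomic<bool> ready{false};  // std::atomic member; uses the shorter non-ref helpers
};

void producer(Counters& c) {
    // separate atomic load + store (not an atomic read-modify-write)
    store_ref_relaxed(c.hits, load_ref_relaxed(c.hits) + 1);
    // release store; pairs with the acquire load in consumer()
    store_release(c.ready, true);
}

bool consumer(const Counters& c, size_t& out) {
    if (load_acquire(c.ready)) {
        out = load_ref_relaxed(c.hits);
        return true;
    }
    return false;
}
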
diff --git a/vespalib/src/tests/spin_lock/spin_lock_test.cpp b/vespalib/src/tests/spin_lock/spin_lock_test.cpp
index 847b01247e7..78e35a3e8d1 100644
--- a/vespalib/src/tests/spin_lock/spin_lock_test.cpp
+++ b/vespalib/src/tests/spin_lock/spin_lock_test.cpp
@@ -1,12 +1,14 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include <vespa/vespalib/util/spin_lock.h>
+#include <vespa/vespalib/util/atomic.h>
#include <vespa/vespalib/util/benchmark_timer.h>
#include <vespa/vespalib/util/time.h>
#include <vespa/vespalib/testkit/test_kit.h>
#include <array>
using namespace vespalib;
+using namespace vespalib::atomic;
bool verbose = false;
double budget = 0.25;
@@ -19,30 +21,21 @@ struct DummyLock {
//-----------------------------------------------------------------------------
-template <typename T>
-constexpr void relaxed_store(T& lhs, T v) noexcept {
- std::atomic_ref<T>(lhs).store(v, std::memory_order_relaxed);
-}
-template <typename T>
-constexpr T relaxed_load(const T& a) noexcept {
- return std::atomic_ref<const T>(a).load(std::memory_order_relaxed);
-}
-
struct MyState {
static constexpr size_t SZ = 5;
std::array<size_t,SZ> state = {0,0,0,0,0};
void update() {
std::array<size_t,SZ> tmp;
for (size_t i = 0; i < SZ; ++i) {
- relaxed_store(tmp[i], relaxed_load(state[i]));
+ store_ref_relaxed(tmp[i], load_ref_relaxed(state[i]));
}
for (size_t i = 0; i < SZ; ++i) {
- relaxed_store(state[i], relaxed_load(tmp[i]) + 1);
+ store_ref_relaxed(state[i], load_ref_relaxed(tmp[i]) + 1);
}
}
bool check(size_t expect) const {
for (const auto& value: state) {
- if (relaxed_load(value) != expect) {
+ if (load_ref_relaxed(value) != expect) {
return false;
}
}
diff --git a/vespalib/src/vespa/vespalib/util/atomic.h b/vespalib/src/vespa/vespalib/util/atomic.h
new file mode 100644
index 00000000000..ecf2f4abfbf
--- /dev/null
+++ b/vespalib/src/vespa/vespalib/util/atomic.h
@@ -0,0 +1,155 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#pragma once
+
+#include <atomic>
+#include <type_traits>
+
+/**
+ * Utility functions for single value atomic memory accesses.
+ *
+ * store/load_ref_* functions can be used to provide well-defined atomic
+ * memory access to memory locations that aren't explicitly wrapped in std::atomic
+ * objects. In this case, all potentially racing loads/stores _must_ go through
+ * atomic utility functions (or atomic_ref).
+ *
+ * Non-ref store/load_* functions are just syntactic sugar to make code using
+ * atomics more readable, but additionally add sanity checks that all atomics
+ * are always lock-free.
+ */
+
+namespace vespalib::atomic {
+
+//
+// std::atomic_ref<T> helpers
+//
+
+// No atomic_ref on clang on darwin (for now)
+#if defined(__clang__) && defined(__apple_build_version__)
+# define VESPA_NO_ATOMIC_REF_SUPPORT
+#endif
+
+namespace detail {
+template <typename T> struct is_std_atomic : std::false_type {};
+template <typename T> struct is_std_atomic<std::atomic<T>> : std::true_type {};
+template <typename T> inline constexpr bool is_std_atomic_v = is_std_atomic<T>::value;
+}
+
+// TODO can generalize atomic_ref code once no special casing is needed
+
+template <typename T1, typename T2>
+constexpr void store_ref_relaxed(T1& lhs, T2&& v) noexcept {
+ static_assert(!detail::is_std_atomic_v<T1>, "atomic ref function invoked with a std::atomic, probably not intended");
+#ifndef VESPA_NO_ATOMIC_REF_SUPPORT
+ static_assert(std::atomic_ref<T1>::is_always_lock_free);
+ std::atomic_ref<T1>(lhs).store(std::forward<T2>(v), std::memory_order_relaxed);
+#else
+ // TODO replace with compiler intrinsic
+ lhs = std::forward<T2>(v);
+#endif
+}
+
+template <typename T1, typename T2>
+constexpr void store_ref_release(T1& lhs, T2&& v) noexcept {
+ static_assert(!detail::is_std_atomic_v<T1>, "atomic ref function invoked with a std::atomic, probably not intended");
+#ifndef VESPA_NO_ATOMIC_REF_SUPPORT
+ static_assert(std::atomic_ref<T1>::is_always_lock_free);
+ std::atomic_ref<T1>(lhs).store(std::forward<T2>(v), std::memory_order_release);
+#else
+ // TODO replace with compiler intrinsic
+ std::atomic_thread_fence(std::memory_order_release);
+ lhs = std::forward<T2>(v);
+#endif
+}
+
+template <typename T1, typename T2>
+constexpr void store_ref_seq_cst(T1& lhs, T2&& v) noexcept {
+ static_assert(!detail::is_std_atomic_v<T1>, "atomic ref function invoked with a std::atomic, probably not intended");
+#ifndef VESPA_NO_ATOMIC_REF_SUPPORT
+ static_assert(std::atomic_ref<T1>::is_always_lock_free);
+ std::atomic_ref<T1>(lhs).store(std::forward<T2>(v), std::memory_order_seq_cst);
+#else
+ // TODO replace with compiler intrinsic
+ std::atomic_thread_fence(std::memory_order_seq_cst);
+ lhs = std::forward<T2>(v);
+#endif
+}
+
+template <typename T>
+[[nodiscard]] constexpr T load_ref_relaxed(const T& a) noexcept {
+ static_assert(!detail::is_std_atomic_v<T>, "atomic ref function invoked with a std::atomic, probably not intended");
+#ifndef VESPA_NO_ATOMIC_REF_SUPPORT
+ static_assert(std::atomic_ref<const T>::is_always_lock_free);
+ return std::atomic_ref<const T>(a).load(std::memory_order_relaxed);
+#else
+ // TODO replace with compiler intrinsic
+ return a;
+#endif
+}
+
+template <typename T>
+[[nodiscard]] constexpr T load_ref_acquire(const T& a) noexcept {
+ static_assert(!detail::is_std_atomic_v<T>, "atomic ref function invoked with a std::atomic, probably not intended");
+#ifndef VESPA_NO_ATOMIC_REF_SUPPORT
+ static_assert(std::atomic_ref<const T>::is_always_lock_free);
+ return std::atomic_ref<const T>(a).load(std::memory_order_acquire);
+#else
+ // TODO replace with compiler intrinsic
+ std::atomic_thread_fence(std::memory_order_acquire);
+ return a;
+#endif
+}
+
+template <typename T>
+[[nodiscard]] constexpr T load_ref_seq_cst(const T& a) noexcept {
+ static_assert(!detail::is_std_atomic_v<T>, "atomic ref function invoked with a std::atomic, probably not intended");
+#ifndef VESPA_NO_ATOMIC_REF_SUPPORT
+ static_assert(std::atomic_ref<const T>::is_always_lock_free);
+ return std::atomic_ref<const T>(a).load(std::memory_order_seq_cst);
+#else
+ // TODO replace with compiler intrinsic
+ std::atomic_thread_fence(std::memory_order_seq_cst);
+ return a;
+#endif
+}
+
+//
+// std::atomic<T> helpers
+//
+
+template <typename T1, typename T2>
+constexpr void store_relaxed(std::atomic<T1>& lhs, T2&& v) noexcept {
+ static_assert(std::atomic<T1>::is_always_lock_free);
+ lhs.store(std::forward<T2>(v), std::memory_order_relaxed);
+}
+
+template <typename T1, typename T2>
+constexpr void store_release(std::atomic<T1>& lhs, T2&& v) noexcept {
+ static_assert(std::atomic<T1>::is_always_lock_free);
+ lhs.store(std::forward<T2>(v), std::memory_order_release);
+}
+
+template <typename T1, typename T2>
+constexpr void store_seq_cst(std::atomic<T1>& lhs, T2&& v) noexcept {
+ static_assert(std::atomic<T1>::is_always_lock_free);
+ lhs.store(std::forward<T2>(v), std::memory_order_seq_cst);
+}
+
+template <typename T>
+[[nodiscard]] constexpr T load_relaxed(const std::atomic<T>& a) noexcept {
+ static_assert(std::atomic<T>::is_always_lock_free);
+ return a.load(std::memory_order_relaxed);
+}
+
+template <typename T>
+[[nodiscard]] constexpr T load_acquire(const std::atomic<T>& a) noexcept {
+ static_assert(std::atomic<T>::is_always_lock_free);
+ return a.load(std::memory_order_acquire);
+}
+
+template <typename T>
+[[nodiscard]] constexpr T load_seq_cst(const std::atomic<T>& a) noexcept {
+ static_assert(std::atomic<T>::is_always_lock_free);
+ return a.load(std::memory_order_seq_cst);
+}
+
+} // vespalib::atomic
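
As a rough sketch of the release/acquire pairing the *_ref_* helpers are meant for (hypothetical example, assuming a platform where std::atomic_ref<int> is lock-free): the writer publishes payload before setting flag with store_ref_release, and the reader's load_ref_acquire on flag guarantees it then observes the published payload.

#include <vespa/vespalib/util/atomic.h>
#include <cassert>
#include <thread>

using namespace vespalib::atomic;

struct Shared {
    int payload = 0;
    int flag    = 0; // 0 = not yet published, 1 = published
};

int main() {
    Shared s;
    std::thread writer([&s] {
        store_ref_relaxed(s.payload, 42); // plain int written atomically via atomic_ref
        store_ref_release(s.flag, 1);     // release: makes the payload write visible to an acquire reader
    });
    std::thread reader([&s] {
        while (load_ref_acquire(s.flag) != 1) {
            // spin until the writer publishes
        }
        assert(load_ref_relaxed(s.payload) == 42); // guaranteed by the release/acquire pairing
    });
    writer.join();
    reader.join();
    return 0;
}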