summary | refs | log | tree | commit | diff | stats
path: root/vespalib
diff options
context:
space:
mode:
author    Henning Baldersheim <balder@yahoo-inc.com>  2022-03-14 16:12:08 +0100
committer GitHub <noreply@github.com>                 2022-03-14 16:12:08 +0100
commit0a8caeae3ea37d513a649a156e544bd59ab0545a (patch)
treeb3e16ed64fc9628db08146794390ea87ff29b29b /vespalib
parentde23069111b791ab1b747a7f678e108a805d4f2d (diff)
parentd62262b52276b9d4c676e7a60a944e46c3628fe6 (diff)
Merge pull request #21662 from vespa-engine/vekterli/use-atomic-values-in-btree-bucket-database-impl
Make B-tree bucket database values atomic to ensure well-defined access [run-systemtest]
Diffstat (limited to 'vespalib')
-rw-r--r--  vespalib/src/vespa/vespalib/btree/btreenodestore.cpp          |  2 +
-rw-r--r--  vespalib/src/vespa/vespalib/datastore/atomic_value_wrapper.h  | 63 ++++
2 files changed, 65 insertions(+), 0 deletions(-)
diff --git a/vespalib/src/vespa/vespalib/btree/btreenodestore.cpp b/vespalib/src/vespa/vespalib/btree/btreenodestore.cpp
index 05323d1329a..9f98ba05493 100644
--- a/vespalib/src/vespa/vespalib/btree/btreenodestore.cpp
+++ b/vespalib/src/vespa/vespalib/btree/btreenodestore.cpp
@@ -4,6 +4,7 @@
#include "btreerootbase.h"
#include "btreeroot.h"
#include "btreenodeallocator.h"
+#include <vespa/vespalib/datastore/atomic_value_wrapper.h>
#include <vespa/vespalib/datastore/datastore.h>
#include <vespa/vespalib/datastore/buffer_type.hpp>
@@ -43,6 +44,7 @@ VESPALIB_DATASTORE_INSTANTIATE_BUFFERTYPE_LEAFNODE(uint32_t, uint32_t, NoAggrega
VESPALIB_DATASTORE_INSTANTIATE_BUFFERTYPE_LEAFNODE(uint32_t, BTreeNoLeafData, NoAggregated, BTreeDefaultTraits::LEAF_SLOTS);
VESPALIB_DATASTORE_INSTANTIATE_BUFFERTYPE_LEAFNODE(uint32_t, int32_t , MinMaxAggregated, BTreeDefaultTraits::LEAF_SLOTS);
VESPALIB_DATASTORE_INSTANTIATE_BUFFERTYPE_LEAFNODE(uint64_t, uint64_t , MinMaxAggregated, BTreeDefaultTraits::LEAF_SLOTS);
+VESPALIB_DATASTORE_INSTANTIATE_BUFFERTYPE_LEAFNODE(uint64_t, AtomicValueWrapper<uint64_t>, MinMaxAggregated, BTreeDefaultTraits::LEAF_SLOTS);
VESPALIB_DATASTORE_INSTANTIATE_BUFFERTYPE_LEAFNODE(AtomicEntryRef, AtomicEntryRef, NoAggregated, BTreeDefaultTraits::LEAF_SLOTS);
VESPALIB_DATASTORE_INSTANTIATE_BUFFERTYPE_LEAFNODE(AtomicEntryRef, BTreeNoLeafData, NoAggregated, BTreeDefaultTraits::LEAF_SLOTS);
VESPALIB_DATASTORE_INSTANTIATE_BUFFERTYPE_LEAFNODE(EntryRef, BTreeNoLeafData, NoAggregated, BTreeDefaultTraits::LEAF_SLOTS);
diff --git a/vespalib/src/vespa/vespalib/datastore/atomic_value_wrapper.h b/vespalib/src/vespa/vespalib/datastore/atomic_value_wrapper.h
new file mode 100644
index 00000000000..3ee871be9b0
--- /dev/null
+++ b/vespalib/src/vespa/vespalib/datastore/atomic_value_wrapper.h
@@ -0,0 +1,63 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include <atomic>
+
+namespace vespalib::datastore {
+
+/**
+ * Copyable atomic wrapper for a primitive value that offers value store and load
+ * functionality with explicit memory ordering constraints. Intended to be used for
+ * non-EntryRef values where atomicity and transitive visibility is a requirement.
+ *
+ * Copying always happens with relaxed ordering, as it expects that the copier has
+ * already loaded the source AtomicValueWrapper with an ordering that is appropriate
+ * for observing any transitive memory dependencies.
+ *
+ * This wrapper is intentionally not implicitly convertible to/from values of the
+ * underlying primitive type.
+ *
+ * Note: use AtomicEntryRef instead if you're wrapping an EntryRef directly.
+ */
+template <typename T>
+class AtomicValueWrapper {
+    // Only meaningful for lock-free atomics; a lock-based std::atomic<T>
+    // fallback would add per-value locking and defeat the purpose of
+    // storing plain values atomically inside B-tree nodes.
+    static_assert(std::atomic<T>::is_always_lock_free);
+
+    std::atomic<T> _value;
+public:
+    // Default constructor value-initializes the wrapped value (zero for
+    // arithmetic T).
+    constexpr AtomicValueWrapper() noexcept : _value() {}
+    constexpr explicit AtomicValueWrapper(T value) noexcept : _value(value) {}
+    // Copy and move use relaxed loads/stores by design; see the class
+    // comment for why relaxed ordering is sufficient when copying.
+    AtomicValueWrapper(const AtomicValueWrapper& rhs) noexcept
+        : _value(rhs._value.load(std::memory_order_relaxed))
+    {}
+    AtomicValueWrapper(AtomicValueWrapper&& rhs) noexcept
+        : _value(rhs._value.load(std::memory_order_relaxed))
+    {}
+    AtomicValueWrapper& operator=(const AtomicValueWrapper& rhs) noexcept {
+        _value.store(rhs._value.load(std::memory_order_relaxed),
+                     std::memory_order_relaxed);
+        return *this;
+    }
+    // Publishing store: pairs with load_acquire() in readers, making writes
+    // sequenced before this store visible after a matching acquire load.
+    void store_release(T value) noexcept {
+        _value.store(value, std::memory_order_release);
+    }
+    // Store with no ordering guarantees beyond atomicity of the value itself.
+    void store_relaxed(T value) noexcept {
+        _value.store(value, std::memory_order_relaxed);
+    }
+    // Consuming load: pairs with store_release() performed by a writer.
+    [[nodiscard]] T load_acquire() const noexcept {
+        return _value.load(std::memory_order_acquire);
+    }
+    // Load with no ordering guarantees beyond atomicity of the value itself.
+    [[nodiscard]] T load_relaxed() const noexcept {
+        return _value.load(std::memory_order_relaxed);
+    }
+
+    // Comparison reads both sides with relaxed ordering; it compares the
+    // wrapped values only and is not a synchronization point.
+    [[nodiscard]] bool operator==(const AtomicValueWrapper& rhs) const noexcept {
+        return (load_relaxed() == rhs.load_relaxed());
+    }
+    [[nodiscard]] bool operator!=(const AtomicValueWrapper& rhs) const noexcept {
+        return !(*this == rhs);
+    }
+};
+
+}