author     Tor Brede Vekterli <vekterli@yahooinc.com>    2022-03-11 12:17:26 +0000
committer  Tor Brede Vekterli <vekterli@yahooinc.com>    2022-03-11 16:28:19 +0000
commit     3e033275ec2f1deda6aca43b98949c9297ead2ea (patch)
tree       78410573eb744821d5007194eb2240250d1d8b87 /vespalib
parent     445f2b8f9b0a80fc6488c7095d5cd9ba8df51b43 (diff)
Make B-tree bucket database values atomic to ensure well-defined access
The existing implementation already used explicit acquire/release fences to ensure visibility from the writer to concurrent readers, but the values written and read were not of an atomic type and thus _technically_ constituted a data race.

This commit adds an AtomicValueWrapper utility to vespalib which looks and acts much like the existing AtomicEntryRef, but for primitive types that are not related to EntryRefs. The bucket DB B-tree primitive u64 value type is replaced with this atomic wrapper, and the explicit memory fences are replaced with release stores and acquire loads on the atomic values themselves, ensuring they form correct release/acquire pairs between the writer and readers.
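As a minimal illustration of the pairing the commit message describes (not code from this commit; the names below are hypothetical), a writer publishing through a release store on a std::atomic<uint64_t> pairs with a reader's acquire load, so writes sequenced before the store are visible after the load without any stand-alone fences:

#include <atomic>
#include <cstdint>

std::atomic<uint64_t> shared_value{0};
uint64_t side_data = 0; // non-atomic data published together with the value

// Writer: writes made before the release store become visible to any reader
// whose acquire load observes the stored value.
void publish_once(uint64_t v) {
    side_data = v;
    shared_value.store(v, std::memory_order_release);
}

// Reader: the acquire load pairs with the release store in publish_once().
// If a non-zero value is observed, reading side_data afterwards is well defined
// (assuming publish_once() is called at most once with a non-zero value).
uint64_t try_read() {
    uint64_t v = shared_value.load(std::memory_order_acquire);
    return (v != 0) ? side_data : 0;
}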
Diffstat (limited to 'vespalib')
-rw-r--r--  vespalib/src/vespa/vespalib/btree/btreenodestore.cpp           2
-rw-r--r--  vespalib/src/vespa/vespalib/datastore/atomic_value_wrapper.h  63
2 files changed, 65 insertions, 0 deletions
diff --git a/vespalib/src/vespa/vespalib/btree/btreenodestore.cpp b/vespalib/src/vespa/vespalib/btree/btreenodestore.cpp
index 05323d1329a..9f98ba05493 100644
--- a/vespalib/src/vespa/vespalib/btree/btreenodestore.cpp
+++ b/vespalib/src/vespa/vespalib/btree/btreenodestore.cpp
@@ -4,6 +4,7 @@
#include "btreerootbase.h"
#include "btreeroot.h"
#include "btreenodeallocator.h"
+#include <vespa/vespalib/datastore/atomic_value_wrapper.h>
#include <vespa/vespalib/datastore/datastore.h>
#include <vespa/vespalib/datastore/buffer_type.hpp>
@@ -43,6 +44,7 @@ VESPALIB_DATASTORE_INSTANTIATE_BUFFERTYPE_LEAFNODE(uint32_t, uint32_t, NoAggrega
VESPALIB_DATASTORE_INSTANTIATE_BUFFERTYPE_LEAFNODE(uint32_t, BTreeNoLeafData, NoAggregated, BTreeDefaultTraits::LEAF_SLOTS);
VESPALIB_DATASTORE_INSTANTIATE_BUFFERTYPE_LEAFNODE(uint32_t, int32_t , MinMaxAggregated, BTreeDefaultTraits::LEAF_SLOTS);
VESPALIB_DATASTORE_INSTANTIATE_BUFFERTYPE_LEAFNODE(uint64_t, uint64_t , MinMaxAggregated, BTreeDefaultTraits::LEAF_SLOTS);
+VESPALIB_DATASTORE_INSTANTIATE_BUFFERTYPE_LEAFNODE(uint64_t, AtomicValueWrapper<uint64_t>, MinMaxAggregated, BTreeDefaultTraits::LEAF_SLOTS);
VESPALIB_DATASTORE_INSTANTIATE_BUFFERTYPE_LEAFNODE(AtomicEntryRef, AtomicEntryRef, NoAggregated, BTreeDefaultTraits::LEAF_SLOTS);
VESPALIB_DATASTORE_INSTANTIATE_BUFFERTYPE_LEAFNODE(AtomicEntryRef, BTreeNoLeafData, NoAggregated, BTreeDefaultTraits::LEAF_SLOTS);
VESPALIB_DATASTORE_INSTANTIATE_BUFFERTYPE_LEAFNODE(EntryRef, BTreeNoLeafData, NoAggregated, BTreeDefaultTraits::LEAF_SLOTS);
diff --git a/vespalib/src/vespa/vespalib/datastore/atomic_value_wrapper.h b/vespalib/src/vespa/vespalib/datastore/atomic_value_wrapper.h
new file mode 100644
index 00000000000..3ee871be9b0
--- /dev/null
+++ b/vespalib/src/vespa/vespalib/datastore/atomic_value_wrapper.h
@@ -0,0 +1,63 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include <atomic>
+
+namespace vespalib::datastore {
+
+/**
+ * Copyable atomic wrapper for a primitive value that offers value store and load
+ * functionality with explicit memory ordering constraints. Intended to be used for
+ * non-EntryRef values where atomicity and transitive visibility are required.
+ *
+ * Copying always happens with relaxed ordering, as it expects that the copier has
+ * already loaded the source AtomicValueWrapper with an ordering that is appropriate
+ * for observing any transitive memory dependencies.
+ *
+ * This wrapper is intentionally not implicitly convertible to/from values of the
+ * underlying primitive type.
+ *
+ * Note: use AtomicEntryRef instead if you're wrapping an EntryRef directly.
+ */
+template <typename T>
+class AtomicValueWrapper {
+ static_assert(std::atomic<T>::is_always_lock_free);
+
+ std::atomic<T> _value;
+public:
+ constexpr AtomicValueWrapper() noexcept : _value() {}
+ constexpr explicit AtomicValueWrapper(T value) noexcept : _value(value) {}
+ AtomicValueWrapper(const AtomicValueWrapper& rhs) noexcept
+ : _value(rhs._value.load(std::memory_order_relaxed))
+ {}
+ AtomicValueWrapper(AtomicValueWrapper&& rhs) noexcept
+ : _value(rhs._value.load(std::memory_order_relaxed))
+ {}
+ AtomicValueWrapper& operator=(const AtomicValueWrapper& rhs) noexcept {
+ _value.store(rhs._value.load(std::memory_order_relaxed),
+ std::memory_order_relaxed);
+ return *this;
+ }
+ void store_release(T value) noexcept {
+ _value.store(value, std::memory_order_release);
+ }
+ void store_relaxed(T value) noexcept {
+ _value.store(value, std::memory_order_relaxed);
+ }
+ [[nodiscard]] T load_acquire() const noexcept {
+ return _value.load(std::memory_order_acquire);
+ }
+ [[nodiscard]] T load_relaxed() const noexcept {
+ return _value.load(std::memory_order_relaxed);
+ }
+
+ [[nodiscard]] bool operator==(const AtomicValueWrapper& rhs) const noexcept {
+ return (load_relaxed() == rhs.load_relaxed());
+ }
+ [[nodiscard]] bool operator!=(const AtomicValueWrapper& rhs) const noexcept {
+ return !(*this == rhs);
+ }
+};
+
+}
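For reference, a hypothetical usage sketch (not part of this commit; the names are illustrative, not taken from the bucket DB code) of how the wrapper replaces explicit fences: the writer publishes with store_release() and concurrent readers observe with load_acquire(), which together form a release/acquire pair.

#include <vespa/vespalib/datastore/atomic_value_wrapper.h>
#include <cstdint>

using vespalib::datastore::AtomicValueWrapper;

// Shared value accessed by one writer and multiple concurrent readers.
AtomicValueWrapper<uint64_t> bucket_meta;

void writer_updates_entry(uint64_t encoded_meta) {
    // Release store: pairs with load_acquire() in readers, making writes
    // sequenced before this store visible to readers that observe it.
    bucket_meta.store_release(encoded_meta);
}

uint64_t reader_reads_entry() {
    // Acquire load: forms the other half of the release/acquire pair.
    return bucket_meta.load_acquire();
}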