aboutsummaryrefslogtreecommitdiffstats
path: root/vespalib/src/vespa/vespalib/datastore/atomic_value_wrapper.h
blob: 749e61b83fddca75b8371908ae97e3e1705b16a3 (plain) (blame)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
// Copyright Vespa.ai. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.

#pragma once

#include <atomic>

namespace vespalib::datastore {

/**
 * Copyable atomic wrapper for a primitive value that offers value store and load
 * functionality with explicit memory ordering constraints. Intended to be used for
 * non-EntryRef values where atomicity and transitive visibility is a requirement.
 *
 * Copying always happens with relaxed ordering, as it expects that the copier has
 * already loaded the source AtomicValueWrapper with an ordering that is appropriate
 * for observing any transitive memory dependencies.
 *
 * This wrapper is intentionally not implicitly convertible to/from values of the
 * underlying primitive type.
 *
 * Note: use AtomicEntryRef instead if you're wrapping an EntryRef directly.
 */
template <typename T>
class AtomicValueWrapper {
    // Only usable for types where the atomic is lock-free on this platform;
    // a locking atomic would defeat the purpose of this lightweight wrapper.
    static_assert(std::atomic<T>::is_always_lock_free);

    std::atomic<T> _value;
public:
    constexpr AtomicValueWrapper() noexcept : _value() {}
    constexpr explicit AtomicValueWrapper(T value) noexcept : _value(value) {}
    // All copy/move special members transfer the value with relaxed ordering;
    // see the class comment for why this is sufficient.
    AtomicValueWrapper(const AtomicValueWrapper& rhs) noexcept
        : _value(rhs._value.load(std::memory_order_relaxed))
    {}
    AtomicValueWrapper(AtomicValueWrapper&& rhs) noexcept
        : _value(rhs._value.load(std::memory_order_relaxed))
    {}
    AtomicValueWrapper& operator=(const AtomicValueWrapper& rhs) noexcept {
        _value.store(rhs._value.load(std::memory_order_relaxed),
                     std::memory_order_relaxed);
        return *this;
    }
    // Declared for rule-of-five completeness: the user-declared move
    // constructor suppresses the implicit move assignment, which would
    // otherwise silently fall back to copy assignment. "Moving" copies the
    // value with relaxed ordering and leaves the source unchanged.
    AtomicValueWrapper& operator=(AtomicValueWrapper&& rhs) noexcept {
        _value.store(rhs._value.load(std::memory_order_relaxed),
                     std::memory_order_relaxed);
        return *this;
    }
    // Publishes value; pairs with load_acquire() in a reader to make prior
    // writes by this thread visible to that reader.
    void store_release(T value) noexcept {
        _value.store(value, std::memory_order_release);
    }
    // Store with no ordering guarantees beyond atomicity.
    void store_relaxed(T value) noexcept {
        _value.store(value, std::memory_order_relaxed);
    }
    // Pairs with store_release(); observing the stored value also makes the
    // writer's prior writes visible.
    [[nodiscard]] T load_acquire() const noexcept {
        return _value.load(std::memory_order_acquire);
    }
    // Load with no ordering guarantees beyond atomicity.
    [[nodiscard]] T load_relaxed() const noexcept {
        return _value.load(std::memory_order_relaxed);
    }

    // Equality compares the wrapped values with relaxed loads; callers
    // needing transitive visibility must synchronize separately.
    [[nodiscard]] bool operator==(const AtomicValueWrapper& rhs) const noexcept {
        return (load_relaxed() == rhs.load_relaxed());
    }
    [[nodiscard]] bool operator!=(const AtomicValueWrapper& rhs) const noexcept {
        return !(*this == rhs);
    }
};

}