about summary refs log tree commit diff stats
path: root/vespalib
diff options
context:
space:
mode:
author Tor Egge <Tor.Egge@online.no> 2021-12-07 13:57:06 +0100
committer Tor Egge <Tor.Egge@online.no> 2021-12-07 13:57:06 +0100
commit b80ddb209c9bf985729c9634d6d3986d1884f1be (patch)
tree 6fffc9623bdcd2ce3e394988e3a7bead2a816586 /vespalib
parent 09bd9d8aa645771b9552a4cac75e654e122fce67 (diff)
Use EntryRefFilter when compacting array store.
Diffstat (limited to 'vespalib')
-rw-r--r-- vespalib/src/vespa/vespalib/datastore/array_store.hpp | 44
1 file changed, 18 insertions(+), 26 deletions(-)
diff --git a/vespalib/src/vespa/vespalib/datastore/array_store.hpp b/vespalib/src/vespa/vespalib/datastore/array_store.hpp
index 5600c64eb3d..9317fa557c0 100644
--- a/vespalib/src/vespa/vespalib/datastore/array_store.hpp
+++ b/vespalib/src/vespa/vespalib/datastore/array_store.hpp
@@ -3,6 +3,7 @@
#pragma once
#include "array_store.h"
+#include "entry_ref_filter.h"
#include "datastore.hpp"
#include <atomic>
#include <algorithm>
@@ -127,47 +128,38 @@ private:
DataStoreBase &_dataStore;
ArrayStoreType &_store;
std::vector<uint32_t> _bufferIdsToCompact;
+ EntryRefFilter _filter;
- bool compactingBuffer(uint32_t bufferId) {
- return std::find(_bufferIdsToCompact.begin(), _bufferIdsToCompact.end(),
- bufferId) != _bufferIdsToCompact.end();
- }
public:
CompactionContext(DataStoreBase &dataStore,
ArrayStoreType &store,
std::vector<uint32_t> bufferIdsToCompact)
: _dataStore(dataStore),
_store(store),
- _bufferIdsToCompact(std::move(bufferIdsToCompact))
- {}
+ _bufferIdsToCompact(std::move(bufferIdsToCompact)),
+ _filter(RefT::numBuffers(), RefT::offset_bits)
+ {
+ _filter.add_buffers(_bufferIdsToCompact);
+ }
~CompactionContext() override {
_dataStore.finishCompact(_bufferIdsToCompact);
}
void compact(vespalib::ArrayRef<EntryRef> refs) override {
- if (!_bufferIdsToCompact.empty()) {
- for (auto &ref : refs) {
- if (ref.valid()) {
- RefT internalRef(ref);
- if (compactingBuffer(internalRef.bufferId())) {
- EntryRef newRef = _store.add(_store.get(ref));
- std::atomic_thread_fence(std::memory_order_release);
- ref = newRef;
- }
- }
+ for (auto &ref : refs) {
+ if (ref.valid() && _filter.has(ref)) {
+ EntryRef newRef = _store.add(_store.get(ref));
+ std::atomic_thread_fence(std::memory_order_release);
+ ref = newRef;
}
}
}
void compact(vespalib::ArrayRef<AtomicEntryRef> refs) override {
- if (!_bufferIdsToCompact.empty()) {
- for (auto &ref : refs) {
- if (ref.load_relaxed().valid()) {
- RefT internalRef(ref.load_relaxed());
- if (compactingBuffer(internalRef.bufferId())) {
- EntryRef newRef = _store.add(_store.get(ref.load_relaxed()));
- std::atomic_thread_fence(std::memory_order_release);
- ref.store_release(newRef);
- }
- }
+ for (auto &atomic_entry_ref : refs) {
+ auto ref = atomic_entry_ref.load_relaxed();
+ if (ref.valid() && _filter.has(ref)) {
+ EntryRef newRef = _store.add(_store.get(ref));
+ std::atomic_thread_fence(std::memory_order_release);
+ atomic_entry_ref.store_release(newRef);
}
}
}