author     Tor Brede Vekterli <vekterli@verizonmedia.com>  2020-06-29 13:01:28 +0000
committer  Tor Brede Vekterli <vekterli@verizonmedia.com>  2020-06-29 13:01:28 +0000
commit     947982bec5303debd8b4ec2b90d9f59df7f64131 (patch)
tree       63cc9874bb909557c91c32cade574088b49fac7a /storage
parent     3f7ba13c24d6ac5b4ee77f61580a9bbf867cbd06 (diff)
Address review comments
Also rewrite some GMock macros that triggered Valgrind warnings due to default test object printers accessing uninitialized memory.
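The GMock rewrite mentioned above replaces EXPECT_THAT(map, SizeIs(n)) with EXPECT_EQ(map.size(), n). A minimal sketch of the failure mode being avoided, assuming the uninitialized reads came from gtest's fallback byte-wise object printer (the Elem type below is illustrative, not the test's actual value type):

    #include <gtest/gtest.h>
    #include <gmock/gmock.h>
    #include <vector>

    // Illustrative type with no operator<<: gtest falls back to printing the
    // raw bytes of the object, which reads any uninitialized padding bytes.
    struct Elem {
        int  value;
        char tag;                      // padding after this stays uninitialized
        Elem(int v, char t) : value(v), tag(t) {}
    };

    TEST(PrinterSketch, size_check_without_stringifying_elements) {
        std::vector<Elem> v{{1, 'a'}, {2, 'b'}, {3, 'c'}};
        // EXPECT_THAT(v, ::testing::SizeIs(3)) would stringify `v` (and thus
        // every Elem's raw bytes) when building a failure message, which
        // Valgrind flags as reads of uninitialized memory.
        EXPECT_EQ(v.size(), 3u);       // same check, only prints a size_t
    }
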
Diffstat (limited to 'storage')
-rw-r--r--  storage/src/tests/bucketdb/lockablemaptest.cpp                        | 13
-rw-r--r--  storage/src/vespa/storage/bucketdb/abstract_bucket_map.h              |  4
-rw-r--r--  storage/src/vespa/storage/bucketdb/btree_bucket_database.cpp          |  2
-rw-r--r--  storage/src/vespa/storage/bucketdb/btree_lockable_map.h               |  5
-rw-r--r--  storage/src/vespa/storage/bucketdb/btree_lockable_map.hpp             | 14
-rw-r--r--  storage/src/vespa/storage/bucketdb/generic_btree_bucket_database.hpp  | 10
6 files changed, 25 insertions, 23 deletions
diff --git a/storage/src/tests/bucketdb/lockablemaptest.cpp b/storage/src/tests/bucketdb/lockablemaptest.cpp
index 50cde580f55..c5d22b6e6b5 100644
--- a/storage/src/tests/bucketdb/lockablemaptest.cpp
+++ b/storage/src/tests/bucketdb/lockablemaptest.cpp
@@ -6,7 +6,6 @@
#include <vespa/storage/bucketdb/lockablemap.hpp>
#include <vespa/storage/bucketdb/btree_lockable_map.hpp>
#include <vespa/vespalib/gtest/gtest.h>
-#include <gmock/gmock.h>
#include <vespa/log/log.h>
LOG_SETUP(".lockable_map_test");
@@ -79,11 +78,11 @@ TYPED_TEST(LockableMapTest, simple_usage) {
EXPECT_EQ(false, preExisted);
map.insert(14, A(42, 0, 0), "foo", preExisted);
EXPECT_EQ(false, preExisted);
- EXPECT_THAT(map, SizeIs(3));
+ EXPECT_EQ(map.size(), 3);
map.insert(11, A(4, 7, 0), "foo", preExisted);
EXPECT_EQ(true, preExisted);
- EXPECT_THAT(map, SizeIs(3));
+ EXPECT_EQ(map.size(), 3);
EXPECT_FALSE(map.empty());
// Access some elements
@@ -93,14 +92,14 @@ TYPED_TEST(LockableMapTest, simple_usage) {
// Do removes
EXPECT_EQ(map.erase(12, "foo"), 0);
- EXPECT_THAT(map, SizeIs(3));
+ EXPECT_EQ(map.size(), 3);
EXPECT_EQ(map.erase(14, "foo"), 1);
- EXPECT_THAT(map, SizeIs(2));
+ EXPECT_EQ(map.size(), 2);
EXPECT_EQ(map.erase(11, "foo"), 1);
EXPECT_EQ(map.erase(16, "foo"), 1);
- EXPECT_THAT(map, SizeIs(0));
+ EXPECT_EQ(map.size(), 0);
EXPECT_TRUE(map.empty());
}
@@ -721,7 +720,7 @@ TYPED_TEST(LockableMapTest, is_consistent) {
}
}
-TYPED_TEST(LockableMapTest, get_without_auto_create_does_implicitly_not_lock_bucket) {
+TYPED_TEST(LockableMapTest, get_without_auto_create_does_not_implicitly_lock_bucket) {
TypeParam map;
BucketId id(16, 0x00001);
diff --git a/storage/src/vespa/storage/bucketdb/abstract_bucket_map.h b/storage/src/vespa/storage/bucketdb/abstract_bucket_map.h
index 55b52a2cd38..6c669beab1c 100644
--- a/storage/src/vespa/storage/bucketdb/abstract_bucket_map.h
+++ b/storage/src/vespa/storage/bucketdb/abstract_bucket_map.h
@@ -155,8 +155,8 @@ public:
/**
* Iterate over the entire database contents, holding the global database
* mutex for `chunkSize` processed entries at a time, yielding the current
- * thread between each such such to allow other threads to get a chance
- * at acquiring a bucket lock.
+ * thread between each chunk to allow other threads to get a chance at
+ * acquiring a bucket lock.
*
* TODO deprecate in favor of snapshotting once fully on B-tree DB
*
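A minimal sketch of the chunked-iteration pattern this comment describes, with a std::map standing in for the bucket database (names are illustrative; the real loop lives in the AbstractBucketMap implementations):

    #include <cstddef>
    #include <cstdint>
    #include <functional>
    #include <map>
    #include <mutex>
    #include <thread>

    template <typename V>
    void for_each_chunked(std::map<uint64_t, V>& db, std::mutex& global_mutex,
                          const std::function<void(uint64_t, const V&)>& func,
                          size_t chunk_size)
    {
        uint64_t resume_key = 0;
        bool more = true;
        while (more) {
            {
                // Hold the global mutex for at most chunk_size entries.
                std::lock_guard guard(global_mutex);
                auto it = db.lower_bound(resume_key);
                size_t processed = 0;
                for (; it != db.end() && processed < chunk_size; ++it, ++processed) {
                    func(it->first, it->second);
                }
                more = (it != db.end());
                if (more) {
                    resume_key = it->first;    // resume from here next round
                }
            }
            if (more) {
                std::this_thread::yield();     // let other threads grab bucket locks
            }
        }
    }
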
diff --git a/storage/src/vespa/storage/bucketdb/btree_bucket_database.cpp b/storage/src/vespa/storage/bucketdb/btree_bucket_database.cpp
index 53fa5afc93c..42bd3a247bb 100644
--- a/storage/src/vespa/storage/bucketdb/btree_bucket_database.cpp
+++ b/storage/src/vespa/storage/bucketdb/btree_bucket_database.cpp
@@ -591,7 +591,7 @@ struct BTreeBucketDatabase2 {
static ValueType make_invalid_value() {
return Entry::createInvalid();
}
- static uint64_t store_and_wrap_value(DataStoreType& store, const Entry& entry) noexcept {
+ static uint64_t wrap_and_store_value(DataStoreType& store, const Entry& entry) noexcept {
auto replicas_ref = store.add(entry.getBucketInfo().getRawNodes());
return value_from(entry.getBucketInfo().getLastGarbageCollectionTime(), replicas_ref);
}
diff --git a/storage/src/vespa/storage/bucketdb/btree_lockable_map.h b/storage/src/vespa/storage/bucketdb/btree_lockable_map.h
index a31cad7bf35..136baefb615 100644
--- a/storage/src/vespa/storage/bucketdb/btree_lockable_map.h
+++ b/storage/src/vespa/storage/bucketdb/btree_lockable_map.h
@@ -18,6 +18,11 @@ namespace storage::bucketdb {
template <typename DataStoreTraitsT> class GenericBTreeBucketDatabase;
+/*
+ * AbstractBucketMap implementation that uses a B-tree bucket database backing structure.
+ *
+ * Identical global and per-bucket locking semantics as LockableMap.
+ */
template <typename T>
class BTreeLockableMap : public AbstractBucketMap<T> {
struct ValueTraits;
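For context, a usage sketch mirroring the calls exercised in lockablemaptest.cpp above; A is that test's small value type, and "foo" is the client-id string identifying the lock holder:

    #include <vespa/storage/bucketdb/btree_lockable_map.hpp>
    #include <cassert>

    void usage_sketch() {
        storage::bucketdb::BTreeLockableMap<A> map;    // drop-in for LockableMap<A>
        bool preExisted = false;
        map.insert(16, A(1, 2, 3), "foo", preExisted); // "foo" = client id
        assert(!preExisted && map.size() == 1);
        assert(map.erase(16, "foo") == 1);             // same lock semantics as LockableMap
        assert(map.empty());
    }
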
diff --git a/storage/src/vespa/storage/bucketdb/btree_lockable_map.hpp b/storage/src/vespa/storage/bucketdb/btree_lockable_map.hpp
index 4d0db1e6979..9c7228ae21d 100644
--- a/storage/src/vespa/storage/bucketdb/btree_lockable_map.hpp
+++ b/storage/src/vespa/storage/bucketdb/btree_lockable_map.hpp
@@ -40,11 +40,11 @@ struct BTreeLockableMap<T>::ValueTraits {
static ValueType make_invalid_value() {
return ValueType();
}
- static uint64_t store_and_wrap_value(DataStoreType& store, const ValueType& value) noexcept {
+ static uint64_t wrap_and_store_value(DataStoreType& store, const ValueType& value) noexcept {
return store.addEntry(value).ref();
}
static void remove_by_wrapped_value(DataStoreType& store, uint64_t value) noexcept {
- store.freeElem(entry_ref_from_value(value), 1);
+ store.holdElem(entry_ref_from_value(value), 1);
}
static ValueType unwrap_from_key_value(const DataStoreType& store, [[maybe_unused]] uint64_t key, uint64_t value) {
return store.getEntry(entry_ref_from_value(value));
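The freeElem → holdElem switch matters for concurrent readers: freeElem recycles the entry's storage immediately, while holdElem parks it behind vespalib's generation mechanism so it is only reclaimed once no reader can still observe it. A toy illustration of that deferral (not Vespa's actual datastore code):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    struct HoldList {
        struct Held { uint32_t ref; uint64_t generation; };
        std::vector<Held> held;
        uint64_t current_gen = 0;        // bumped by the writer
        uint64_t oldest_reader_gen = 0;  // lowest generation any reader is still in

        void hold(uint32_t ref) { held.push_back({ref, current_gen}); }

        // Free only the entries no reader can still see.
        template <typename FreeFn>
        void trim(FreeFn&& free_ref) {
            size_t i = 0;
            for (; i < held.size() && held[i].generation < oldest_reader_gen; ++i) {
                free_ref(held[i].ref);
            }
            held.erase(held.begin(), held.begin() + i);
        }
    };
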
@@ -142,9 +142,7 @@ template <typename T>
size_t BTreeLockableMap<T>::getMemoryUsage() const noexcept {
std::lock_guard guard(_lock);
const auto impl_usage = _impl->memory_usage();
- return (impl_usage.allocatedBytes() + impl_usage.usedBytes() +
- impl_usage.deadBytes() + impl_usage.allocatedBytesOnHold() +
- _lockedKeys.getMemoryUsage() +
+ return (impl_usage.allocatedBytes() + _lockedKeys.getMemoryUsage() +
sizeof(std::mutex) + sizeof(std::condition_variable));
}
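The simplification above assumes vespalib's MemoryUsage semantics, where used, dead and on-hold bytes all live inside the allocation, so summing them with allocatedBytes double-counts. Illustrative numbers only:

    #include <cstddef>
    // dead bytes are a subset of used bytes, used bytes a subset of allocated
    // bytes, and on-hold bytes are also inside the allocation.
    constexpr size_t allocated = 1024, used = 700, dead = 100, on_hold = 50;
    // old formula reported: 1024 + 700 + 100 + 50 = 1874 (plus lock sizes)
    // new formula reports : 1024                         (plus lock sizes)
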
@@ -218,7 +216,7 @@ void BTreeLockableMap<T>::insert(const key_type& key, const mapped_type& value,
template <typename T>
void BTreeLockableMap<T>::clear() {
- std::lock_guard<std::mutex> guard(_lock);
+ std::lock_guard guard(_lock);
_impl->clear();
}
@@ -278,7 +276,7 @@ void BTreeLockableMap<T>::do_for_each_mutable(std::function<Decision(uint64_t, m
if (findNextKey(key, val, clientId, guard) || key > last) {
return;
}
- Decision d(func(const_cast<const key_type&>(key), val));
+ Decision d(func(key, val));
if (handleDecision(key, val, d)) {
return;
}
@@ -299,7 +297,7 @@ void BTreeLockableMap<T>::do_for_each(std::function<Decision(uint64_t, const map
if (findNextKey(key, val, clientId, guard) || key > last) {
return;
}
- Decision d(func(const_cast<const key_type&>(key), val));
+ Decision d(func(key, val));
assert(d == Decision::ABORT || d == Decision::CONTINUE);
if (handleDecision(key, val, d)) {
return;
diff --git a/storage/src/vespa/storage/bucketdb/generic_btree_bucket_database.hpp b/storage/src/vespa/storage/bucketdb/generic_btree_bucket_database.hpp
index 6484ad70804..4b1b507d95a 100644
--- a/storage/src/vespa/storage/bucketdb/generic_btree_bucket_database.hpp
+++ b/storage/src/vespa/storage/bucketdb/generic_btree_bucket_database.hpp
@@ -186,7 +186,7 @@ GenericBTreeBucketDatabase<DataStoreTraitsT>::find_parents_internal(
auto candidate = BucketId(BucketId::keyToBucketId(iter.getKey()));
if (candidate.contains(bucket)) {
assert(candidate.getUsedBits() >= bits);
- func(iter.getKey(), entry_from_iterator(iter));
+ func(iter.getKey(), const_value_ref_from_valid_iterator(iter));
}
bits = next_parent_bit_seek_level(bits, candidate, bucket);
const auto parent_key = BucketId(bits, bucket.getRawId()).toKey();
@@ -272,7 +272,7 @@ template <typename DataStoreTraitsT>
bool GenericBTreeBucketDatabase<DataStoreTraitsT>::update_by_raw_key(uint64_t bucket_key,
const ValueType& new_entry)
{
- const auto new_value = DataStoreTraitsT::store_and_wrap_value(_store, new_entry);
+ const auto new_value = DataStoreTraitsT::wrap_and_store_value(_store, new_entry);
auto iter = _tree.lowerBound(bucket_key);
const bool pre_existed = (iter.valid() && (iter.getKey() == bucket_key));
if (pre_existed) {
@@ -421,7 +421,7 @@ struct BTreeBuilderMerger final : Merger<typename DataStoreTraitsT::ValueType> {
void insert_before_current(const BucketId& bucket_id, const ValueType& e) override {
const uint64_t bucket_key = bucket_id.toKey();
assert(bucket_key < _current_key);
- const auto new_value = DataStoreTraitsT::store_and_wrap_value(_db.store(), e);
+ const auto new_value = DataStoreTraitsT::wrap_and_store_value(_db.store(), e);
_builder.insert(bucket_key, new_value);
}
@@ -450,7 +450,7 @@ struct BTreeTrailingInserter final : TrailingInserter<typename DataStoreTraitsT:
void insert_at_end(const BucketId& bucket_id, const ValueType& e) override {
const uint64_t bucket_key = bucket_id.toKey();
- const auto new_value = DataStoreTraitsT::store_and_wrap_value(_db.store(), e);
+ const auto new_value = DataStoreTraitsT::wrap_and_store_value(_db.store(), e);
_builder.insert(bucket_key, new_value);
}
};
@@ -475,7 +475,7 @@ void GenericBTreeBucketDatabase<DataStoreTraitsT>::merge(MergingProcessor<ValueT
assert(merger._valid_cached_value); // Must actually have been touched
assert(merger._cached_value.valid());
DataStoreTraitsT::remove_by_wrapped_value(_store, value);
- const auto new_value = DataStoreTraitsT::store_and_wrap_value(_store, merger._cached_value);
+ const auto new_value = DataStoreTraitsT::wrap_and_store_value(_store, merger._cached_value);
builder.insert(key, new_value);
} else if (result == MergingProcessor<ValueType>::Result::Skip) {
DataStoreTraitsT::remove_by_wrapped_value(_store, value);
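
Taken together, the renamed hook is one of four that GenericBTreeBucketDatabase expects from its DataStoreTraitsT parameter. A self-contained toy version of that contract, inferred from the call sites in this patch (the store and value types here are stand-ins, not Vespa's):

    #include <cstdint>
    #include <string>
    #include <unordered_map>

    struct ToyStore {
        std::unordered_map<uint64_t, std::string> slots;
        uint64_t next_ref = 1;
    };

    struct ToyTraits {
        using ValueType     = std::string;
        using DataStoreType = ToyStore;

        // Sentinel returned on lookup miss.
        static ValueType make_invalid_value() { return {}; }

        // Store the value first, then wrap the resulting reference as the
        // 64-bit payload kept in the B-tree -- the order the new name reflects.
        static uint64_t wrap_and_store_value(DataStoreType& store, const ValueType& value) {
            const uint64_t ref = store.next_ref++;
            store.slots[ref] = value;
            return ref;
        }

        // Release the payload a wrapped value refers to (the real traits
        // defer this via holdElem for the sake of concurrent readers).
        static void remove_by_wrapped_value(DataStoreType& store, uint64_t wrapped) {
            store.slots.erase(wrapped);
        }

        // Rebuild a full ValueType from the tree key and wrapped payload.
        static ValueType unwrap_from_key_value(const DataStoreType& store,
                                               uint64_t /*key*/, uint64_t wrapped) {
            return store.slots.at(wrapped);
        }
    };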