summaryrefslogtreecommitdiffstats
path: root/vespalib
diff options
context:
space:
mode:
authorHenning Baldersheim <balder@yahoo-inc.com>2021-03-31 16:35:19 +0000
committerHenning Baldersheim <balder@yahoo-inc.com>2021-03-31 16:35:19 +0000
commit17e22dbe818d445816937a03cccd7b8065af5e85 (patch)
treec0c0878871ac7c965ad26ffb38f799bdbdfd6241 /vespalib
parent377a98264c368abc53b11db078b62a63b315370a (diff)
Add ShardedHashComparator so that a single division will be used for both dividend and remainder.
The compiler will also be smarter about it, as it is a constant known at compile time.
Diffstat (limited to 'vespalib')
-rw-r--r--vespalib/src/tests/datastore/fixed_size_hash_map/fixed_size_hash_map_test.cpp8
-rw-r--r--vespalib/src/vespa/vespalib/datastore/fixed_size_hash_map.cpp25
-rw-r--r--vespalib/src/vespa/vespalib/datastore/fixed_size_hash_map.h34
-rw-r--r--vespalib/src/vespa/vespalib/datastore/sharded_hash_map.cpp34
-rw-r--r--vespalib/src/vespa/vespalib/datastore/sharded_hash_map.h2
5 files changed, 58 insertions, 45 deletions
diff --git a/vespalib/src/tests/datastore/fixed_size_hash_map/fixed_size_hash_map_test.cpp b/vespalib/src/tests/datastore/fixed_size_hash_map/fixed_size_hash_map_test.cpp
index b929b248e33..edaf26bb66a 100644
--- a/vespalib/src/tests/datastore/fixed_size_hash_map/fixed_size_hash_map_test.cpp
+++ b/vespalib/src/tests/datastore/fixed_size_hash_map/fixed_size_hash_map_test.cpp
@@ -108,7 +108,7 @@ DataStoreFixedSizeHashTest::insert(uint32_t key)
{
MyCompare comp(_store, key);
std::function<EntryRef(void)> insert_entry([this, key]() -> EntryRef { return _allocator.allocate(key); });
- auto& result = _hash_map->add(comp, EntryRef(), insert_entry);
+ auto& result = _hash_map->add(_hash_map->getComp(comp), insert_entry);
auto ref = result.first.load_relaxed();
auto &wrapped_entry = _allocator.get_wrapped(ref);
EXPECT_EQ(key, wrapped_entry.value());
@@ -118,7 +118,7 @@ void
DataStoreFixedSizeHashTest::remove(uint32_t key)
{
MyCompare comp(_store, key);
- auto result = _hash_map->remove(comp, EntryRef());
+ auto result = _hash_map->remove(_hash_map->getComp(comp));
if (result != nullptr) {
auto ref = result->first.load_relaxed();
auto &wrapped_entry = _allocator.get_wrapped(ref);
@@ -131,7 +131,7 @@ bool
DataStoreFixedSizeHashTest::has_key(uint32_t key)
{
MyCompare comp(_store, key);
- auto result = _hash_map->find(comp, EntryRef());
+ auto result = _hash_map->find(_hash_map->getComp(comp));
if (result != nullptr) {
auto ref = result->first.load_relaxed();
auto& wrapped_entry = _allocator.get_wrapped(ref);
@@ -271,7 +271,7 @@ TEST_F(DataStoreFixedSizeHashTest, lookups_works_after_insert_and_remove)
}
for (auto &kv : expected) {
MyCompare comp(_store, kv.first);
- EXPECT_EQ(kv.second, _hash_map->find(comp, EntryRef()) != nullptr);
+ EXPECT_EQ(kv.second, _hash_map->find(_hash_map->getComp(comp)) != nullptr);
}
}
diff --git a/vespalib/src/vespa/vespalib/datastore/fixed_size_hash_map.cpp b/vespalib/src/vespa/vespalib/datastore/fixed_size_hash_map.cpp
index a8d5fd5ac5f..130abd0ba50 100644
--- a/vespalib/src/vespa/vespalib/datastore/fixed_size_hash_map.cpp
+++ b/vespalib/src/vespa/vespalib/datastore/fixed_size_hash_map.cpp
@@ -52,8 +52,8 @@ FixedSizeHashMap::~FixedSizeHashMap() = default;
void
FixedSizeHashMap::force_add(const EntryComparator& comp, const KvType& kv)
{
- size_t hash_idx = comp.hash(kv.first.load_relaxed()) / _num_shards;
- hash_idx %= _modulo;
+ ShardedHashComparator shardedComp(comp, kv.first.load_relaxed(), _num_shards);
+ uint32_t hash_idx = shardedComp.hash_idx() % _modulo;
auto& chain_head = _chain_heads[hash_idx];
assert(_nodes.size() < _nodes.capacity());
uint32_t node_idx = _nodes.size();
@@ -63,15 +63,14 @@ FixedSizeHashMap::force_add(const EntryComparator& comp, const KvType& kv)
}
FixedSizeHashMap::KvType&
-FixedSizeHashMap::add(const EntryComparator& comp, EntryRef key_ref, std::function<EntryRef(void)>& insert_entry)
+FixedSizeHashMap::add(const ShardedHashComparator & comp, std::function<EntryRef(void)>& insert_entry)
{
- size_t hash_idx = comp.hash(key_ref) / _num_shards;
- hash_idx %= _modulo;
+ uint32_t hash_idx = comp.hash_idx() % _modulo;
auto& chain_head = _chain_heads[hash_idx];
uint32_t node_idx = chain_head.load_relaxed();
while (node_idx != no_node_idx) {
auto& node = _nodes[node_idx];
- if (comp.equal(key_ref, node.get_kv().first.load_relaxed())) {
+ if (comp.equal(node.get_kv().first.load_relaxed())) {
return node.get_kv();
}
node_idx = node.get_next_node_idx().load(std::memory_order_relaxed);
@@ -127,17 +126,16 @@ FixedSizeHashMap::trim_hold_lists_slow(generation_t first_used)
}
FixedSizeHashMap::KvType*
-FixedSizeHashMap::remove(const EntryComparator& comp, EntryRef key_ref)
+FixedSizeHashMap::remove(const ShardedHashComparator & comp)
{
- size_t hash_idx = comp.hash(key_ref) / _num_shards;
- hash_idx %= _modulo;
+ uint32_t hash_idx = comp.hash_idx() % _modulo;
auto& chain_head = _chain_heads[hash_idx];
uint32_t node_idx = chain_head.load_relaxed();
uint32_t prev_node_idx = no_node_idx;
while (node_idx != no_node_idx) {
auto &node = _nodes[node_idx];
uint32_t next_node_idx = node.get_next_node_idx().load(std::memory_order_relaxed);
- if (comp.equal(key_ref, node.get_kv().first.load_relaxed())) {
+ if (comp.equal(node.get_kv().first.load_relaxed())) {
if (prev_node_idx != no_node_idx) {
_nodes[prev_node_idx].get_next_node_idx().store(next_node_idx, std::memory_order_release);
} else {
@@ -155,16 +153,15 @@ FixedSizeHashMap::remove(const EntryComparator& comp, EntryRef key_ref)
}
FixedSizeHashMap::KvType*
-FixedSizeHashMap::find(const EntryComparator& comp, EntryRef key_ref)
+FixedSizeHashMap::find(const ShardedHashComparator & comp)
{
- size_t hash_idx = comp.hash(key_ref) / _num_shards;
- hash_idx %= _modulo;
+ uint32_t hash_idx = comp.hash_idx() % _modulo;
auto& chain_head = _chain_heads[hash_idx];
uint32_t node_idx = chain_head.load_acquire();
while (node_idx != no_node_idx) {
auto &node = _nodes[node_idx];
EntryRef node_key_ref = node.get_kv().first.load_acquire();
- if (node_key_ref.valid() && comp.equal(key_ref, node_key_ref)) {
+ if (node_key_ref.valid() && comp.equal(node_key_ref)) {
return &_nodes[node_idx].get_kv();
}
node_idx = node.get_next_node_idx().load(std::memory_order_acquire);
diff --git a/vespalib/src/vespa/vespalib/datastore/fixed_size_hash_map.h b/vespalib/src/vespa/vespalib/datastore/fixed_size_hash_map.h
index 05a1006a5d5..3ccf690af8e 100644
--- a/vespalib/src/vespa/vespalib/datastore/fixed_size_hash_map.h
+++ b/vespalib/src/vespa/vespalib/datastore/fixed_size_hash_map.h
@@ -3,6 +3,7 @@
#pragma once
#include "atomic_entry_ref.h"
+#include "entry_comparator.h"
#include <vespa/vespalib/util/array.h>
#include <vespa/vespalib/util/arrayref.h>
#include <vespa/vespalib/util/generationhandler.h>
@@ -17,7 +18,27 @@ class MemoryUsage;
}
namespace vespalib::datastore {
-class EntryComparator;
+class ShardedHashComparator {
+public:
+ ShardedHashComparator(const EntryComparator& comp, const EntryRef key_ref, uint32_t num_shards)
+ : _comp(comp),
+ _key_ref(key_ref)
+ {
+ size_t hash = comp.hash(key_ref);
+ _shard_idx = hash % num_shards;
+ _hash_idx = hash / num_shards;
+ }
+ uint32_t hash_idx() const { return _hash_idx; }
+ uint32_t shard_idx() const { return _shard_idx; }
+ bool equal(const EntryRef rhs) const {
+ return _comp.equal(_key_ref, rhs);
+ }
+private:
+ const EntryComparator& _comp;
+ const EntryRef _key_ref;
+ uint32_t _shard_idx;
+ uint32_t _hash_idx;
+};
/*
* Fixed sized hash map over keys in data store, meant to support a faster
@@ -42,7 +63,6 @@ public:
using KvType = std::pair<AtomicEntryRef, AtomicEntryRef>;
using generation_t = GenerationHandler::generation_t;
using sgeneration_t = GenerationHandler::sgeneration_t;
-
private:
class ChainHead {
std::atomic<uint32_t> _node_idx;
@@ -99,9 +119,13 @@ public:
FixedSizeHashMap(uint32_t module, uint32_t capacity, uint32_t num_shards, const FixedSizeHashMap &orig, const EntryComparator& comp);
~FixedSizeHashMap();
- KvType& add(const EntryComparator& comp, EntryRef key_ref, std::function<EntryRef(void)>& insert_entry);
- KvType* remove(const EntryComparator& comp, EntryRef key_ref);
- KvType* find(const EntryComparator& comp, EntryRef key_ref);
+ ShardedHashComparator getComp(const EntryComparator& comp) {
+ return ShardedHashComparator(comp, EntryRef(), _num_shards);
+ }
+
+ KvType& add(const ShardedHashComparator & comp, std::function<EntryRef(void)>& insert_entry);
+ KvType* remove(const ShardedHashComparator & comp);
+ KvType* find(const ShardedHashComparator & comp);
void transfer_hold_lists(generation_t generation) {
if (!_hold_1_list.empty()) {
diff --git a/vespalib/src/vespa/vespalib/datastore/sharded_hash_map.cpp b/vespalib/src/vespa/vespalib/datastore/sharded_hash_map.cpp
index 6dc8f55f3ee..36d68873176 100644
--- a/vespalib/src/vespa/vespalib/datastore/sharded_hash_map.cpp
+++ b/vespalib/src/vespa/vespalib/datastore/sharded_hash_map.cpp
@@ -39,12 +39,6 @@ ShardedHashMap::~ShardedHashMap()
}
}
-size_t
-ShardedHashMap::get_shard_idx(const EntryComparator& comp, EntryRef key_ref)
-{
- return comp.hash(key_ref) % num_shards;
-}
-
void
ShardedHashMap::alloc_shard(size_t shard_idx)
{
@@ -70,46 +64,46 @@ ShardedHashMap::hold_shard(std::unique_ptr<const FixedSizeHashMap> map)
ShardedHashMap::KvType&
ShardedHashMap::add(const EntryComparator& comp, EntryRef key_ref, std::function<EntryRef(void)>& insert_entry)
{
- size_t shard_idx = get_shard_idx(comp, key_ref);
- auto map = _maps[shard_idx].load(std::memory_order_relaxed);
+ ShardedHashComparator shardedComp(comp, key_ref, num_shards);
+ auto map = _maps[shardedComp.shard_idx()].load(std::memory_order_relaxed);
if (map == nullptr || map->full()) {
- alloc_shard(shard_idx);
- map = _maps[shard_idx].load(std::memory_order_relaxed);
+ alloc_shard(shardedComp.shard_idx());
+ map = _maps[shardedComp.shard_idx()].load(std::memory_order_relaxed);
}
- return map->add(comp, key_ref, insert_entry);
+ return map->add(shardedComp, insert_entry);
}
ShardedHashMap::KvType*
ShardedHashMap::remove(const EntryComparator& comp, EntryRef key_ref)
{
- size_t shard_idx = get_shard_idx(comp, key_ref);
- auto map = _maps[shard_idx].load(std::memory_order_relaxed);
+ ShardedHashComparator shardedComp(comp, key_ref, num_shards);
+ auto map = _maps[shardedComp.shard_idx()].load(std::memory_order_relaxed);
if (map == nullptr) {
return nullptr;
}
- return map->remove(comp, key_ref);
+ return map->remove(shardedComp);
}
ShardedHashMap::KvType*
ShardedHashMap::find(const EntryComparator& comp, EntryRef key_ref)
{
- size_t shard_idx = get_shard_idx(comp, key_ref);
- auto map = _maps[shard_idx].load(std::memory_order_relaxed);
+ ShardedHashComparator shardedComp(comp, key_ref, num_shards);
+ auto map = _maps[shardedComp.shard_idx()].load(std::memory_order_relaxed);
if (map == nullptr) {
return nullptr;
}
- return map->find(comp, key_ref);
+ return map->find(shardedComp);
}
const ShardedHashMap::KvType*
ShardedHashMap::find(const EntryComparator& comp, EntryRef key_ref) const
{
- size_t shard_idx = get_shard_idx(comp, key_ref);
- auto map = _maps[shard_idx].load(std::memory_order_relaxed);
+ ShardedHashComparator shardedComp(comp, key_ref, num_shards);
+ auto map = _maps[shardedComp.shard_idx()].load(std::memory_order_relaxed);
if (map == nullptr) {
return nullptr;
}
- return map->find(comp, key_ref);
+ return map->find(shardedComp);
}
void
diff --git a/vespalib/src/vespa/vespalib/datastore/sharded_hash_map.h b/vespalib/src/vespa/vespalib/datastore/sharded_hash_map.h
index 73ee8c05feb..aa787421634 100644
--- a/vespalib/src/vespa/vespalib/datastore/sharded_hash_map.h
+++ b/vespalib/src/vespa/vespalib/datastore/sharded_hash_map.h
@@ -5,7 +5,6 @@
#include "atomic_entry_ref.h"
#include <atomic>
#include <vespa/vespalib/util/generationholder.h>
-#include <vespa/vespalib/stllike/string.h>
#include <functional>
namespace vespalib { class MemoryUsage; }
@@ -42,7 +41,6 @@ private:
std::atomic<FixedSizeHashMap *> _maps[num_shards];
std::unique_ptr<const EntryComparator> _comp;
- VESPA_DLL_LOCAL static size_t get_shard_idx(const EntryComparator& comp, EntryRef key_ref);
void alloc_shard(size_t shard_idx);
void hold_shard(std::unique_ptr<const FixedSizeHashMap> map);
public: