summary refs log tree commit diff stats
path: root/searchlib
diff options
context:
space:
mode:
authorGeir Storli <geirst@verizonmedia.com>2019-03-14 08:59:04 +0000
committerGeir Storli <geirst@verizonmedia.com>2019-03-14 08:59:04 +0000
commite1c88ac18ff874a509b67540bb99b223d5bc5c07 (patch)
treed6ceb9aabeba23f1c570ad4c72d9cb10c85a42a9 /searchlib
parent71014b3999a3ab01aface05395491606bc6f4fe1 (diff)
Implement raw allocator that can use free lists.
Diffstat (limited to 'searchlib')
-rw-r--r--searchlib/src/tests/datastore/datastore/datastore_test.cpp58
-rw-r--r--searchlib/src/vespa/searchlib/datastore/datastore.h4
-rw-r--r--searchlib/src/vespa/searchlib/datastore/datastore.hpp11
-rw-r--r--searchlib/src/vespa/searchlib/datastore/free_list_raw_allocator.h33
-rw-r--r--searchlib/src/vespa/searchlib/datastore/free_list_raw_allocator.hpp34
-rw-r--r--searchlib/src/vespa/searchlib/datastore/handle.h4
-rw-r--r--searchlib/src/vespa/searchlib/datastore/raw_allocator.h2
7 files changed, 136 insertions, 10 deletions
diff --git a/searchlib/src/tests/datastore/datastore/datastore_test.cpp b/searchlib/src/tests/datastore/datastore/datastore_test.cpp
index 1141838d07b..aa26afa3077 100644
--- a/searchlib/src/tests/datastore/datastore/datastore_test.cpp
+++ b/searchlib/src/tests/datastore/datastore/datastore_test.cpp
@@ -90,6 +90,9 @@ public:
}
~GrowStore() { _store.dropBuffers(); }
+ Store &store() { return _store; }
+ uint32_t typeId() const { return _typeId; }
+
GrowthStats getGrowthStats(size_t bufs) {
GrowthStats sizes;
int prevBufferId = -1;
@@ -317,12 +320,28 @@ TEST(DataStoreTest, require_that_we_can_hold_and_trim_elements)
EXPECT_EQ(0, s.getEntry(r3));
}
+using IntHandle = Handle<int>;
+
MyRef
-toRef(Handle<int> handle)
+to_ref(IntHandle handle)
{
return MyRef(handle.ref);
}
+std::ostream&
+operator<<(std::ostream &os, const IntHandle &rhs)
+{
+ MyRef ref(rhs.ref);
+ os << "{ref.bufferId=" << ref.bufferId() << ", ref.offset=" << ref.offset() << ", data=" << rhs.data << "}";
+ return os;
+}
+
+void
+expect_successive_handles(const IntHandle &first, const IntHandle &second)
+{
+ EXPECT_EQ(to_ref(first).offset() + 1, to_ref(second).offset());
+}
+
TEST(DataStoreTest, require_that_we_can_use_free_lists)
{
MyStore s;
@@ -332,20 +351,19 @@ TEST(DataStoreTest, require_that_we_can_use_free_lists)
s.holdElem(h1.ref, 1);
s.transferHoldLists(10);
auto h2 = allocator.alloc(2);
+ expect_successive_handles(h1, h2);
s.holdElem(h2.ref, 1);
s.transferHoldLists(20);
s.trimElemHoldList(11);
auto h3 = allocator.alloc(3); // reuse h1.ref
- EXPECT_EQ(toRef(h1).offset(), toRef(h3).offset());
- EXPECT_EQ(toRef(h1).bufferId(), toRef(h3).bufferId());
+ EXPECT_EQ(h1, h3);
auto h4 = allocator.alloc(4);
- EXPECT_EQ(toRef(h2).offset() + 1, toRef(h4).offset());
+ expect_successive_handles(h2, h4);
s.trimElemHoldList(21);
auto h5 = allocator.alloc(5); // reuse h2.ref
- EXPECT_EQ(toRef(h2).offset(), toRef(h5).offset());
- EXPECT_EQ(toRef(h2).bufferId(), toRef(h5).bufferId());
+ EXPECT_EQ(h2, h5);
auto h6 = allocator.alloc(6);
- EXPECT_EQ(toRef(h4).offset() + 1, toRef(h6).offset());
+ expect_successive_handles(h4, h6);
EXPECT_EQ(3, s.getEntry(h1.ref));
EXPECT_EQ(5, s.getEntry(h2.ref));
EXPECT_EQ(3, s.getEntry(h3.ref));
@@ -354,6 +372,32 @@ TEST(DataStoreTest, require_that_we_can_use_free_lists)
EXPECT_EQ(6, s.getEntry(h6.ref));
}
+TEST(DataStoreTest, require_that_we_can_use_free_lists_with_raw_allocator)
+{
+ GrowStore<int, MyRef> grow_store(3, 64, 64, 64);
+ auto &s = grow_store.store();
+ s.enableFreeLists();
+ auto allocator = s.freeListRawAllocator<int>(grow_store.typeId());
+
+ auto h1 = allocator.alloc(3);
+ auto h2 = allocator.alloc(3);
+ expect_successive_handles(h1, h2);
+ s.holdElem(h1.ref, 3);
+ s.holdElem(h2.ref, 3);
+ s.transferHoldLists(10);
+ s.trimElemHoldList(11);
+
+ auto h3 = allocator.alloc(3); // reuse h2.ref from free list
+ EXPECT_EQ(h2, h3);
+
+ auto h4 = allocator.alloc(3); // reuse h1.ref from free list
+ EXPECT_EQ(h1, h4);
+
+ auto h5 = allocator.alloc(3);
+ expect_successive_handles(h2, h5);
+ expect_successive_handles(h3, h5);
+}
+
TEST(DataStoreTest, require_that_memory_stats_are_calculated)
{
MyStore s;
diff --git a/searchlib/src/vespa/searchlib/datastore/datastore.h b/searchlib/src/vespa/searchlib/datastore/datastore.h
index accff67e2fa..316ec34dc85 100644
--- a/searchlib/src/vespa/searchlib/datastore/datastore.h
+++ b/searchlib/src/vespa/searchlib/datastore/datastore.h
@@ -5,6 +5,7 @@
#include "allocator.h"
#include "datastorebase.h"
#include "free_list_allocator.h"
+#include "free_list_raw_allocator.h"
#include "raw_allocator.h"
namespace search::btree {
@@ -75,6 +76,9 @@ public:
template <typename EntryT>
RawAllocator<EntryT, RefT> rawAllocator(uint32_t typeId);
+ template <typename EntryT>
+ FreeListRawAllocator<EntryT, RefT> freeListRawAllocator(uint32_t typeId);
+
};
diff --git a/searchlib/src/vespa/searchlib/datastore/datastore.hpp b/searchlib/src/vespa/searchlib/datastore/datastore.hpp
index 74e3d9be2e0..39706957e2d 100644
--- a/searchlib/src/vespa/searchlib/datastore/datastore.hpp
+++ b/searchlib/src/vespa/searchlib/datastore/datastore.hpp
@@ -2,9 +2,10 @@
#pragma once
-#include "datastore.h"
#include "allocator.hpp"
+#include "datastore.h"
#include "free_list_allocator.hpp"
+#include "free_list_raw_allocator.hpp"
#include "raw_allocator.hpp"
#include <vespa/vespalib/util/array.hpp>
@@ -130,7 +131,13 @@ DataStoreT<RefT>::rawAllocator(uint32_t typeId)
return RawAllocator<EntryT, RefT>(*this, typeId);
}
-
+template <typename RefT>
+template <typename EntryT>
+FreeListRawAllocator<EntryT, RefT>
+DataStoreT<RefT>::freeListRawAllocator(uint32_t typeId)
+{
+ return FreeListRawAllocator<EntryT, RefT>(*this, typeId);
+}
template <typename EntryType, typename RefT>
DataStore<EntryType, RefT>::DataStore()
diff --git a/searchlib/src/vespa/searchlib/datastore/free_list_raw_allocator.h b/searchlib/src/vespa/searchlib/datastore/free_list_raw_allocator.h
new file mode 100644
index 00000000000..514eecc25a8
--- /dev/null
+++ b/searchlib/src/vespa/searchlib/datastore/free_list_raw_allocator.h
@@ -0,0 +1,33 @@
+// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include "raw_allocator.h"
+
+namespace search::datastore {
+
+/**
+ * Allocator used to allocate raw buffers (EntryT *) in an underlying data store
+ * with no construction or destruction of elements in the buffer. Uses free lists if available.
+ *
+ * If free lists are enabled this allocator should only be used when
+ * allocating the same number of elements each time (equal to cluster size).
+ */
+template <typename EntryT, typename RefT>
+class FreeListRawAllocator : public RawAllocator<EntryT, RefT>
+{
+public:
+ using ParentType = RawAllocator<EntryT, RefT>;
+ using HandleType = typename ParentType::HandleType;
+
+private:
+ using ParentType::_store;
+ using ParentType::_typeId;
+
+public:
+ FreeListRawAllocator(DataStoreBase &store, uint32_t typeId);
+
+ HandleType alloc(size_t numElems);
+};
+
+}
diff --git a/searchlib/src/vespa/searchlib/datastore/free_list_raw_allocator.hpp b/searchlib/src/vespa/searchlib/datastore/free_list_raw_allocator.hpp
new file mode 100644
index 00000000000..662580ce4af
--- /dev/null
+++ b/searchlib/src/vespa/searchlib/datastore/free_list_raw_allocator.hpp
@@ -0,0 +1,34 @@
+// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include "free_list_raw_allocator.h"
+
+namespace search::datastore {
+
+template <typename EntryT, typename RefT>
+FreeListRawAllocator<EntryT, RefT>::FreeListRawAllocator(DataStoreBase &store, uint32_t typeId)
+ : ParentType(store, typeId)
+{
+}
+
+template <typename EntryT, typename RefT>
+typename FreeListRawAllocator<EntryT, RefT>::HandleType
+FreeListRawAllocator<EntryT, RefT>::alloc(size_t numElems)
+{
+ BufferState::FreeListList &freeListList = _store.getFreeList(_typeId);
+ if (freeListList._head == nullptr) {
+ return ParentType::alloc(numElems);
+ }
+ BufferState &state = *freeListList._head;
+ assert(state.isActive());
+ assert(state.getClusterSize() == numElems);
+ RefT ref = state.popFreeList();
+ // If entry ref is not aligned we must scale the offset according to cluster size as it was divided when the entry ref was created.
+ uint64_t offset = !RefT::isAlignedType ? ref.offset() * state.getClusterSize() : ref.offset();
+ EntryT *entry = _store.template getBufferEntry<EntryT>(ref.bufferId(), offset);
+ return HandleType(ref, entry);
+}
+
+}
+
diff --git a/searchlib/src/vespa/searchlib/datastore/handle.h b/searchlib/src/vespa/searchlib/datastore/handle.h
index c0dce8d3d75..49eb4843816 100644
--- a/searchlib/src/vespa/searchlib/datastore/handle.h
+++ b/searchlib/src/vespa/searchlib/datastore/handle.h
@@ -16,6 +16,10 @@ struct Handle
EntryT *data;
Handle(EntryRef ref_, EntryT *data_) : ref(ref_), data(data_) {}
Handle() : ref(), data() {}
+ bool operator==(const Handle<EntryT> &rhs) const {
+ return ref == rhs.ref &&
+ data == rhs.data;
+ }
};
}
diff --git a/searchlib/src/vespa/searchlib/datastore/raw_allocator.h b/searchlib/src/vespa/searchlib/datastore/raw_allocator.h
index d0b7d1d1ca2..b7c00f75580 100644
--- a/searchlib/src/vespa/searchlib/datastore/raw_allocator.h
+++ b/searchlib/src/vespa/searchlib/datastore/raw_allocator.h
@@ -18,7 +18,7 @@ class RawAllocator
public:
using HandleType = Handle<EntryT>;
-private:
+protected:
DataStoreBase &_store;
uint32_t _typeId;