author    Henning Baldersheim <balder@yahoo-inc.com>  2019-03-14 22:48:05 +0100
committer GitHub <noreply@github.com>                 2019-03-14 22:48:05 +0100
commit    85f999c2235db4357ca7571b2278010caf83f3c5 (patch)
tree      adde0c54389d657945a53c76eb7dcf569e718b0d /searchlib
parent    bb21cf5880871a7ee9d3e55a276e3b4981b1d62d (diff)
parent    a874b7f238e04c7d6a83b0ebbb404a1a54c4cb03 (diff)
Merge pull request #8789 from vespa-engine/geirst/add-raw-allocator-using-free-lists
Add raw allocator using free lists.
Diffstat (limited to 'searchlib')
-rw-r--r--  searchlib/src/tests/attribute/tensorattribute/tensorattribute_test.cpp |   4
-rw-r--r--  searchlib/src/tests/datastore/datastore/CMakeLists.txt                 |   1
-rw-r--r--  searchlib/src/tests/datastore/datastore/datastore_test.cpp             | 311
-rw-r--r--  searchlib/src/vespa/searchlib/datastore/datastore.h                    |   4
-rw-r--r--  searchlib/src/vespa/searchlib/datastore/datastore.hpp                  |  11
-rw-r--r--  searchlib/src/vespa/searchlib/datastore/free_list_raw_allocator.h      |  33
-rw-r--r--  searchlib/src/vespa/searchlib/datastore/free_list_raw_allocator.hpp    |  34
-rw-r--r--  searchlib/src/vespa/searchlib/datastore/handle.h                       |   4
-rw-r--r--  searchlib/src/vespa/searchlib/datastore/raw_allocator.h                |   2
-rw-r--r--  searchlib/src/vespa/searchlib/tensor/dense_tensor_store.cpp            |   6
10 files changed, 270 insertions(+), 140 deletions(-)
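At a glance, the merge adds a FreeListRawAllocator that combines RawAllocator's raw-buffer allocation with free-list reuse, and switches the dense tensor store over to it for tensor types where all dimensions are bound. A minimal usage sketch, modeled on the new test in datastore_test.cpp (MyStore and typeId stand in for a concrete DataStoreT specialization and its registered buffer type):

    MyStore s;                                      // hypothetical DataStoreT<...> with an int buffer type
    s.enableFreeLists();
    auto allocator = s.freeListRawAllocator<int>(typeId);
    auto h1 = allocator.alloc(3);                   // fresh allocation of 3 elements (the cluster size)
    s.holdElem(h1.ref, 3);                          // put the entry on hold ...
    s.transferHoldLists(10);
    s.trimElemHoldList(11);                         // ... and release it onto the free list
    auto h2 = allocator.alloc(3);                   // reuses h1.ref from the free list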
diff --git a/searchlib/src/tests/attribute/tensorattribute/tensorattribute_test.cpp b/searchlib/src/tests/attribute/tensorattribute/tensorattribute_test.cpp
index 2e339a069b6..bd814b0ad32 100644
--- a/searchlib/src/tests/attribute/tensorattribute/tensorattribute_test.cpp
+++ b/searchlib/src/tests/attribute/tensorattribute/tensorattribute_test.cpp
@@ -307,6 +307,10 @@ Fixture::testSaveLoad()
void
Fixture::testCompaction()
{
+ if (_useDenseTensorAttribute && _denseTensors && !_cfg.tensorType().is_abstract()) {
+ LOG(info, "Skipping compaction test for tensor '%s' which is using free-lists", _cfg.tensorType().to_spec().c_str());
+ return;
+ }
ensureSpace(4);
Tensor::UP emptytensor = _tensorAttr->getEmptyTensor();
Tensor::UP emptyxytensor = createTensor({}, {"x", "y"});
diff --git a/searchlib/src/tests/datastore/datastore/CMakeLists.txt b/searchlib/src/tests/datastore/datastore/CMakeLists.txt
index b8922fff563..1bc6210a13f 100644
--- a/searchlib/src/tests/datastore/datastore/CMakeLists.txt
+++ b/searchlib/src/tests/datastore/datastore/CMakeLists.txt
@@ -4,5 +4,6 @@ vespa_add_executable(searchlib_datastore_test_app TEST
datastore_test.cpp
DEPENDS
searchlib
+ gtest
)
vespa_add_test(NAME searchlib_datastore_test_app COMMAND searchlib_datastore_test_app)
diff --git a/searchlib/src/tests/datastore/datastore/datastore_test.cpp b/searchlib/src/tests/datastore/datastore/datastore_test.cpp
index b312d2bfe55..aa26afa3077 100644
--- a/searchlib/src/tests/datastore/datastore/datastore_test.cpp
+++ b/searchlib/src/tests/datastore/datastore/datastore_test.cpp
@@ -1,15 +1,14 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include <vespa/vespalib/testkit/testapp.h>
#include <vespa/searchlib/datastore/datastore.h>
#include <vespa/searchlib/datastore/datastore.hpp>
+#include <vespa/vespalib/gtest/gtest.h>
#include <vespa/vespalib/test/insertion_operators.h>
#include <vespa/log/log.h>
LOG_SETUP("datastore_test");
-namespace search {
-namespace datastore {
+namespace search::datastore {
using vespalib::alloc::MemoryAllocator;
@@ -91,6 +90,9 @@ public:
}
~GrowStore() { _store.dropBuffers(); }
+ Store &store() { return _store; }
+ uint32_t typeId() const { return _typeId; }
+
GrowthStats getGrowthStats(size_t bufs) {
GrowthStats sizes;
int prevBufferId = -1;
@@ -136,95 +138,94 @@ public:
using MyRef = MyStore::RefType;
-bool
+void
assertMemStats(const DataStoreBase::MemStats &exp,
const DataStoreBase::MemStats &act)
{
- if (!EXPECT_EQUAL(exp._allocElems, act._allocElems)) return false;
- if (!EXPECT_EQUAL(exp._usedElems, act._usedElems)) return false;
- if (!EXPECT_EQUAL(exp._deadElems, act._deadElems)) return false;
- if (!EXPECT_EQUAL(exp._holdElems, act._holdElems)) return false;
- if (!EXPECT_EQUAL(exp._freeBuffers, act._freeBuffers)) return false;
- if (!EXPECT_EQUAL(exp._activeBuffers, act._activeBuffers)) return false;
- if (!EXPECT_EQUAL(exp._holdBuffers, act._holdBuffers)) return false;
- return true;
+ EXPECT_EQ(exp._allocElems, act._allocElems);
+ EXPECT_EQ(exp._usedElems, act._usedElems);
+ EXPECT_EQ(exp._deadElems, act._deadElems);
+ EXPECT_EQ(exp._holdElems, act._holdElems);
+ EXPECT_EQ(exp._freeBuffers, act._freeBuffers);
+ EXPECT_EQ(exp._activeBuffers, act._activeBuffers);
+ EXPECT_EQ(exp._holdBuffers, act._holdBuffers);
}
-TEST("require that entry ref is working")
+TEST(DataStoreTest, require_that_entry_ref_is_working)
{
using MyRefType = EntryRefT<22>;
- EXPECT_EQUAL(4194304u, MyRefType::offsetSize());
- EXPECT_EQUAL(1024u, MyRefType::numBuffers());
+ EXPECT_EQ(4194304u, MyRefType::offsetSize());
+ EXPECT_EQ(1024u, MyRefType::numBuffers());
{
MyRefType r(0, 0);
- EXPECT_EQUAL(0u, r.offset());
- EXPECT_EQUAL(0u, r.bufferId());
+ EXPECT_EQ(0u, r.offset());
+ EXPECT_EQ(0u, r.bufferId());
}
{
MyRefType r(237, 13);
- EXPECT_EQUAL(237u, r.offset());
- EXPECT_EQUAL(13u, r.bufferId());
+ EXPECT_EQ(237u, r.offset());
+ EXPECT_EQ(13u, r.bufferId());
}
{
MyRefType r(4194303, 1023);
- EXPECT_EQUAL(4194303u, r.offset());
- EXPECT_EQUAL(1023u, r.bufferId());
+ EXPECT_EQ(4194303u, r.offset());
+ EXPECT_EQ(1023u, r.bufferId());
}
{
MyRefType r1(6498, 76);
MyRefType r2(r1);
- EXPECT_EQUAL(r1.offset(), r2.offset());
- EXPECT_EQUAL(r1.bufferId(), r2.bufferId());
+ EXPECT_EQ(r1.offset(), r2.offset());
+ EXPECT_EQ(r1.bufferId(), r2.bufferId());
}
}
-TEST("require that aligned entry ref is working")
+TEST(DataStoreTest, require_that_aligned_entry_ref_is_working)
{
    using MyRefType = AlignedEntryRefT<22, 2>; // 4 byte alignment
- EXPECT_EQUAL(4 * 4194304u, MyRefType::offsetSize());
- EXPECT_EQUAL(1024u, MyRefType::numBuffers());
- EXPECT_EQUAL(0u, MyRefType::align(0));
- EXPECT_EQUAL(4u, MyRefType::align(1));
- EXPECT_EQUAL(4u, MyRefType::align(2));
- EXPECT_EQUAL(4u, MyRefType::align(3));
- EXPECT_EQUAL(4u, MyRefType::align(4));
- EXPECT_EQUAL(8u, MyRefType::align(5));
+ EXPECT_EQ(4 * 4194304u, MyRefType::offsetSize());
+ EXPECT_EQ(1024u, MyRefType::numBuffers());
+ EXPECT_EQ(0u, MyRefType::align(0));
+ EXPECT_EQ(4u, MyRefType::align(1));
+ EXPECT_EQ(4u, MyRefType::align(2));
+ EXPECT_EQ(4u, MyRefType::align(3));
+ EXPECT_EQ(4u, MyRefType::align(4));
+ EXPECT_EQ(8u, MyRefType::align(5));
{
MyRefType r(0, 0);
- EXPECT_EQUAL(0u, r.offset());
- EXPECT_EQUAL(0u, r.bufferId());
+ EXPECT_EQ(0u, r.offset());
+ EXPECT_EQ(0u, r.bufferId());
}
{
MyRefType r(237, 13);
- EXPECT_EQUAL(MyRefType::align(237), r.offset());
- EXPECT_EQUAL(13u, r.bufferId());
+ EXPECT_EQ(MyRefType::align(237), r.offset());
+ EXPECT_EQ(13u, r.bufferId());
}
{
MyRefType r(MyRefType::offsetSize() - 4, 1023);
- EXPECT_EQUAL(MyRefType::align(MyRefType::offsetSize() - 4), r.offset());
- EXPECT_EQUAL(1023u, r.bufferId());
+ EXPECT_EQ(MyRefType::align(MyRefType::offsetSize() - 4), r.offset());
+ EXPECT_EQ(1023u, r.bufferId());
}
}
-TEST("require that entries can be added and retrieved")
+TEST(DataStoreTest, require_that_entries_can_be_added_and_retrieved)
{
using IntStore = DataStore<int>;
IntStore ds;
EntryRef r1 = ds.addEntry(10);
EntryRef r2 = ds.addEntry(20);
EntryRef r3 = ds.addEntry(30);
- EXPECT_EQUAL(1u, IntStore::RefType(r1).offset());
- EXPECT_EQUAL(2u, IntStore::RefType(r2).offset());
- EXPECT_EQUAL(3u, IntStore::RefType(r3).offset());
- EXPECT_EQUAL(0u, IntStore::RefType(r1).bufferId());
- EXPECT_EQUAL(0u, IntStore::RefType(r2).bufferId());
- EXPECT_EQUAL(0u, IntStore::RefType(r3).bufferId());
- EXPECT_EQUAL(10, ds.getEntry(r1));
- EXPECT_EQUAL(20, ds.getEntry(r2));
- EXPECT_EQUAL(30, ds.getEntry(r3));
+ EXPECT_EQ(1u, IntStore::RefType(r1).offset());
+ EXPECT_EQ(2u, IntStore::RefType(r2).offset());
+ EXPECT_EQ(3u, IntStore::RefType(r3).offset());
+ EXPECT_EQ(0u, IntStore::RefType(r1).bufferId());
+ EXPECT_EQ(0u, IntStore::RefType(r2).bufferId());
+ EXPECT_EQ(0u, IntStore::RefType(r3).bufferId());
+ EXPECT_EQ(10, ds.getEntry(r1));
+ EXPECT_EQ(20, ds.getEntry(r2));
+ EXPECT_EQ(30, ds.getEntry(r3));
}
-TEST("require that add entry triggers change of buffer")
+TEST(DataStoreTest, require_that_add_entry_triggers_change_of_buffer)
{
using Store = DataStore<uint64_t, EntryRefT<10, 10> >;
Store s;
@@ -233,12 +234,11 @@ TEST("require that add entry triggers change of buffer")
uint64_t lastNum = 0;
for (;;++num) {
EntryRef r = s.addEntry(num);
- EXPECT_EQUAL(num, s.getEntry(r));
+ EXPECT_EQ(num, s.getEntry(r));
uint32_t bufferId = Store::RefType(r).bufferId();
if (bufferId > lastId) {
LOG(info, "Changed to bufferId %u after %" PRIu64 " nums", bufferId, num);
- EXPECT_EQUAL(Store::RefType::offsetSize() - (lastId == 0),
- num - lastNum);
+ EXPECT_EQ(Store::RefType::offsetSize() - (lastId == 0), num - lastNum);
lastId = bufferId;
lastNum = num;
}
@@ -246,32 +246,32 @@ TEST("require that add entry triggers change of buffer")
break;
}
}
- EXPECT_EQUAL(Store::RefType::offsetSize() * 2 - 1, num);
+ EXPECT_EQ(Store::RefType::offsetSize() * 2 - 1, num);
LOG(info, "Added %" PRIu64 " nums in 2 buffers", num);
}
-TEST("require that we can hold and trim buffers")
+TEST(DataStoreTest, require_that_we_can_hold_and_trim_buffers)
{
MyStore s;
- EXPECT_EQUAL(0u, MyRef(s.addEntry(1)).bufferId());
+ EXPECT_EQ(0u, MyRef(s.addEntry(1)).bufferId());
s.switchActiveBuffer();
- EXPECT_EQUAL(1u, s.activeBufferId());
+ EXPECT_EQ(1u, s.activeBufferId());
s.holdBuffer(0); // hold last buffer
s.transferHoldLists(10);
- EXPECT_EQUAL(1u, MyRef(s.addEntry(2)).bufferId());
+ EXPECT_EQ(1u, MyRef(s.addEntry(2)).bufferId());
s.switchActiveBuffer();
- EXPECT_EQUAL(2u, s.activeBufferId());
+ EXPECT_EQ(2u, s.activeBufferId());
s.holdBuffer(1); // hold last buffer
s.transferHoldLists(20);
- EXPECT_EQUAL(2u, MyRef(s.addEntry(3)).bufferId());
+ EXPECT_EQ(2u, MyRef(s.addEntry(3)).bufferId());
s.switchActiveBuffer();
- EXPECT_EQUAL(3u, s.activeBufferId());
+ EXPECT_EQ(3u, s.activeBufferId());
s.holdBuffer(2); // hold last buffer
s.transferHoldLists(30);
- EXPECT_EQUAL(3u, MyRef(s.addEntry(4)).bufferId());
+ EXPECT_EQ(3u, MyRef(s.addEntry(4)).bufferId());
s.holdBuffer(3); // hold current buffer
s.transferHoldLists(40);
@@ -286,8 +286,8 @@ TEST("require that we can hold and trim buffers")
EXPECT_TRUE(s.getBufferState(3).size() != 0);
s.switchActiveBuffer();
- EXPECT_EQUAL(0u, s.activeBufferId());
- EXPECT_EQUAL(0u, MyRef(s.addEntry(5)).bufferId());
+ EXPECT_EQ(0u, s.activeBufferId());
+ EXPECT_EQ(0u, MyRef(s.addEntry(5)).bufferId());
s.trimHoldLists(41);
EXPECT_TRUE(s.getBufferState(0).size() != 0);
EXPECT_TRUE(s.getBufferState(1).size() == 0);
@@ -295,7 +295,7 @@ TEST("require that we can hold and trim buffers")
EXPECT_TRUE(s.getBufferState(3).size() == 0);
}
-TEST("require that we can hold and trim elements")
+TEST(DataStoreTest, require_that_we_can_hold_and_trim_elements)
{
MyStore s;
MyRef r1 = s.addEntry(1);
@@ -307,26 +307,42 @@ TEST("require that we can hold and trim elements")
MyRef r3 = s.addEntry(3);
s.holdElem(r3, 1);
s.transferHoldLists(30);
- EXPECT_EQUAL(1, s.getEntry(r1));
- EXPECT_EQUAL(2, s.getEntry(r2));
- EXPECT_EQUAL(3, s.getEntry(r3));
+ EXPECT_EQ(1, s.getEntry(r1));
+ EXPECT_EQ(2, s.getEntry(r2));
+ EXPECT_EQ(3, s.getEntry(r3));
s.trimElemHoldList(11);
- EXPECT_EQUAL(0, s.getEntry(r1));
- EXPECT_EQUAL(2, s.getEntry(r2));
- EXPECT_EQUAL(3, s.getEntry(r3));
+ EXPECT_EQ(0, s.getEntry(r1));
+ EXPECT_EQ(2, s.getEntry(r2));
+ EXPECT_EQ(3, s.getEntry(r3));
s.trimElemHoldList(31);
- EXPECT_EQUAL(0, s.getEntry(r1));
- EXPECT_EQUAL(0, s.getEntry(r2));
- EXPECT_EQUAL(0, s.getEntry(r3));
+ EXPECT_EQ(0, s.getEntry(r1));
+ EXPECT_EQ(0, s.getEntry(r2));
+ EXPECT_EQ(0, s.getEntry(r3));
}
+using IntHandle = Handle<int>;
+
MyRef
-toRef(Handle<int> handle)
+to_ref(IntHandle handle)
{
return MyRef(handle.ref);
}
-TEST("require that we can use free lists")
+std::ostream&
+operator<<(std::ostream &os, const IntHandle &rhs)
+{
+ MyRef ref(rhs.ref);
+ os << "{ref.bufferId=" << ref.bufferId() << ", ref.offset=" << ref.offset() << ", data=" << rhs.data << "}";
+ return os;
+}
+
+void
+expect_successive_handles(const IntHandle &first, const IntHandle &second)
+{
+ EXPECT_EQ(to_ref(first).offset() + 1, to_ref(second).offset());
+}
+
+TEST(DataStoreTest, require_that_we_can_use_free_lists)
{
MyStore s;
s.enableFreeLists();
@@ -335,29 +351,54 @@ TEST("require that we can use free lists")
s.holdElem(h1.ref, 1);
s.transferHoldLists(10);
auto h2 = allocator.alloc(2);
+ expect_successive_handles(h1, h2);
s.holdElem(h2.ref, 1);
s.transferHoldLists(20);
s.trimElemHoldList(11);
auto h3 = allocator.alloc(3); // reuse h1.ref
- EXPECT_EQUAL(toRef(h1).offset(), toRef(h3).offset());
- EXPECT_EQUAL(toRef(h1).bufferId(), toRef(h3).bufferId());
+ EXPECT_EQ(h1, h3);
auto h4 = allocator.alloc(4);
- EXPECT_EQUAL(toRef(h2).offset() + 1, toRef(h4).offset());
+ expect_successive_handles(h2, h4);
s.trimElemHoldList(21);
auto h5 = allocator.alloc(5); // reuse h2.ref
- EXPECT_EQUAL(toRef(h2).offset(), toRef(h5).offset());
- EXPECT_EQUAL(toRef(h2).bufferId(), toRef(h5).bufferId());
+ EXPECT_EQ(h2, h5);
auto h6 = allocator.alloc(6);
- EXPECT_EQUAL(toRef(h4).offset() + 1, toRef(h6).offset());
- EXPECT_EQUAL(3, s.getEntry(h1.ref));
- EXPECT_EQUAL(5, s.getEntry(h2.ref));
- EXPECT_EQUAL(3, s.getEntry(h3.ref));
- EXPECT_EQUAL(4, s.getEntry(h4.ref));
- EXPECT_EQUAL(5, s.getEntry(h5.ref));
- EXPECT_EQUAL(6, s.getEntry(h6.ref));
+ expect_successive_handles(h4, h6);
+ EXPECT_EQ(3, s.getEntry(h1.ref));
+ EXPECT_EQ(5, s.getEntry(h2.ref));
+ EXPECT_EQ(3, s.getEntry(h3.ref));
+ EXPECT_EQ(4, s.getEntry(h4.ref));
+ EXPECT_EQ(5, s.getEntry(h5.ref));
+ EXPECT_EQ(6, s.getEntry(h6.ref));
+}
+
+TEST(DataStoreTest, require_that_we_can_use_free_lists_with_raw_allocator)
+{
+ GrowStore<int, MyRef> grow_store(3, 64, 64, 64);
+ auto &s = grow_store.store();
+ s.enableFreeLists();
+ auto allocator = s.freeListRawAllocator<int>(grow_store.typeId());
+
+ auto h1 = allocator.alloc(3);
+ auto h2 = allocator.alloc(3);
+ expect_successive_handles(h1, h2);
+ s.holdElem(h1.ref, 3);
+ s.holdElem(h2.ref, 3);
+ s.transferHoldLists(10);
+ s.trimElemHoldList(11);
+
+ auto h3 = allocator.alloc(3); // reuse h2.ref from free list
+ EXPECT_EQ(h2, h3);
+
+ auto h4 = allocator.alloc(3); // reuse h1.ref from free list
+ EXPECT_EQ(h1, h4);
+
+ auto h5 = allocator.alloc(3);
+ expect_successive_handles(h2, h5);
+ expect_successive_handles(h3, h5);
}
-TEST("require that memory stats are calculated")
+TEST(DataStoreTest, require_that_memory_stats_are_calculated)
{
MyStore s;
DataStoreBase::MemStats m;
@@ -368,17 +409,17 @@ TEST("require that memory stats are calculated")
m._activeBuffers = 1;
m._freeBuffers = MyRef::numBuffers() - 1;
m._holdBuffers = 0;
- EXPECT_TRUE(assertMemStats(m, s.getMemStats()));
+ assertMemStats(m, s.getMemStats());
// add entry
MyRef r = s.addEntry(10);
m._usedElems++;
- EXPECT_TRUE(assertMemStats(m, s.getMemStats()));
+ assertMemStats(m, s.getMemStats());
// inc dead
s.incDead(r, 1);
m._deadElems++;
- EXPECT_TRUE(assertMemStats(m, s.getMemStats()));
+ assertMemStats(m, s.getMemStats());
// hold buffer
s.addEntry(20);
@@ -389,7 +430,7 @@ TEST("require that memory stats are calculated")
m._holdElems += 2; // used - dead
m._activeBuffers--;
m._holdBuffers++;
- EXPECT_TRUE(assertMemStats(m, s.getMemStats()));
+ assertMemStats(m, s.getMemStats());
// new active buffer
s.switchActiveBuffer();
@@ -407,10 +448,10 @@ TEST("require that memory stats are calculated")
m._holdElems = 0;
m._freeBuffers = MyRef::numBuffers() - 1;
m._holdBuffers = 0;
- EXPECT_TRUE(assertMemStats(m, s.getMemStats()));
+ assertMemStats(m, s.getMemStats());
}
-TEST("require that memory usage is calculated")
+TEST(DataStoreTest, require_that_memory_usage_is_calculated)
{
MyStore s;
MyRef r = s.addEntry(10);
@@ -421,14 +462,14 @@ TEST("require that memory usage is calculated")
s.holdBuffer(r.bufferId());
s.transferHoldLists(100);
MemoryUsage m = s.getMemoryUsage();
- EXPECT_EQUAL(MyRef::offsetSize() * sizeof(int), m.allocatedBytes());
- EXPECT_EQUAL(5 * sizeof(int), m.usedBytes());
- EXPECT_EQUAL(2 * sizeof(int), m.deadBytes());
- EXPECT_EQUAL(3 * sizeof(int), m.allocatedBytesOnHold());
+ EXPECT_EQ(MyRef::offsetSize() * sizeof(int), m.allocatedBytes());
+ EXPECT_EQ(5 * sizeof(int), m.usedBytes());
+ EXPECT_EQ(2 * sizeof(int), m.deadBytes());
+ EXPECT_EQ(3 * sizeof(int), m.allocatedBytesOnHold());
s.trimHoldLists(101);
}
-TEST("require that we can disable elemement hold list")
+TEST(DataStoreTest, require_that_we_can_disable_elemement_hold_list)
{
MyStore s;
MyRef r1 = s.addEntry(10);
@@ -436,23 +477,23 @@ TEST("require that we can disable elemement hold list")
MyRef r3 = s.addEntry(30);
(void) r3;
MemoryUsage m = s.getMemoryUsage();
- EXPECT_EQUAL(MyRef::offsetSize() * sizeof(int), m.allocatedBytes());
- EXPECT_EQUAL(4 * sizeof(int), m.usedBytes());
- EXPECT_EQUAL(1 * sizeof(int), m.deadBytes());
- EXPECT_EQUAL(0 * sizeof(int), m.allocatedBytesOnHold());
+ EXPECT_EQ(MyRef::offsetSize() * sizeof(int), m.allocatedBytes());
+ EXPECT_EQ(4 * sizeof(int), m.usedBytes());
+ EXPECT_EQ(1 * sizeof(int), m.deadBytes());
+ EXPECT_EQ(0 * sizeof(int), m.allocatedBytesOnHold());
s.holdElem(r1, 1);
m = s.getMemoryUsage();
- EXPECT_EQUAL(MyRef::offsetSize() * sizeof(int), m.allocatedBytes());
- EXPECT_EQUAL(4 * sizeof(int), m.usedBytes());
- EXPECT_EQUAL(1 * sizeof(int), m.deadBytes());
- EXPECT_EQUAL(1 * sizeof(int), m.allocatedBytesOnHold());
+ EXPECT_EQ(MyRef::offsetSize() * sizeof(int), m.allocatedBytes());
+ EXPECT_EQ(4 * sizeof(int), m.usedBytes());
+ EXPECT_EQ(1 * sizeof(int), m.deadBytes());
+ EXPECT_EQ(1 * sizeof(int), m.allocatedBytesOnHold());
s.disableElemHoldList();
s.holdElem(r2, 1);
m = s.getMemoryUsage();
- EXPECT_EQUAL(MyRef::offsetSize() * sizeof(int), m.allocatedBytes());
- EXPECT_EQUAL(4 * sizeof(int), m.usedBytes());
- EXPECT_EQUAL(2 * sizeof(int), m.deadBytes());
- EXPECT_EQUAL(1 * sizeof(int), m.allocatedBytesOnHold());
+ EXPECT_EQ(MyRef::offsetSize() * sizeof(int), m.allocatedBytes());
+ EXPECT_EQ(4 * sizeof(int), m.usedBytes());
+ EXPECT_EQ(2 * sizeof(int), m.deadBytes());
+ EXPECT_EQ(1 * sizeof(int), m.allocatedBytesOnHold());
s.transferHoldLists(100);
s.trimHoldLists(101);
}
@@ -466,39 +507,39 @@ void assertGrowStats(GrowthStats expSizes,
size_t expInitMemUsage,
size_t minClusters, size_t numClustersForNewBuffer, size_t maxClusters = 128)
{
- EXPECT_EQUAL(expSizes, IntGrowStore(1, minClusters, maxClusters, numClustersForNewBuffer).getGrowthStats(expSizes.size()));
- EXPECT_EQUAL(expFirstBufSizes, IntGrowStore(1, minClusters, maxClusters, numClustersForNewBuffer).getFirstBufGrowStats());
- EXPECT_EQUAL(expInitMemUsage, IntGrowStore(1, minClusters, maxClusters, numClustersForNewBuffer).getMemoryUsage().allocatedBytes());
+ EXPECT_EQ(expSizes, IntGrowStore(1, minClusters, maxClusters, numClustersForNewBuffer).getGrowthStats(expSizes.size()));
+ EXPECT_EQ(expFirstBufSizes, IntGrowStore(1, minClusters, maxClusters, numClustersForNewBuffer).getFirstBufGrowStats());
+ EXPECT_EQ(expInitMemUsage, IntGrowStore(1, minClusters, maxClusters, numClustersForNewBuffer).getMemoryUsage().allocatedBytes());
}
}
-TEST("require that buffer growth works")
+TEST(DataStoreTest, require_that_buffer_growth_works)
{
// Always switch to new buffer, min size 4
- TEST_DO(assertGrowStats({ 4, 4, 4, 4, 8, 16, 16, 32, 64, 64 },
- { 4 }, 20, 4, 0));
+ assertGrowStats({ 4, 4, 4, 4, 8, 16, 16, 32, 64, 64 },
+ { 4 }, 20, 4, 0);
// Resize if buffer size is less than 4, min size 0
- TEST_DO(assertGrowStats({ 4, 4, 4, 4, 8, 16, 16, 32, 64, 64 },
- { 0, 1, 2, 4 }, 4, 0, 4));
+ assertGrowStats({ 4, 4, 4, 4, 8, 16, 16, 32, 64, 64 },
+ { 0, 1, 2, 4 }, 4, 0, 4);
// Always switch to new buffer, min size 16
- TEST_DO(assertGrowStats({ 16, 16, 16, 32, 32, 64, 128, 128, 128 },
- { 16 }, 68, 16, 0));
+ assertGrowStats({ 16, 16, 16, 32, 32, 64, 128, 128, 128 },
+ { 16 }, 68, 16, 0);
// Resize if buffer size is less than 16, min size 0
- TEST_DO(assertGrowStats({ 16, 16, 16, 32, 32, 64, 128, 128, 128 },
- { 0, 1, 2, 4, 8, 16 }, 4, 0, 16));
+ assertGrowStats({ 16, 16, 16, 32, 32, 64, 128, 128, 128 },
+ { 0, 1, 2, 4, 8, 16 }, 4, 0, 16);
// Resize if buffer size is less than 16, min size 4
- TEST_DO(assertGrowStats({ 16, 16, 16, 32, 32, 64, 128, 128, 128 },
- { 4, 8, 16 }, 20, 4, 16));
+ assertGrowStats({ 16, 16, 16, 32, 32, 64, 128, 128, 128 },
+ { 4, 8, 16 }, 20, 4, 16);
// Always switch to new buffer, min size 0
- TEST_DO(assertGrowStats({ 1, 1, 1, 1, 1, 2, 2, 4, 8, 8, 16, 32 },
- { 0, 1 }, 4, 0, 0));
+ assertGrowStats({ 1, 1, 1, 1, 1, 2, 2, 4, 8, 8, 16, 32 },
+ { 0, 1 }, 4, 0, 0);
// Buffers with sizes larger than the huge page size of the mmap allocator.
- ASSERT_EQUAL(524288u, HUGE_PAGE_CLUSTER_SIZE);
- TEST_DO(assertGrowStats({ 262144, 262144, 262144, 524288, 524288, 524288 * 2, 524288 * 3, 524288 * 4, 524288 * 5, 524288 * 5 },
- { 0, 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072, 262144 },
- 4, 0, HUGE_PAGE_CLUSTER_SIZE / 2, HUGE_PAGE_CLUSTER_SIZE * 5));
+ ASSERT_EQ(524288u, HUGE_PAGE_CLUSTER_SIZE);
+ assertGrowStats({ 262144, 262144, 262144, 524288, 524288, 524288 * 2, 524288 * 3, 524288 * 4, 524288 * 5, 524288 * 5 },
+ { 0, 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072, 262144 },
+ 4, 0, HUGE_PAGE_CLUSTER_SIZE / 2, HUGE_PAGE_CLUSTER_SIZE * 5);
}
using RefType15 = EntryRefT<15>; // offsetSize=32768
@@ -512,12 +553,12 @@ void assertGrowStats(GrowthStats expSizes, uint32_t clusterSize)
uint32_t maxClusters = RefType15::offsetSize();
uint32_t numClustersForNewBuffer = 2048;
GrowStore<DataType, RefType15> store(clusterSize, minClusters, maxClusters, numClustersForNewBuffer);
- EXPECT_EQUAL(expSizes, store.getGrowthStats(expSizes.size()));
+ EXPECT_EQ(expSizes, store.getGrowthStats(expSizes.size()));
}
}
-TEST("require that offset in EntryRefT is within bounds when allocating memory buffers where wanted number of bytes is not a power of 2 and less than huge page size")
+TEST(DataStoreTest, require_that_offset_in_EntryRefT_is_within_bounds_when_allocating_memory_buffers_where_wanted_number_of_bytes_is_not_a_power_of_2_and_less_than_huge_page_size)
{
/*
* When allocating new memory buffers for the data store the following happens (ref. calcAllocation() in bufferstate.cpp):
@@ -539,7 +580,5 @@ TEST("require that offset in EntryRefT is within bounds when allocating memory b
}
}
-}
-
-TEST_MAIN() { TEST_RUN_ALL(); }
+GTEST_MAIN_RUN_ALL_TESTS()
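Beyond the new free-list coverage, the file above is migrated wholesale from vespalib's testkit to GoogleTest. The mapping applied throughout, shown side by side:

    // vespalib testkit                  ->  GoogleTest
    TEST("some description") { ... }         TEST(DataStoreTest, some_description) { ... }
    EXPECT_EQUAL(a, b);                      EXPECT_EQ(a, b);
    TEST_DO(assertHelper(...));              assertHelper(...);  // helpers now return void and use EXPECT_* internally
    TEST_MAIN() { TEST_RUN_ALL(); }          GTEST_MAIN_RUN_ALL_TESTS()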
diff --git a/searchlib/src/vespa/searchlib/datastore/datastore.h b/searchlib/src/vespa/searchlib/datastore/datastore.h
index accff67e2fa..316ec34dc85 100644
--- a/searchlib/src/vespa/searchlib/datastore/datastore.h
+++ b/searchlib/src/vespa/searchlib/datastore/datastore.h
@@ -5,6 +5,7 @@
#include "allocator.h"
#include "datastorebase.h"
#include "free_list_allocator.h"
+#include "free_list_raw_allocator.h"
#include "raw_allocator.h"
namespace search::btree {
@@ -75,6 +76,9 @@ public:
template <typename EntryT>
RawAllocator<EntryT, RefT> rawAllocator(uint32_t typeId);
+ template <typename EntryT>
+ FreeListRawAllocator<EntryT, RefT> freeListRawAllocator(uint32_t typeId);
+
};
diff --git a/searchlib/src/vespa/searchlib/datastore/datastore.hpp b/searchlib/src/vespa/searchlib/datastore/datastore.hpp
index 74e3d9be2e0..39706957e2d 100644
--- a/searchlib/src/vespa/searchlib/datastore/datastore.hpp
+++ b/searchlib/src/vespa/searchlib/datastore/datastore.hpp
@@ -2,9 +2,10 @@
#pragma once
-#include "datastore.h"
#include "allocator.hpp"
+#include "datastore.h"
#include "free_list_allocator.hpp"
+#include "free_list_raw_allocator.hpp"
#include "raw_allocator.hpp"
#include <vespa/vespalib/util/array.hpp>
@@ -130,7 +131,13 @@ DataStoreT<RefT>::rawAllocator(uint32_t typeId)
return RawAllocator<EntryT, RefT>(*this, typeId);
}
-
+template <typename RefT>
+template <typename EntryT>
+FreeListRawAllocator<EntryT, RefT>
+DataStoreT<RefT>::freeListRawAllocator(uint32_t typeId)
+{
+ return FreeListRawAllocator<EntryT, RefT>(*this, typeId);
+}
template <typename EntryType, typename RefT>
DataStore<EntryType, RefT>::DataStore()
diff --git a/searchlib/src/vespa/searchlib/datastore/free_list_raw_allocator.h b/searchlib/src/vespa/searchlib/datastore/free_list_raw_allocator.h
new file mode 100644
index 00000000000..514eecc25a8
--- /dev/null
+++ b/searchlib/src/vespa/searchlib/datastore/free_list_raw_allocator.h
@@ -0,0 +1,33 @@
+// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include "raw_allocator.h"
+
+namespace search::datastore {
+
+/**
+ * Allocator used to allocate raw buffers (EntryT *) in an underlying data store
+ * Allocator used to allocate raw buffers (EntryT *) in an underlying data store
+ * with no construction or destruction of elements in the buffer. Uses free lists if available.
+ *
+ * If free lists are enabled, this allocator should only be used when
+ * allocating the same number of elements each time (equal to the cluster size).
+ */
+template <typename EntryT, typename RefT>
+class FreeListRawAllocator : public RawAllocator<EntryT, RefT>
+{
+public:
+ using ParentType = RawAllocator<EntryT, RefT>;
+ using HandleType = typename ParentType::HandleType;
+
+private:
+ using ParentType::_store;
+ using ParentType::_typeId;
+
+public:
+ FreeListRawAllocator(DataStoreBase &store, uint32_t typeId);
+
+ HandleType alloc(size_t numElems);
+};
+
+}
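The equal-size restriction in the class comment follows from how entries are recycled: an entry popped off the free list always spans exactly one cluster, and the implementation (next diff) asserts as much. A hedged sketch of the contract, with clusterSize standing for whatever size was registered for the buffer type:

    auto allocator = store.freeListRawAllocator<EntryT>(typeId);
    auto a = allocator.alloc(clusterSize);      // OK: matches the registered cluster size
    // allocator.alloc(clusterSize + 1);        // once the free list is non-empty, this would
                                                // trip assert(state.getClusterSize() == numElems)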
diff --git a/searchlib/src/vespa/searchlib/datastore/free_list_raw_allocator.hpp b/searchlib/src/vespa/searchlib/datastore/free_list_raw_allocator.hpp
new file mode 100644
index 00000000000..662580ce4af
--- /dev/null
+++ b/searchlib/src/vespa/searchlib/datastore/free_list_raw_allocator.hpp
@@ -0,0 +1,34 @@
+// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include "free_list_raw_allocator.h"
+
+namespace search::datastore {
+
+template <typename EntryT, typename RefT>
+FreeListRawAllocator<EntryT, RefT>::FreeListRawAllocator(DataStoreBase &store, uint32_t typeId)
+ : ParentType(store, typeId)
+{
+}
+
+template <typename EntryT, typename RefT>
+typename FreeListRawAllocator<EntryT, RefT>::HandleType
+FreeListRawAllocator<EntryT, RefT>::alloc(size_t numElems)
+{
+ BufferState::FreeListList &freeListList = _store.getFreeList(_typeId);
+ if (freeListList._head == nullptr) {
+ return ParentType::alloc(numElems);
+ }
+ BufferState &state = *freeListList._head;
+ assert(state.isActive());
+ assert(state.getClusterSize() == numElems);
+ RefT ref = state.popFreeList();
+ // If the entry ref type is not aligned, we must scale the offset by the cluster size, since the offset was divided by the cluster size when the entry ref was created.
+ uint64_t offset = !RefT::isAlignedType ? ref.offset() * state.getClusterSize() : ref.offset();
+ EntryT *entry = _store.template getBufferEntry<EntryT>(ref.bufferId(), offset);
+ return HandleType(ref, entry);
+}
+
+}
+
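The offset scaling in alloc() deserves a worked example. For non-aligned ref types, the offset stored in an entry ref was divided by the cluster size when the ref was created, so the ref popped off the free list carries the divided value and must be multiplied back before indexing into the buffer. A numeric sketch with invented values:

    // Assume clusterSize = 4 and an entry starting at element 12 of its buffer.
    // At creation time the entry ref stored:   12 / 4 = 3
    // After popFreeList():                     ref.offset() == 3
    // Raw element offset recovered in alloc(): 3 * state.getClusterSize() == 12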
diff --git a/searchlib/src/vespa/searchlib/datastore/handle.h b/searchlib/src/vespa/searchlib/datastore/handle.h
index c0dce8d3d75..49eb4843816 100644
--- a/searchlib/src/vespa/searchlib/datastore/handle.h
+++ b/searchlib/src/vespa/searchlib/datastore/handle.h
@@ -16,6 +16,10 @@ struct Handle
EntryT *data;
Handle(EntryRef ref_, EntryT *data_) : ref(ref_), data(data_) {}
Handle() : ref(), data() {}
+ bool operator==(const Handle<EntryT> &rhs) const {
+ return ref == rhs.ref &&
+ data == rhs.data;
+ }
};
}
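This equality operator, together with the operator<< the test file defines for IntHandle, is what lets the migrated tests compare handles wholesale instead of field by field, e.g.:

    auto h3 = allocator.alloc(3);   // expected to reuse h2.ref from the free list
    EXPECT_EQ(h2, h3);              // compares ref and data via Handle::operator==;
                                    // on failure, gtest prints both sides via operator<<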
diff --git a/searchlib/src/vespa/searchlib/datastore/raw_allocator.h b/searchlib/src/vespa/searchlib/datastore/raw_allocator.h
index d0b7d1d1ca2..b7c00f75580 100644
--- a/searchlib/src/vespa/searchlib/datastore/raw_allocator.h
+++ b/searchlib/src/vespa/searchlib/datastore/raw_allocator.h
@@ -18,7 +18,7 @@ class RawAllocator
public:
using HandleType = Handle<EntryT>;
-private:
+protected:
DataStoreBase &_store;
uint32_t _typeId;
diff --git a/searchlib/src/vespa/searchlib/tensor/dense_tensor_store.cpp b/searchlib/src/vespa/searchlib/tensor/dense_tensor_store.cpp
index 18c52c21206..eae0f5364b8 100644
--- a/searchlib/src/vespa/searchlib/tensor/dense_tensor_store.cpp
+++ b/searchlib/src/vespa/searchlib/tensor/dense_tensor_store.cpp
@@ -79,6 +79,10 @@ DenseTensorStore::DenseTensorStore(const ValueType &type)
_emptyCells.resize(_tensorSizeCalc._numBoundCells, 0.0);
_store.addType(&_bufferType);
_store.initActiveBuffers();
+ if (_tensorSizeCalc._numUnboundDims == 0) {
+ // In this case each tensor uses the same amount of memory, and we can reuse previously allocated raw buffers by using free lists.
+ _store.enableFreeLists();
+ }
}
DenseTensorStore::~DenseTensorStore()
@@ -120,7 +124,7 @@ DenseTensorStore::allocRawBuffer(size_t numCells)
{
size_t bufSize = numCells * _tensorSizeCalc._cellSize;
size_t alignedBufSize = alignedSize(numCells);
- auto result = _concreteStore.rawAllocator<char>(_typeId).alloc(alignedBufSize);
+ auto result = _concreteStore.freeListRawAllocator<char>(_typeId).alloc(alignedBufSize);
clearPadAreaAfterBuffer(result.data, bufSize, alignedBufSize, unboundDimSizesSize());
return result;
}
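Whether the dense tensor store can enable free lists is decided purely by the tensor type: with only bound dimensions the cell count, and hence the raw buffer size, is the same for every tensor, which is exactly the equal-size requirement of FreeListRawAllocator. A hedged illustration in terms of tensor type specs:

    // tensor(x[2],y[3])  -- all dimensions bound: every tensor has 6 cells, so freed
    //                       raw buffers are interchangeable and free lists apply.
    // tensor(x[],y[3])   -- 'x' is unbound: buffer sizes vary per tensor, so free lists
    //                       stay disabled and alloc() falls through to RawAllocator::alloc().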