Diffstat (limited to 'searchlib')
-rw-r--r--  searchlib/CMakeLists.txt | 2
-rw-r--r--  searchlib/src/tests/attribute/tensorattribute/tensorattribute_test.cpp | 4
-rw-r--r--  searchlib/src/tests/datastore/datastore/CMakeLists.txt | 1
-rw-r--r--  searchlib/src/tests/datastore/datastore/datastore_test.cpp | 311
-rw-r--r--  searchlib/src/tests/engine/searchapi/searchapi_test.cpp | 15
-rw-r--r--  searchlib/src/tests/tensor/dense_tensor_store/dense_tensor_store_test.cpp | 20
-rw-r--r--  searchlib/src/vespa/searchlib/datastore/datastore.h | 4
-rw-r--r--  searchlib/src/vespa/searchlib/datastore/datastore.hpp | 11
-rw-r--r--  searchlib/src/vespa/searchlib/datastore/entryref.h | 2
-rw-r--r--  searchlib/src/vespa/searchlib/datastore/free_list_raw_allocator.h | 33
-rw-r--r--  searchlib/src/vespa/searchlib/datastore/free_list_raw_allocator.hpp | 34
-rw-r--r--  searchlib/src/vespa/searchlib/datastore/handle.h | 4
-rw-r--r--  searchlib/src/vespa/searchlib/datastore/raw_allocator.h | 2
-rw-r--r--  searchlib/src/vespa/searchlib/datastore/raw_allocator.hpp | 10
-rw-r--r--  searchlib/src/vespa/searchlib/engine/packetconverter.cpp | 2
-rw-r--r--  searchlib/src/vespa/searchlib/engine/request.h | 5
-rw-r--r--  searchlib/src/vespa/searchlib/engine/trace.cpp | 38
-rw-r--r--  searchlib/src/vespa/searchlib/engine/trace.h | 37
-rw-r--r--  searchlib/src/vespa/searchlib/tensor/dense_tensor_store.cpp | 87
-rw-r--r--  searchlib/src/vespa/searchlib/tensor/dense_tensor_store.h | 33
20 files changed, 438 insertions(+), 217 deletions(-)
diff --git a/searchlib/CMakeLists.txt b/searchlib/CMakeLists.txt
index c55aadb5eae..11863bead42 100644
--- a/searchlib/CMakeLists.txt
+++ b/searchlib/CMakeLists.txt
@@ -16,7 +16,7 @@ vespa_define_module(
searchcommon
EXTERNAL_DEPENDS
- rt
+ ${VESPA_GLIBC_RT_LIB}
LIBS
src/vespa/searchlib
diff --git a/searchlib/src/tests/attribute/tensorattribute/tensorattribute_test.cpp b/searchlib/src/tests/attribute/tensorattribute/tensorattribute_test.cpp
index 2e339a069b6..bd814b0ad32 100644
--- a/searchlib/src/tests/attribute/tensorattribute/tensorattribute_test.cpp
+++ b/searchlib/src/tests/attribute/tensorattribute/tensorattribute_test.cpp
@@ -307,6 +307,10 @@ Fixture::testSaveLoad()
void
Fixture::testCompaction()
{
+ if (_useDenseTensorAttribute && _denseTensors && !_cfg.tensorType().is_abstract()) {
+ LOG(info, "Skipping compaction test for tensor '%s' which is using free-lists", _cfg.tensorType().to_spec().c_str());
+ return;
+ }
ensureSpace(4);
Tensor::UP emptytensor = _tensorAttr->getEmptyTensor();
Tensor::UP emptyxytensor = createTensor({}, {"x", "y"});
diff --git a/searchlib/src/tests/datastore/datastore/CMakeLists.txt b/searchlib/src/tests/datastore/datastore/CMakeLists.txt
index b8922fff563..1bc6210a13f 100644
--- a/searchlib/src/tests/datastore/datastore/CMakeLists.txt
+++ b/searchlib/src/tests/datastore/datastore/CMakeLists.txt
@@ -4,5 +4,6 @@ vespa_add_executable(searchlib_datastore_test_app TEST
datastore_test.cpp
DEPENDS
searchlib
+ gtest
)
vespa_add_test(NAME searchlib_datastore_test_app COMMAND searchlib_datastore_test_app)
diff --git a/searchlib/src/tests/datastore/datastore/datastore_test.cpp b/searchlib/src/tests/datastore/datastore/datastore_test.cpp
index b312d2bfe55..aa26afa3077 100644
--- a/searchlib/src/tests/datastore/datastore/datastore_test.cpp
+++ b/searchlib/src/tests/datastore/datastore/datastore_test.cpp
@@ -1,15 +1,14 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include <vespa/vespalib/testkit/testapp.h>
#include <vespa/searchlib/datastore/datastore.h>
#include <vespa/searchlib/datastore/datastore.hpp>
+#include <vespa/vespalib/gtest/gtest.h>
#include <vespa/vespalib/test/insertion_operators.h>
#include <vespa/log/log.h>
LOG_SETUP("datastore_test");
-namespace search {
-namespace datastore {
+namespace search::datastore {
using vespalib::alloc::MemoryAllocator;
@@ -91,6 +90,9 @@ public:
}
~GrowStore() { _store.dropBuffers(); }
+ Store &store() { return _store; }
+ uint32_t typeId() const { return _typeId; }
+
GrowthStats getGrowthStats(size_t bufs) {
GrowthStats sizes;
int prevBufferId = -1;
@@ -136,95 +138,94 @@ public:
using MyRef = MyStore::RefType;
-bool
+void
assertMemStats(const DataStoreBase::MemStats &exp,
const DataStoreBase::MemStats &act)
{
- if (!EXPECT_EQUAL(exp._allocElems, act._allocElems)) return false;
- if (!EXPECT_EQUAL(exp._usedElems, act._usedElems)) return false;
- if (!EXPECT_EQUAL(exp._deadElems, act._deadElems)) return false;
- if (!EXPECT_EQUAL(exp._holdElems, act._holdElems)) return false;
- if (!EXPECT_EQUAL(exp._freeBuffers, act._freeBuffers)) return false;
- if (!EXPECT_EQUAL(exp._activeBuffers, act._activeBuffers)) return false;
- if (!EXPECT_EQUAL(exp._holdBuffers, act._holdBuffers)) return false;
- return true;
+ EXPECT_EQ(exp._allocElems, act._allocElems);
+ EXPECT_EQ(exp._usedElems, act._usedElems);
+ EXPECT_EQ(exp._deadElems, act._deadElems);
+ EXPECT_EQ(exp._holdElems, act._holdElems);
+ EXPECT_EQ(exp._freeBuffers, act._freeBuffers);
+ EXPECT_EQ(exp._activeBuffers, act._activeBuffers);
+ EXPECT_EQ(exp._holdBuffers, act._holdBuffers);
}
-TEST("require that entry ref is working")
+TEST(DataStoreTest, require_that_entry_ref_is_working)
{
using MyRefType = EntryRefT<22>;
- EXPECT_EQUAL(4194304u, MyRefType::offsetSize());
- EXPECT_EQUAL(1024u, MyRefType::numBuffers());
+ EXPECT_EQ(4194304u, MyRefType::offsetSize());
+ EXPECT_EQ(1024u, MyRefType::numBuffers());
{
MyRefType r(0, 0);
- EXPECT_EQUAL(0u, r.offset());
- EXPECT_EQUAL(0u, r.bufferId());
+ EXPECT_EQ(0u, r.offset());
+ EXPECT_EQ(0u, r.bufferId());
}
{
MyRefType r(237, 13);
- EXPECT_EQUAL(237u, r.offset());
- EXPECT_EQUAL(13u, r.bufferId());
+ EXPECT_EQ(237u, r.offset());
+ EXPECT_EQ(13u, r.bufferId());
}
{
MyRefType r(4194303, 1023);
- EXPECT_EQUAL(4194303u, r.offset());
- EXPECT_EQUAL(1023u, r.bufferId());
+ EXPECT_EQ(4194303u, r.offset());
+ EXPECT_EQ(1023u, r.bufferId());
}
{
MyRefType r1(6498, 76);
MyRefType r2(r1);
- EXPECT_EQUAL(r1.offset(), r2.offset());
- EXPECT_EQUAL(r1.bufferId(), r2.bufferId());
+ EXPECT_EQ(r1.offset(), r2.offset());
+ EXPECT_EQ(r1.bufferId(), r2.bufferId());
}
}
-TEST("require that aligned entry ref is working")
+TEST(DataStoreTest, require_that_aligned_entry_ref_is_working)
{
using MyRefType = AlignedEntryRefT<22, 2>; // 4 byte alignment
- EXPECT_EQUAL(4 * 4194304u, MyRefType::offsetSize());
- EXPECT_EQUAL(1024u, MyRefType::numBuffers());
- EXPECT_EQUAL(0u, MyRefType::align(0));
- EXPECT_EQUAL(4u, MyRefType::align(1));
- EXPECT_EQUAL(4u, MyRefType::align(2));
- EXPECT_EQUAL(4u, MyRefType::align(3));
- EXPECT_EQUAL(4u, MyRefType::align(4));
- EXPECT_EQUAL(8u, MyRefType::align(5));
+ EXPECT_EQ(4 * 4194304u, MyRefType::offsetSize());
+ EXPECT_EQ(1024u, MyRefType::numBuffers());
+ EXPECT_EQ(0u, MyRefType::align(0));
+ EXPECT_EQ(4u, MyRefType::align(1));
+ EXPECT_EQ(4u, MyRefType::align(2));
+ EXPECT_EQ(4u, MyRefType::align(3));
+ EXPECT_EQ(4u, MyRefType::align(4));
+ EXPECT_EQ(8u, MyRefType::align(5));
{
MyRefType r(0, 0);
- EXPECT_EQUAL(0u, r.offset());
- EXPECT_EQUAL(0u, r.bufferId());
+ EXPECT_EQ(0u, r.offset());
+ EXPECT_EQ(0u, r.bufferId());
}
{
MyRefType r(237, 13);
- EXPECT_EQUAL(MyRefType::align(237), r.offset());
- EXPECT_EQUAL(13u, r.bufferId());
+ EXPECT_EQ(MyRefType::align(237), r.offset());
+ EXPECT_EQ(13u, r.bufferId());
}
{
MyRefType r(MyRefType::offsetSize() - 4, 1023);
- EXPECT_EQUAL(MyRefType::align(MyRefType::offsetSize() - 4), r.offset());
- EXPECT_EQUAL(1023u, r.bufferId());
+ EXPECT_EQ(MyRefType::align(MyRefType::offsetSize() - 4), r.offset());
+ EXPECT_EQ(1023u, r.bufferId());
}
}
-TEST("require that entries can be added and retrieved")
+TEST(DataStoreTest, require_that_entries_can_be_added_and_retrieved)
{
using IntStore = DataStore<int>;
IntStore ds;
EntryRef r1 = ds.addEntry(10);
EntryRef r2 = ds.addEntry(20);
EntryRef r3 = ds.addEntry(30);
- EXPECT_EQUAL(1u, IntStore::RefType(r1).offset());
- EXPECT_EQUAL(2u, IntStore::RefType(r2).offset());
- EXPECT_EQUAL(3u, IntStore::RefType(r3).offset());
- EXPECT_EQUAL(0u, IntStore::RefType(r1).bufferId());
- EXPECT_EQUAL(0u, IntStore::RefType(r2).bufferId());
- EXPECT_EQUAL(0u, IntStore::RefType(r3).bufferId());
- EXPECT_EQUAL(10, ds.getEntry(r1));
- EXPECT_EQUAL(20, ds.getEntry(r2));
- EXPECT_EQUAL(30, ds.getEntry(r3));
+ EXPECT_EQ(1u, IntStore::RefType(r1).offset());
+ EXPECT_EQ(2u, IntStore::RefType(r2).offset());
+ EXPECT_EQ(3u, IntStore::RefType(r3).offset());
+ EXPECT_EQ(0u, IntStore::RefType(r1).bufferId());
+ EXPECT_EQ(0u, IntStore::RefType(r2).bufferId());
+ EXPECT_EQ(0u, IntStore::RefType(r3).bufferId());
+ EXPECT_EQ(10, ds.getEntry(r1));
+ EXPECT_EQ(20, ds.getEntry(r2));
+ EXPECT_EQ(30, ds.getEntry(r3));
}
-TEST("require that add entry triggers change of buffer")
+TEST(DataStoreTest, require_that_add_entry_triggers_change_of_buffer)
{
using Store = DataStore<uint64_t, EntryRefT<10, 10> >;
Store s;
@@ -233,12 +234,11 @@ TEST("require that add entry triggers change of buffer")
uint64_t lastNum = 0;
for (;;++num) {
EntryRef r = s.addEntry(num);
- EXPECT_EQUAL(num, s.getEntry(r));
+ EXPECT_EQ(num, s.getEntry(r));
uint32_t bufferId = Store::RefType(r).bufferId();
if (bufferId > lastId) {
LOG(info, "Changed to bufferId %u after %" PRIu64 " nums", bufferId, num);
- EXPECT_EQUAL(Store::RefType::offsetSize() - (lastId == 0),
- num - lastNum);
+ EXPECT_EQ(Store::RefType::offsetSize() - (lastId == 0), num - lastNum);
lastId = bufferId;
lastNum = num;
}
@@ -246,32 +246,32 @@ TEST("require that add entry triggers change of buffer")
break;
}
}
- EXPECT_EQUAL(Store::RefType::offsetSize() * 2 - 1, num);
+ EXPECT_EQ(Store::RefType::offsetSize() * 2 - 1, num);
LOG(info, "Added %" PRIu64 " nums in 2 buffers", num);
}
-TEST("require that we can hold and trim buffers")
+TEST(DataStoreTest, require_that_we_can_hold_and_trim_buffers)
{
MyStore s;
- EXPECT_EQUAL(0u, MyRef(s.addEntry(1)).bufferId());
+ EXPECT_EQ(0u, MyRef(s.addEntry(1)).bufferId());
s.switchActiveBuffer();
- EXPECT_EQUAL(1u, s.activeBufferId());
+ EXPECT_EQ(1u, s.activeBufferId());
s.holdBuffer(0); // hold last buffer
s.transferHoldLists(10);
- EXPECT_EQUAL(1u, MyRef(s.addEntry(2)).bufferId());
+ EXPECT_EQ(1u, MyRef(s.addEntry(2)).bufferId());
s.switchActiveBuffer();
- EXPECT_EQUAL(2u, s.activeBufferId());
+ EXPECT_EQ(2u, s.activeBufferId());
s.holdBuffer(1); // hold last buffer
s.transferHoldLists(20);
- EXPECT_EQUAL(2u, MyRef(s.addEntry(3)).bufferId());
+ EXPECT_EQ(2u, MyRef(s.addEntry(3)).bufferId());
s.switchActiveBuffer();
- EXPECT_EQUAL(3u, s.activeBufferId());
+ EXPECT_EQ(3u, s.activeBufferId());
s.holdBuffer(2); // hold last buffer
s.transferHoldLists(30);
- EXPECT_EQUAL(3u, MyRef(s.addEntry(4)).bufferId());
+ EXPECT_EQ(3u, MyRef(s.addEntry(4)).bufferId());
s.holdBuffer(3); // hold current buffer
s.transferHoldLists(40);
@@ -286,8 +286,8 @@ TEST("require that we can hold and trim buffers")
EXPECT_TRUE(s.getBufferState(3).size() != 0);
s.switchActiveBuffer();
- EXPECT_EQUAL(0u, s.activeBufferId());
- EXPECT_EQUAL(0u, MyRef(s.addEntry(5)).bufferId());
+ EXPECT_EQ(0u, s.activeBufferId());
+ EXPECT_EQ(0u, MyRef(s.addEntry(5)).bufferId());
s.trimHoldLists(41);
EXPECT_TRUE(s.getBufferState(0).size() != 0);
EXPECT_TRUE(s.getBufferState(1).size() == 0);
@@ -295,7 +295,7 @@ TEST("require that we can hold and trim buffers")
EXPECT_TRUE(s.getBufferState(3).size() == 0);
}
-TEST("require that we can hold and trim elements")
+TEST(DataStoreTest, require_that_we_can_hold_and_trim_elements)
{
MyStore s;
MyRef r1 = s.addEntry(1);
@@ -307,26 +307,42 @@ TEST("require that we can hold and trim elements")
MyRef r3 = s.addEntry(3);
s.holdElem(r3, 1);
s.transferHoldLists(30);
- EXPECT_EQUAL(1, s.getEntry(r1));
- EXPECT_EQUAL(2, s.getEntry(r2));
- EXPECT_EQUAL(3, s.getEntry(r3));
+ EXPECT_EQ(1, s.getEntry(r1));
+ EXPECT_EQ(2, s.getEntry(r2));
+ EXPECT_EQ(3, s.getEntry(r3));
s.trimElemHoldList(11);
- EXPECT_EQUAL(0, s.getEntry(r1));
- EXPECT_EQUAL(2, s.getEntry(r2));
- EXPECT_EQUAL(3, s.getEntry(r3));
+ EXPECT_EQ(0, s.getEntry(r1));
+ EXPECT_EQ(2, s.getEntry(r2));
+ EXPECT_EQ(3, s.getEntry(r3));
s.trimElemHoldList(31);
- EXPECT_EQUAL(0, s.getEntry(r1));
- EXPECT_EQUAL(0, s.getEntry(r2));
- EXPECT_EQUAL(0, s.getEntry(r3));
+ EXPECT_EQ(0, s.getEntry(r1));
+ EXPECT_EQ(0, s.getEntry(r2));
+ EXPECT_EQ(0, s.getEntry(r3));
}
+using IntHandle = Handle<int>;
+
MyRef
-toRef(Handle<int> handle)
+to_ref(IntHandle handle)
{
return MyRef(handle.ref);
}
-TEST("require that we can use free lists")
+std::ostream&
+operator<<(std::ostream &os, const IntHandle &rhs)
+{
+ MyRef ref(rhs.ref);
+ os << "{ref.bufferId=" << ref.bufferId() << ", ref.offset=" << ref.offset() << ", data=" << rhs.data << "}";
+ return os;
+}
+
+void
+expect_successive_handles(const IntHandle &first, const IntHandle &second)
+{
+ EXPECT_EQ(to_ref(first).offset() + 1, to_ref(second).offset());
+}
+
+TEST(DataStoreTest, require_that_we_can_use_free_lists)
{
MyStore s;
s.enableFreeLists();
@@ -335,29 +351,54 @@ TEST("require that we can use free lists")
s.holdElem(h1.ref, 1);
s.transferHoldLists(10);
auto h2 = allocator.alloc(2);
+ expect_successive_handles(h1, h2);
s.holdElem(h2.ref, 1);
s.transferHoldLists(20);
s.trimElemHoldList(11);
auto h3 = allocator.alloc(3); // reuse h1.ref
- EXPECT_EQUAL(toRef(h1).offset(), toRef(h3).offset());
- EXPECT_EQUAL(toRef(h1).bufferId(), toRef(h3).bufferId());
+ EXPECT_EQ(h1, h3);
auto h4 = allocator.alloc(4);
- EXPECT_EQUAL(toRef(h2).offset() + 1, toRef(h4).offset());
+ expect_successive_handles(h2, h4);
s.trimElemHoldList(21);
auto h5 = allocator.alloc(5); // reuse h2.ref
- EXPECT_EQUAL(toRef(h2).offset(), toRef(h5).offset());
- EXPECT_EQUAL(toRef(h2).bufferId(), toRef(h5).bufferId());
+ EXPECT_EQ(h2, h5);
auto h6 = allocator.alloc(6);
- EXPECT_EQUAL(toRef(h4).offset() + 1, toRef(h6).offset());
- EXPECT_EQUAL(3, s.getEntry(h1.ref));
- EXPECT_EQUAL(5, s.getEntry(h2.ref));
- EXPECT_EQUAL(3, s.getEntry(h3.ref));
- EXPECT_EQUAL(4, s.getEntry(h4.ref));
- EXPECT_EQUAL(5, s.getEntry(h5.ref));
- EXPECT_EQUAL(6, s.getEntry(h6.ref));
+ expect_successive_handles(h4, h6);
+ EXPECT_EQ(3, s.getEntry(h1.ref));
+ EXPECT_EQ(5, s.getEntry(h2.ref));
+ EXPECT_EQ(3, s.getEntry(h3.ref));
+ EXPECT_EQ(4, s.getEntry(h4.ref));
+ EXPECT_EQ(5, s.getEntry(h5.ref));
+ EXPECT_EQ(6, s.getEntry(h6.ref));
+}
+
+TEST(DataStoreTest, require_that_we_can_use_free_lists_with_raw_allocator)
+{
+ GrowStore<int, MyRef> grow_store(3, 64, 64, 64);
+ auto &s = grow_store.store();
+ s.enableFreeLists();
+ auto allocator = s.freeListRawAllocator<int>(grow_store.typeId());
+
+ auto h1 = allocator.alloc(3);
+ auto h2 = allocator.alloc(3);
+ expect_successive_handles(h1, h2);
+ s.holdElem(h1.ref, 3);
+ s.holdElem(h2.ref, 3);
+ s.transferHoldLists(10);
+ s.trimElemHoldList(11);
+
+ auto h3 = allocator.alloc(3); // reuse h2.ref from free list
+ EXPECT_EQ(h2, h3);
+
+ auto h4 = allocator.alloc(3); // reuse h1.ref from free list
+ EXPECT_EQ(h1, h4);
+
+ auto h5 = allocator.alloc(3);
+ expect_successive_handles(h2, h5);
+ expect_successive_handles(h3, h5);
}
-TEST("require that memory stats are calculated")
+TEST(DataStoreTest, require_that_memory_stats_are_calculated)
{
MyStore s;
DataStoreBase::MemStats m;
@@ -368,17 +409,17 @@ TEST("require that memory stats are calculated")
m._activeBuffers = 1;
m._freeBuffers = MyRef::numBuffers() - 1;
m._holdBuffers = 0;
- EXPECT_TRUE(assertMemStats(m, s.getMemStats()));
+ assertMemStats(m, s.getMemStats());
// add entry
MyRef r = s.addEntry(10);
m._usedElems++;
- EXPECT_TRUE(assertMemStats(m, s.getMemStats()));
+ assertMemStats(m, s.getMemStats());
// inc dead
s.incDead(r, 1);
m._deadElems++;
- EXPECT_TRUE(assertMemStats(m, s.getMemStats()));
+ assertMemStats(m, s.getMemStats());
// hold buffer
s.addEntry(20);
@@ -389,7 +430,7 @@ TEST("require that memory stats are calculated")
m._holdElems += 2; // used - dead
m._activeBuffers--;
m._holdBuffers++;
- EXPECT_TRUE(assertMemStats(m, s.getMemStats()));
+ assertMemStats(m, s.getMemStats());
// new active buffer
s.switchActiveBuffer();
@@ -407,10 +448,10 @@ TEST("require that memory stats are calculated")
m._holdElems = 0;
m._freeBuffers = MyRef::numBuffers() - 1;
m._holdBuffers = 0;
- EXPECT_TRUE(assertMemStats(m, s.getMemStats()));
+ assertMemStats(m, s.getMemStats());
}
-TEST("require that memory usage is calculated")
+TEST(DataStoreTest, require_that_memory_usage_is_calculated)
{
MyStore s;
MyRef r = s.addEntry(10);
@@ -421,14 +462,14 @@ TEST("require that memory usage is calculated")
s.holdBuffer(r.bufferId());
s.transferHoldLists(100);
MemoryUsage m = s.getMemoryUsage();
- EXPECT_EQUAL(MyRef::offsetSize() * sizeof(int), m.allocatedBytes());
- EXPECT_EQUAL(5 * sizeof(int), m.usedBytes());
- EXPECT_EQUAL(2 * sizeof(int), m.deadBytes());
- EXPECT_EQUAL(3 * sizeof(int), m.allocatedBytesOnHold());
+ EXPECT_EQ(MyRef::offsetSize() * sizeof(int), m.allocatedBytes());
+ EXPECT_EQ(5 * sizeof(int), m.usedBytes());
+ EXPECT_EQ(2 * sizeof(int), m.deadBytes());
+ EXPECT_EQ(3 * sizeof(int), m.allocatedBytesOnHold());
s.trimHoldLists(101);
}
-TEST("require that we can disable elemement hold list")
+TEST(DataStoreTest, require_that_we_can_disable_element_hold_list)
{
MyStore s;
MyRef r1 = s.addEntry(10);
@@ -436,23 +477,23 @@ TEST("require that we can disable elemement hold list")
MyRef r3 = s.addEntry(30);
(void) r3;
MemoryUsage m = s.getMemoryUsage();
- EXPECT_EQUAL(MyRef::offsetSize() * sizeof(int), m.allocatedBytes());
- EXPECT_EQUAL(4 * sizeof(int), m.usedBytes());
- EXPECT_EQUAL(1 * sizeof(int), m.deadBytes());
- EXPECT_EQUAL(0 * sizeof(int), m.allocatedBytesOnHold());
+ EXPECT_EQ(MyRef::offsetSize() * sizeof(int), m.allocatedBytes());
+ EXPECT_EQ(4 * sizeof(int), m.usedBytes());
+ EXPECT_EQ(1 * sizeof(int), m.deadBytes());
+ EXPECT_EQ(0 * sizeof(int), m.allocatedBytesOnHold());
s.holdElem(r1, 1);
m = s.getMemoryUsage();
- EXPECT_EQUAL(MyRef::offsetSize() * sizeof(int), m.allocatedBytes());
- EXPECT_EQUAL(4 * sizeof(int), m.usedBytes());
- EXPECT_EQUAL(1 * sizeof(int), m.deadBytes());
- EXPECT_EQUAL(1 * sizeof(int), m.allocatedBytesOnHold());
+ EXPECT_EQ(MyRef::offsetSize() * sizeof(int), m.allocatedBytes());
+ EXPECT_EQ(4 * sizeof(int), m.usedBytes());
+ EXPECT_EQ(1 * sizeof(int), m.deadBytes());
+ EXPECT_EQ(1 * sizeof(int), m.allocatedBytesOnHold());
s.disableElemHoldList();
s.holdElem(r2, 1);
m = s.getMemoryUsage();
- EXPECT_EQUAL(MyRef::offsetSize() * sizeof(int), m.allocatedBytes());
- EXPECT_EQUAL(4 * sizeof(int), m.usedBytes());
- EXPECT_EQUAL(2 * sizeof(int), m.deadBytes());
- EXPECT_EQUAL(1 * sizeof(int), m.allocatedBytesOnHold());
+ EXPECT_EQ(MyRef::offsetSize() * sizeof(int), m.allocatedBytes());
+ EXPECT_EQ(4 * sizeof(int), m.usedBytes());
+ EXPECT_EQ(2 * sizeof(int), m.deadBytes());
+ EXPECT_EQ(1 * sizeof(int), m.allocatedBytesOnHold());
s.transferHoldLists(100);
s.trimHoldLists(101);
}
@@ -466,39 +507,39 @@ void assertGrowStats(GrowthStats expSizes,
size_t expInitMemUsage,
size_t minClusters, size_t numClustersForNewBuffer, size_t maxClusters = 128)
{
- EXPECT_EQUAL(expSizes, IntGrowStore(1, minClusters, maxClusters, numClustersForNewBuffer).getGrowthStats(expSizes.size()));
- EXPECT_EQUAL(expFirstBufSizes, IntGrowStore(1, minClusters, maxClusters, numClustersForNewBuffer).getFirstBufGrowStats());
- EXPECT_EQUAL(expInitMemUsage, IntGrowStore(1, minClusters, maxClusters, numClustersForNewBuffer).getMemoryUsage().allocatedBytes());
+ EXPECT_EQ(expSizes, IntGrowStore(1, minClusters, maxClusters, numClustersForNewBuffer).getGrowthStats(expSizes.size()));
+ EXPECT_EQ(expFirstBufSizes, IntGrowStore(1, minClusters, maxClusters, numClustersForNewBuffer).getFirstBufGrowStats());
+ EXPECT_EQ(expInitMemUsage, IntGrowStore(1, minClusters, maxClusters, numClustersForNewBuffer).getMemoryUsage().allocatedBytes());
}
}
-TEST("require that buffer growth works")
+TEST(DataStoreTest, require_that_buffer_growth_works)
{
// Always switch to new buffer, min size 4
- TEST_DO(assertGrowStats({ 4, 4, 4, 4, 8, 16, 16, 32, 64, 64 },
- { 4 }, 20, 4, 0));
+ assertGrowStats({ 4, 4, 4, 4, 8, 16, 16, 32, 64, 64 },
+ { 4 }, 20, 4, 0);
// Resize if buffer size is less than 4, min size 0
- TEST_DO(assertGrowStats({ 4, 4, 4, 4, 8, 16, 16, 32, 64, 64 },
- { 0, 1, 2, 4 }, 4, 0, 4));
+ assertGrowStats({ 4, 4, 4, 4, 8, 16, 16, 32, 64, 64 },
+ { 0, 1, 2, 4 }, 4, 0, 4);
// Always switch to new buffer, min size 16
- TEST_DO(assertGrowStats({ 16, 16, 16, 32, 32, 64, 128, 128, 128 },
- { 16 }, 68, 16, 0));
+ assertGrowStats({ 16, 16, 16, 32, 32, 64, 128, 128, 128 },
+ { 16 }, 68, 16, 0);
// Resize if buffer size is less than 16, min size 0
- TEST_DO(assertGrowStats({ 16, 16, 16, 32, 32, 64, 128, 128, 128 },
- { 0, 1, 2, 4, 8, 16 }, 4, 0, 16));
+ assertGrowStats({ 16, 16, 16, 32, 32, 64, 128, 128, 128 },
+ { 0, 1, 2, 4, 8, 16 }, 4, 0, 16);
// Resize if buffer size is less than 16, min size 4
- TEST_DO(assertGrowStats({ 16, 16, 16, 32, 32, 64, 128, 128, 128 },
- { 4, 8, 16 }, 20, 4, 16));
+ assertGrowStats({ 16, 16, 16, 32, 32, 64, 128, 128, 128 },
+ { 4, 8, 16 }, 20, 4, 16);
// Always switch to new buffer, min size 0
- TEST_DO(assertGrowStats({ 1, 1, 1, 1, 1, 2, 2, 4, 8, 8, 16, 32 },
- { 0, 1 }, 4, 0, 0));
+ assertGrowStats({ 1, 1, 1, 1, 1, 2, 2, 4, 8, 8, 16, 32 },
+ { 0, 1 }, 4, 0, 0);
// Buffers with sizes larger than the huge page size of the mmap allocator.
- ASSERT_EQUAL(524288u, HUGE_PAGE_CLUSTER_SIZE);
- TEST_DO(assertGrowStats({ 262144, 262144, 262144, 524288, 524288, 524288 * 2, 524288 * 3, 524288 * 4, 524288 * 5, 524288 * 5 },
- { 0, 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072, 262144 },
- 4, 0, HUGE_PAGE_CLUSTER_SIZE / 2, HUGE_PAGE_CLUSTER_SIZE * 5));
+ ASSERT_EQ(524288u, HUGE_PAGE_CLUSTER_SIZE);
+ assertGrowStats({ 262144, 262144, 262144, 524288, 524288, 524288 * 2, 524288 * 3, 524288 * 4, 524288 * 5, 524288 * 5 },
+ { 0, 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072, 262144 },
+ 4, 0, HUGE_PAGE_CLUSTER_SIZE / 2, HUGE_PAGE_CLUSTER_SIZE * 5);
}
using RefType15 = EntryRefT<15>; // offsetSize=32768
@@ -512,12 +553,12 @@ void assertGrowStats(GrowthStats expSizes, uint32_t clusterSize)
uint32_t maxClusters = RefType15::offsetSize();
uint32_t numClustersForNewBuffer = 2048;
GrowStore<DataType, RefType15> store(clusterSize, minClusters, maxClusters, numClustersForNewBuffer);
- EXPECT_EQUAL(expSizes, store.getGrowthStats(expSizes.size()));
+ EXPECT_EQ(expSizes, store.getGrowthStats(expSizes.size()));
}
}
-TEST("require that offset in EntryRefT is within bounds when allocating memory buffers where wanted number of bytes is not a power of 2 and less than huge page size")
+TEST(DataStoreTest, require_that_offset_in_EntryRefT_is_within_bounds_when_allocating_memory_buffers_where_wanted_number_of_bytes_is_not_a_power_of_2_and_less_than_huge_page_size)
{
/*
* When allocating new memory buffers for the data store the following happens (ref. calcAllocation() in bufferstate.cpp):
@@ -539,7 +580,5 @@ TEST("require that offset in EntryRefT is within bounds when allocating memory b
}
}
-}
-
-TEST_MAIN() { TEST_RUN_ALL(); }
+GTEST_MAIN_RUN_ALL_TESTS()
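
Because gtest's EXPECT_EQ has no usable return value, assertMemStats above is changed from returning bool to void. A minimal caller-side sketch (a common gtest pattern, not part of this change; `expected` and `s` stand in for values from the surrounding test) showing how SCOPED_TRACE keeps the call site visible when such a void helper fails:

    // Sketch: wrap a void gtest helper in SCOPED_TRACE so any EXPECT_EQ
    // failure inside the helper is reported together with this label.
    {
        SCOPED_TRACE("after holdBuffer");
        assertMemStats(expected, s.getMemStats());
    }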
diff --git a/searchlib/src/tests/engine/searchapi/searchapi_test.cpp b/searchlib/src/tests/engine/searchapi/searchapi_test.cpp
index 2f2cf4f22e5..ed103bf501c 100644
--- a/searchlib/src/tests/engine/searchapi/searchapi_test.cpp
+++ b/searchlib/src/tests/engine/searchapi/searchapi_test.cpp
@@ -237,27 +237,25 @@ void verify(vespalib::stringref expected, const vespalib::Slime & slime) {
TEST("verify trace") {
RelativeTime clock(std::make_unique<CountingClock>(fastos::TimeStamp::fromSec(1500000000), 1700000L));
Trace t(clock);
- verify("{"
- " traces: ["
- " ],"
- " start_time_utc: '2017-07-14 02:40:00.000 UTC'"
- "}",
- t.getSlime());
+ EXPECT_FALSE(t.hasTrace());
+ t.start(0);
+ EXPECT_TRUE(t.hasTrace());
t.createCursor("tag_a");
verify("{"
+ " start_time_utc: '2017-07-14 02:40:00.000 UTC',"
" traces: ["
" {"
" tag: 'tag_a',"
" timestamp_ms: 1.7"
" }"
- " ],"
- " start_time_utc: '2017-07-14 02:40:00.000 UTC'"
+ " ]"
"}",
t.getSlime());
Trace::Cursor & tagB = t.createCursor("tag_b");
tagB.setLong("long", 19);
t.done();
verify("{"
+ " start_time_utc: '2017-07-14 02:40:00.000 UTC',"
" traces: ["
" {"
" tag: 'tag_a',"
@@ -269,7 +267,6 @@ TEST("verify trace") {
" long: 19"
" }"
" ],"
- " start_time_utc: '2017-07-14 02:40:00.000 UTC',"
" duration_ms: 5.1"
"}",
t.getSlime());
diff --git a/searchlib/src/tests/tensor/dense_tensor_store/dense_tensor_store_test.cpp b/searchlib/src/tests/tensor/dense_tensor_store/dense_tensor_store_test.cpp
index 2e88f0e90b0..ab43e19251a 100644
--- a/searchlib/src/tests/tensor/dense_tensor_store/dense_tensor_store_test.cpp
+++ b/searchlib/src/tests/tensor/dense_tensor_store/dense_tensor_store_test.cpp
@@ -111,5 +111,25 @@ TEST_F("require that empty 3d tensor has size 1 in un-bound dimensions", Fixture
add({{"x", 0}, {"y", 1}, {"z", 0}}, 0));
}
+void
+assertClusterSize(const vespalib::string &tensorType, uint32_t expClusterSize) {
+ Fixture f(tensorType);
+ EXPECT_EQUAL(expClusterSize, f.store.getClusterSize());
+}
+
+TEST("require that cluster size is calculated correctly")
+{
+ TEST_DO(assertClusterSize("tensor(x[1])", 32));
+ TEST_DO(assertClusterSize("tensor(x[10])", 96));
+ TEST_DO(assertClusterSize("tensor(x[3])", 32));
+ TEST_DO(assertClusterSize("tensor(x[3],y[])", 32));
+ TEST_DO(assertClusterSize("tensor(x[3],y[],z[])", 32));
+ TEST_DO(assertClusterSize("tensor(x[3],y[],z[],z2[])", 64));
+ TEST_DO(assertClusterSize("tensor(x[10],y[10])", 800));
+ TEST_DO(assertClusterSize("tensor(x[])", 32));
+ TEST_DO(assertClusterSize("tensor(x[],x2[],x3[],x4[],x5[],x6[])", 32));
+ TEST_DO(assertClusterSize("tensor(x[],x2[],x3[],x4[],x5[],x6[],x7[])", 64));
+}
+
TEST_MAIN() { TEST_RUN_ALL(); }
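
The expected cluster sizes above follow directly from TensorSizeCalc::clusterSize() further down in this diff: the tensor size is the bound-cell count (product of bound dimension sizes, at least 1) times 8 bytes per double cell, plus 4 bytes per unbound dimension, rounded up to the 32-byte DENSE_TENSOR_ALIGNMENT. A standalone re-computation (a sketch mirroring that formula, not the real class):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    // Mirrors the clusterSize() formula from dense_tensor_store.cpp.
    size_t cluster_size(size_t num_bound_cells, size_t num_unbound_dims) {
        size_t tensor_size = num_bound_cells * sizeof(double)
                           + num_unbound_dims * sizeof(uint32_t);
        size_t padded = tensor_size + 31;   // DENSE_TENSOR_ALIGNMENT - 1
        return padded - (padded % 32);      // round up to 32 bytes
    }

    int main() {
        assert(cluster_size(10, 0) == 96);  // tensor(x[10]): 80 -> 96
        assert(cluster_size(3, 3) == 64);   // tensor(x[3],y[],z[],z2[]): 36 -> 64
        assert(cluster_size(1, 7) == 64);   // 7 unbound dims: 8 + 28 -> 64
    }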
diff --git a/searchlib/src/vespa/searchlib/datastore/datastore.h b/searchlib/src/vespa/searchlib/datastore/datastore.h
index accff67e2fa..316ec34dc85 100644
--- a/searchlib/src/vespa/searchlib/datastore/datastore.h
+++ b/searchlib/src/vespa/searchlib/datastore/datastore.h
@@ -5,6 +5,7 @@
#include "allocator.h"
#include "datastorebase.h"
#include "free_list_allocator.h"
+#include "free_list_raw_allocator.h"
#include "raw_allocator.h"
namespace search::btree {
@@ -75,6 +76,9 @@ public:
template <typename EntryT>
RawAllocator<EntryT, RefT> rawAllocator(uint32_t typeId);
+ template <typename EntryT>
+ FreeListRawAllocator<EntryT, RefT> freeListRawAllocator(uint32_t typeId);
+
};
diff --git a/searchlib/src/vespa/searchlib/datastore/datastore.hpp b/searchlib/src/vespa/searchlib/datastore/datastore.hpp
index 74e3d9be2e0..39706957e2d 100644
--- a/searchlib/src/vespa/searchlib/datastore/datastore.hpp
+++ b/searchlib/src/vespa/searchlib/datastore/datastore.hpp
@@ -2,9 +2,10 @@
#pragma once
-#include "datastore.h"
#include "allocator.hpp"
+#include "datastore.h"
#include "free_list_allocator.hpp"
+#include "free_list_raw_allocator.hpp"
#include "raw_allocator.hpp"
#include <vespa/vespalib/util/array.hpp>
@@ -130,7 +131,13 @@ DataStoreT<RefT>::rawAllocator(uint32_t typeId)
return RawAllocator<EntryT, RefT>(*this, typeId);
}
-
+template <typename RefT>
+template <typename EntryT>
+FreeListRawAllocator<EntryT, RefT>
+DataStoreT<RefT>::freeListRawAllocator(uint32_t typeId)
+{
+ return FreeListRawAllocator<EntryT, RefT>(*this, typeId);
+}
template <typename EntryType, typename RefT>
DataStore<EntryType, RefT>::DataStore()
diff --git a/searchlib/src/vespa/searchlib/datastore/entryref.h b/searchlib/src/vespa/searchlib/datastore/entryref.h
index 457ffac4e26..918e514c8df 100644
--- a/searchlib/src/vespa/searchlib/datastore/entryref.h
+++ b/searchlib/src/vespa/searchlib/datastore/entryref.h
@@ -36,6 +36,7 @@ public:
static uint32_t numBuffers() { return 1 << BufferBits; }
static uint64_t align(uint64_t val) { return val; }
static uint64_t pad(uint64_t val) { (void) val; return 0ul; }
+ static constexpr bool isAlignedType = false;
};
/**
@@ -56,6 +57,7 @@ public:
static uint64_t offsetSize() { return ParentType::offsetSize() << OffsetAlign; }
static uint64_t align(uint64_t val) { return val + pad(val); }
static uint64_t pad(uint64_t val) { return (-val & PadConstant); }
+ static constexpr bool isAlignedType = true;
};
}
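
The new isAlignedType constant lets allocator templates choose an offset-scaling strategy at compile time (see the raw_allocator.hpp hunk below). A minimal sketch, assuming the header above is on the include path:

    #include <vespa/searchlib/datastore/entryref.h>

    using PlainRef   = search::datastore::EntryRefT<22>;           // offsets stored as-is
    using AlignedRef = search::datastore::AlignedEntryRefT<22, 2>; // offsets pre-scaled by 4

    static_assert(!PlainRef::isAlignedType,  "plain refs: allocator must scale by cluster size");
    static_assert(AlignedRef::isAlignedType, "aligned refs: constructor scales the offset");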
diff --git a/searchlib/src/vespa/searchlib/datastore/free_list_raw_allocator.h b/searchlib/src/vespa/searchlib/datastore/free_list_raw_allocator.h
new file mode 100644
index 00000000000..514eecc25a8
--- /dev/null
+++ b/searchlib/src/vespa/searchlib/datastore/free_list_raw_allocator.h
@@ -0,0 +1,33 @@
+// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include "raw_allocator.h"
+
+namespace search::datastore {
+
+/**
+ * Allocator used to allocate raw buffers (EntryT *) in an underlying data store
+ * with no construction or destruction of elements in the buffer. Uses free lists if available.
+ *
+ * If free lists are enabled this allocator should only be used when
+ * allocating the same number of elements each time (equal to cluster size).
+ */
+template <typename EntryT, typename RefT>
+class FreeListRawAllocator : public RawAllocator<EntryT, RefT>
+{
+public:
+ using ParentType = RawAllocator<EntryT, RefT>;
+ using HandleType = typename ParentType::HandleType;
+
+private:
+ using ParentType::_store;
+ using ParentType::_typeId;
+
+public:
+ FreeListRawAllocator(DataStoreBase &store, uint32_t typeId);
+
+ HandleType alloc(size_t numElems);
+};
+
+}
diff --git a/searchlib/src/vespa/searchlib/datastore/free_list_raw_allocator.hpp b/searchlib/src/vespa/searchlib/datastore/free_list_raw_allocator.hpp
new file mode 100644
index 00000000000..662580ce4af
--- /dev/null
+++ b/searchlib/src/vespa/searchlib/datastore/free_list_raw_allocator.hpp
@@ -0,0 +1,34 @@
+// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include "free_list_raw_allocator.h"
+
+namespace search::datastore {
+
+template <typename EntryT, typename RefT>
+FreeListRawAllocator<EntryT, RefT>::FreeListRawAllocator(DataStoreBase &store, uint32_t typeId)
+ : ParentType(store, typeId)
+{
+}
+
+template <typename EntryT, typename RefT>
+typename FreeListRawAllocator<EntryT, RefT>::HandleType
+FreeListRawAllocator<EntryT, RefT>::alloc(size_t numElems)
+{
+ BufferState::FreeListList &freeListList = _store.getFreeList(_typeId);
+ if (freeListList._head == nullptr) {
+ return ParentType::alloc(numElems);
+ }
+ BufferState &state = *freeListList._head;
+ assert(state.isActive());
+ assert(state.getClusterSize() == numElems);
+ RefT ref = state.popFreeList();
+    // If the entry ref is not aligned, we must scale the offset by the cluster size, as the offset was divided by the cluster size when the entry ref was created.
+ uint64_t offset = !RefT::isAlignedType ? ref.offset() * state.getClusterSize() : ref.offset();
+ EntryT *entry = _store.template getBufferEntry<EntryT>(ref.bufferId(), offset);
+ return HandleType(ref, entry);
+}
+
+}
+
diff --git a/searchlib/src/vespa/searchlib/datastore/handle.h b/searchlib/src/vespa/searchlib/datastore/handle.h
index c0dce8d3d75..49eb4843816 100644
--- a/searchlib/src/vespa/searchlib/datastore/handle.h
+++ b/searchlib/src/vespa/searchlib/datastore/handle.h
@@ -16,6 +16,10 @@ struct Handle
EntryT *data;
Handle(EntryRef ref_, EntryT *data_) : ref(ref_), data(data_) {}
Handle() : ref(), data() {}
+ bool operator==(const Handle<EntryT> &rhs) const {
+ return ref == rhs.ref &&
+ data == rhs.data;
+ }
};
}
diff --git a/searchlib/src/vespa/searchlib/datastore/raw_allocator.h b/searchlib/src/vespa/searchlib/datastore/raw_allocator.h
index d0b7d1d1ca2..b7c00f75580 100644
--- a/searchlib/src/vespa/searchlib/datastore/raw_allocator.h
+++ b/searchlib/src/vespa/searchlib/datastore/raw_allocator.h
@@ -18,7 +18,7 @@ class RawAllocator
public:
using HandleType = Handle<EntryT>;
-private:
+protected:
DataStoreBase &_store;
uint32_t _typeId;
diff --git a/searchlib/src/vespa/searchlib/datastore/raw_allocator.hpp b/searchlib/src/vespa/searchlib/datastore/raw_allocator.hpp
index 1c72d793ec6..8ec44bee71c 100644
--- a/searchlib/src/vespa/searchlib/datastore/raw_allocator.hpp
+++ b/searchlib/src/vespa/searchlib/datastore/raw_allocator.hpp
@@ -25,7 +25,15 @@ RawAllocator<EntryT, RefT>::alloc(size_t numElems, size_t extraElems)
size_t oldBufferSize = state.size();
EntryT *buffer = _store.getBufferEntry<EntryT>(activeBufferId, oldBufferSize);
state.pushed_back(numElems);
- return HandleType(RefT(oldBufferSize, activeBufferId), buffer);
+ if (RefT::isAlignedType) {
+ // AlignedEntryRef constructor scales down offset by alignment
+ return HandleType(RefT(oldBufferSize, activeBufferId), buffer);
+ } else {
+ // Must perform scaling ourselves, according to cluster size
+ size_t clusterSize = state.getClusterSize();
+ assert((numElems % clusterSize) == 0u);
+ return HandleType(RefT(oldBufferSize / clusterSize, activeBufferId), buffer);
+ }
}
}
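
Taken together, the two allocators keep a non-aligned ref's offset in cluster units: RawAllocator::alloc() divides the element offset by the cluster size when building the ref, and FreeListRawAllocator::alloc() multiplies it back when turning a reused ref into a buffer address. A standalone sketch of that round trip (hypothetical helper names, not part of the diff):

    #include <cassert>
    #include <cstdint>

    // What RawAllocator stores in a non-aligned ref: offsets in cluster units.
    uint64_t to_ref_offset(uint64_t element_offset, uint64_t cluster_size) {
        assert(element_offset % cluster_size == 0); // allocations are whole clusters
        return element_offset / cluster_size;
    }

    // What FreeListRawAllocator undoes when reusing a ref from the free list.
    uint64_t to_element_offset(uint64_t ref_offset, uint64_t cluster_size) {
        return ref_offset * cluster_size;
    }

    int main() {
        // cluster_size = 3: the third cluster starts at element 6, stored as offset 2.
        assert(to_ref_offset(6, 3) == 2);
        assert(to_element_offset(2, 3) == 6);
    }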
diff --git a/searchlib/src/vespa/searchlib/engine/packetconverter.cpp b/searchlib/src/vespa/searchlib/engine/packetconverter.cpp
index c35ce9ded05..863c204f26c 100644
--- a/searchlib/src/vespa/searchlib/engine/packetconverter.cpp
+++ b/searchlib/src/vespa/searchlib/engine/packetconverter.cpp
@@ -1,7 +1,6 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "packetconverter.h"
-#include <vespa/searchlib/fef/indexproperties.h>
#include <vespa/log/log.h>
LOG_SETUP(".engine.packetconverter");
@@ -74,7 +73,6 @@ PacketConverter::toSearchRequest(const QUERYX &packet, SearchRequest &request)
request.location = packet._location;
request.stackItems = packet._numStackItems;
request.stackDump.assign( packet._stackDump.begin(), packet._stackDump.end());
- request.setTraceLevel(search::fef::indexproperties::trace::Level::lookup(request.propertiesMap.modelOverrides()));
}
void
diff --git a/searchlib/src/vespa/searchlib/engine/request.h b/searchlib/src/vespa/searchlib/engine/request.h
index 733043b0e4e..ab46b5d40fe 100644
--- a/searchlib/src/vespa/searchlib/engine/request.h
+++ b/searchlib/src/vespa/searchlib/engine/request.h
@@ -30,7 +30,10 @@ public:
bool should_drop_sort_data() const;
- Request & setTraceLevel(uint32_t level) { _trace.setLevel(level); return *this; }
+ void setTraceLevel(uint32_t level, uint32_t minLevel) const {
+ _trace.setLevel(level);
+ _trace.start(minLevel);
+ }
Trace & trace() const { return _trace; }
private:
diff --git a/searchlib/src/vespa/searchlib/engine/trace.cpp b/searchlib/src/vespa/searchlib/engine/trace.cpp
index ba92902d2fd..f9564846104 100644
--- a/searchlib/src/vespa/searchlib/engine/trace.cpp
+++ b/searchlib/src/vespa/searchlib/engine/trace.cpp
@@ -10,30 +10,38 @@ RelativeTime::RelativeTime(std::unique_ptr<Clock> clock)
_clock(std::move(clock))
{}
-namespace {
-
-Trace::Cursor &
-createRoot(vespalib::Slime & slime, const RelativeTime & relativeTime) {
- Trace::Cursor & root = slime.setObject();
- root.setString("start_time_utc", relativeTime.timeOfDawn().toString());
- return root;
+void
+Trace::constructObject() const {
+ _trace = std::make_unique<vespalib::Slime>();
+ _root = & _trace->setObject();
}
+void
+Trace::constructTraces() const {
+ _traces = & root().setArray("traces");
}
+
Trace::Trace(const RelativeTime & relativeTime, uint32_t level)
- : _trace(std::make_unique<vespalib::Slime>()),
- _root(createRoot(*_trace, relativeTime)),
- _traces(_root.setArray("traces")),
+ : _trace(),
+ _root(nullptr),
+ _traces(nullptr),
_relativeTime(relativeTime),
_level(level)
{
}
+void
+Trace::start(int level) {
+ if (shouldTrace(level) && !hasTrace()) {
+ root().setString("start_time_utc", _relativeTime.timeOfDawn().toString());
+ }
+}
+
Trace::~Trace() = default;
Trace::Cursor &
Trace::createCursor(vespalib::stringref name) {
- Cursor & trace = _traces.addObject();
+ Cursor & trace = traces().addObject();
addTimeStamp(trace);
trace.setString("tag", name);
return trace;
@@ -48,7 +56,7 @@ void
Trace::addEvent(uint32_t level, vespalib::stringref event) {
if (!shouldTrace(level)) { return; }
- Cursor & trace = _traces.addObject();
+ Cursor & trace = traces().addObject();
addTimeStamp(trace);
trace.setString("event", event);
}
@@ -59,12 +67,14 @@ Trace::addTimeStamp(Cursor & trace) {
}
void Trace::done() {
- _root.setDouble("duration_ms", _relativeTime.timeSinceDawn()/1000000.0);
+ if (!hasTrace()) { return; }
+
+ root().setDouble("duration_ms", _relativeTime.timeSinceDawn()/1000000.0);
}
vespalib::string
Trace::toString() const {
- return _trace->toString();
+ return hasTrace() ? slime().toString() : "";
}
}
diff --git a/searchlib/src/vespa/searchlib/engine/trace.h b/searchlib/src/vespa/searchlib/engine/trace.h
index 41f2c608615..e5fea4f2b7f 100644
--- a/searchlib/src/vespa/searchlib/engine/trace.h
+++ b/searchlib/src/vespa/searchlib/engine/trace.h
@@ -57,6 +57,12 @@ public:
~Trace();
/**
+ * Adds the start timestamp if tracing is enabled at the given level
+ * @param level
+ */
+ void start(int level);
+
+ /**
* Will give you a trace entry. It will also add a timestamp relative to the creation of the trace.
* @param name
* @return a Cursor to use for further tracing.
@@ -76,17 +82,38 @@ public:
void done();
vespalib::string toString() const;
- Cursor & getRoot() const { return _root; }
- vespalib::Slime & getSlime() const { return *_trace; }
+ bool hasTrace() const { return static_cast<bool>(_trace); }
+ Cursor & getRoot() const { return root(); }
+ vespalib::Slime & getSlime() const { return slime(); }
bool shouldTrace(uint32_t level) const { return level <= _level; }
uint32_t getLevel() const { return _level; }
Trace & setLevel(uint32_t level) { _level = level; return *this; }
const RelativeTime & getRelativeTime() const { return _relativeTime; }
private:
+ vespalib::Slime & slime() const {
+ if (!hasTrace()) {
+ constructObject();
+ }
+ return *_trace;
+ }
+ Cursor & root() const {
+ if (!hasTrace()) {
+ constructObject();
+ }
+ return *_root;
+ }
+ Cursor & traces() const {
+ if (!_traces) {
+ constructTraces();
+ }
+ return *_traces;
+ }
+ void constructObject() const;
+ void constructTraces() const;
void addTimeStamp(Cursor & trace);
- std::unique_ptr<vespalib::Slime> _trace;
- Cursor & _root;
- Cursor & _traces;
+ mutable std::unique_ptr<vespalib::Slime> _trace;
+ mutable Cursor * _root;
+ mutable Cursor * _traces;
const RelativeTime & _relativeTime;
uint32_t _level;
};
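
With the Slime tree now built lazily, a Trace stays empty (and allocation-free) until start() finds the level enabled; done() and toString() are no-ops on an empty trace. A usage sketch (assuming a Clock implementation such as the CountingClock defined in searchapi_test.cpp):

    RelativeTime clock(std::make_unique<CountingClock>(fastos::TimeStamp::fromSec(1500000000), 1700000L));
    Trace trace(clock, 1);             // configured trace level 1
    assert(!trace.hasTrace());         // nothing allocated yet
    trace.start(1);                    // 1 <= 1: records start_time_utc, builds the tree
    trace.createCursor("tag_a");       // adds a timestamped trace entry
    trace.done();                      // adds duration_ms (skipped when no trace exists)
    assert(!trace.toString().empty());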
diff --git a/searchlib/src/vespa/searchlib/tensor/dense_tensor_store.cpp b/searchlib/src/vespa/searchlib/tensor/dense_tensor_store.cpp
index 1c4e07e38ee..eae0f5364b8 100644
--- a/searchlib/src/vespa/searchlib/tensor/dense_tensor_store.cpp
+++ b/searchlib/src/vespa/searchlib/tensor/dense_tensor_store.cpp
@@ -17,13 +17,38 @@ using vespalib::eval::ValueType;
namespace search::tensor {
+namespace {
+
constexpr size_t MIN_BUFFER_CLUSTERS = 1024;
+constexpr size_t DENSE_TENSOR_ALIGNMENT = 32;
+
+}
+
+DenseTensorStore::TensorSizeCalc::TensorSizeCalc(const ValueType &type)
+ : _numBoundCells(1u),
+ _numUnboundDims(0u),
+ _cellSize(sizeof(double))
+{
+ for (const auto & dim : type.dimensions()) {
+ if (dim.is_bound()) {
+ _numBoundCells *= dim.size;
+ } else {
+ ++_numUnboundDims;
+ }
+ }
+}
-DenseTensorStore::BufferType::BufferType()
- : datastore::BufferType<char>(RefType::align(1),
- MIN_BUFFER_CLUSTERS,
- RefType::offsetSize() / RefType::align(1)),
- _unboundDimSizesSize(0u)
+size_t
+DenseTensorStore::TensorSizeCalc::clusterSize() const
+{
+ size_t tensorSize = _numBoundCells * _cellSize +
+ _numUnboundDims * sizeof(uint32_t);
+ return DenseTensorStore::BufferType::align(tensorSize, DENSE_TENSOR_ALIGNMENT);
+}
+
+DenseTensorStore::BufferType::BufferType(const TensorSizeCalc &tensorSizeCalc)
+ : datastore::BufferType<char>(tensorSizeCalc.clusterSize(), MIN_BUFFER_CLUSTERS, RefType::offsetSize()),
+ _unboundDimSizesSize(tensorSizeCalc._numUnboundDims * sizeof(uint32_t))
{}
DenseTensorStore::BufferType::~BufferType() = default;
@@ -40,30 +65,24 @@ size_t
DenseTensorStore::BufferType::getReservedElements(uint32_t bufferId) const
{
return datastore::BufferType<char>::getReservedElements(bufferId) +
- RefType::align(_unboundDimSizesSize);
+ align(_unboundDimSizesSize);
}
DenseTensorStore::DenseTensorStore(const ValueType &type)
: TensorStore(_concreteStore),
_concreteStore(),
- _bufferType(),
+ _tensorSizeCalc(type),
+ _bufferType(_tensorSizeCalc),
_type(type),
- _numBoundCells(1u),
- _numUnboundDims(0u),
- _cellSize(sizeof(double)),
_emptyCells()
{
- for (const auto & dim : _type.dimensions()) {
- if (dim.is_bound()) {
- _numBoundCells *= dim.size;
- } else {
- ++_numUnboundDims;
- }
- }
- _emptyCells.resize(_numBoundCells, 0.0);
- _bufferType.setUnboundDimSizesSize(_numUnboundDims * sizeof(uint32_t));
+ _emptyCells.resize(_tensorSizeCalc._numBoundCells, 0.0);
_store.addType(&_bufferType);
_store.initActiveBuffers();
+ if (_tensorSizeCalc._numUnboundDims == 0) {
+        // In this case each tensor uses the same amount of memory, and we can re-use previously allocated raw buffers by using free lists.
+ _store.enableFreeLists();
+ }
}
DenseTensorStore::~DenseTensorStore()
@@ -75,7 +94,7 @@ const void *
DenseTensorStore::getRawBuffer(RefType ref) const
{
return _store.getBufferEntry<char>(ref.bufferId(),
- ref.offset());
+ ref.offset() * _bufferType.getClusterSize());
}
@@ -83,8 +102,8 @@ size_t
DenseTensorStore::getNumCells(const void *buffer) const
{
const uint32_t *unboundDimSizeEnd = static_cast<const uint32_t *>(buffer);
- const uint32_t *unboundDimSizeStart = unboundDimSizeEnd - _numUnboundDims;
- size_t numCells = _numBoundCells;
+ const uint32_t *unboundDimSizeStart = unboundDimSizeEnd - _tensorSizeCalc._numUnboundDims;
+ size_t numCells = _tensorSizeCalc._numBoundCells;
for (auto unboundDimSize = unboundDimSizeStart; unboundDimSize != unboundDimSizeEnd; ++unboundDimSize) {
numCells *= *unboundDimSize;
}
@@ -103,9 +122,9 @@ void clearPadAreaAfterBuffer(char *buffer, size_t bufSize, size_t alignedBufSize
Handle<char>
DenseTensorStore::allocRawBuffer(size_t numCells)
{
- size_t bufSize = numCells * _cellSize;
+ size_t bufSize = numCells * _tensorSizeCalc._cellSize;
size_t alignedBufSize = alignedSize(numCells);
- auto result = _concreteStore.rawAllocator<char>(_typeId).alloc(alignedBufSize);
+ auto result = _concreteStore.freeListRawAllocator<char>(_typeId).alloc(alignedBufSize);
clearPadAreaAfterBuffer(result.data, bufSize, alignedBufSize, unboundDimSizesSize());
return result;
}
@@ -114,9 +133,9 @@ Handle<char>
DenseTensorStore::allocRawBuffer(size_t numCells,
const std::vector<uint32_t> &unboundDimSizes)
{
- assert(unboundDimSizes.size() == _numUnboundDims);
+ assert(unboundDimSizes.size() == _tensorSizeCalc._numUnboundDims);
auto ret = allocRawBuffer(numCells);
- if (_numUnboundDims > 0) {
+ if (_tensorSizeCalc._numUnboundDims > 0) {
memcpy(ret.data - unboundDimSizesSize(),
&unboundDimSizes[0], unboundDimSizesSize());
}
@@ -146,7 +165,7 @@ DenseTensorStore::move(EntryRef ref)
auto newraw = allocRawBuffer(numCells);
memcpy(newraw.data - unboundDimSizesSize(),
static_cast<const char *>(oldraw) - unboundDimSizesSize(),
- numCells * _cellSize + unboundDimSizesSize());
+ numCells * _tensorSizeCalc._cellSize + unboundDimSizesSize());
_concreteStore.holdElem(ref, alignedSize(numCells));
return newraw.ref;
}
@@ -173,11 +192,11 @@ DenseTensorStore::getTensor(EntryRef ref) const
}
auto raw = getRawBuffer(ref);
size_t numCells = getNumCells(raw);
- if (_numUnboundDims == 0) {
+ if (_tensorSizeCalc._numUnboundDims == 0) {
return std::make_unique<DenseTensorView>(_type, CellsRef(static_cast<const double *>(raw), numCells));
} else {
auto result = std::make_unique<MutableDenseTensorView>(_type, CellsRef(static_cast<const double *>(raw), numCells));
- makeConcreteType(*result, raw, _numUnboundDims);
+ makeConcreteType(*result, raw, _tensorSizeCalc._numUnboundDims);
return result;
}
}
@@ -187,15 +206,15 @@ DenseTensorStore::getTensor(EntryRef ref, MutableDenseTensorView &tensor) const
{
if (!ref.valid()) {
tensor.setCells(DenseTensorView::CellsRef(&_emptyCells[0], _emptyCells.size()));
- if (_numUnboundDims > 0) {
+ if (_tensorSizeCalc._numUnboundDims > 0) {
tensor.setUnboundDimensionsForEmptyTensor();
}
} else {
auto raw = getRawBuffer(ref);
size_t numCells = getNumCells(raw);
tensor.setCells(DenseTensorView::CellsRef(static_cast<const double *>(raw), numCells));
- if (_numUnboundDims > 0) {
- makeConcreteType(tensor, raw, _numUnboundDims);
+ if (_tensorSizeCalc._numUnboundDims > 0) {
+ makeConcreteType(tensor, raw, _tensorSizeCalc._numUnboundDims);
}
}
}
@@ -253,8 +272,8 @@ DenseTensorStore::setDenseTensor(const TensorType &tensor)
size_t numCells = tensor.cellsRef().size();
checkMatchingType(_type, tensor.type(), numCells);
auto raw = allocRawBuffer(numCells);
- setDenseTensorUnboundDimSizes(raw.data, _type, _numUnboundDims, tensor.type());
- memcpy(raw.data, &tensor.cellsRef()[0], numCells * _cellSize);
+ setDenseTensorUnboundDimSizes(raw.data, _type, _tensorSizeCalc._numUnboundDims, tensor.type());
+ memcpy(raw.data, &tensor.cellsRef()[0], numCells * _tensorSizeCalc._cellSize);
return raw.ref;
}
diff --git a/searchlib/src/vespa/searchlib/tensor/dense_tensor_store.h b/searchlib/src/vespa/searchlib/tensor/dense_tensor_store.h
index 67a2dc7b8c0..55ddd2ec9e4 100644
--- a/searchlib/src/vespa/searchlib/tensor/dense_tensor_store.h
+++ b/searchlib/src/vespa/searchlib/tensor/dense_tensor_store.h
@@ -26,32 +26,41 @@ namespace search::tensor {
class DenseTensorStore : public TensorStore
{
public:
- // 32 entry alignment, entry type is char => 32 bytes alignment
- using RefType = datastore::AlignedEntryRefT<22, 5>;
+ using RefType = datastore::EntryRefT<22>;
using DataStoreType = datastore::DataStoreT<RefType>;
using ValueType = vespalib::eval::ValueType;
+ struct TensorSizeCalc
+ {
+ size_t _numBoundCells; // product of bound dimension sizes
+ uint32_t _numUnboundDims;
+ uint32_t _cellSize; // size of a cell (e.g. double => 8)
+
+ TensorSizeCalc(const ValueType &type);
+ size_t clusterSize() const;
+ };
+
class BufferType : public datastore::BufferType<char>
{
using CleanContext = datastore::BufferType<char>::CleanContext;
uint32_t _unboundDimSizesSize;
public:
- BufferType();
+ BufferType(const TensorSizeCalc &tensorSizeCalc);
~BufferType() override;
void cleanHold(void *buffer, uint64_t offset, uint64_t len, CleanContext cleanCtx) override;
uint32_t unboundDimSizesSize() const { return _unboundDimSizesSize; }
- void setUnboundDimSizesSize(uint32_t unboundDimSizesSize_in) {
- _unboundDimSizesSize = unboundDimSizesSize_in;
- }
size_t getReservedElements(uint32_t bufferId) const override;
+ static size_t align(size_t size, size_t alignment) {
+ size += alignment - 1;
+ return (size - (size % alignment));
+ }
+ size_t align(size_t size) const { return align(size, _clusterSize); }
};
private:
DataStoreType _concreteStore;
+ TensorSizeCalc _tensorSizeCalc;
BufferType _bufferType;
ValueType _type; // type of dense tensor
- size_t _numBoundCells; // product of bound dimension sizes
- uint32_t _numUnboundDims;
- uint32_t _cellSize; // size of a cell (e.g. double => 8)
std::vector<double> _emptyCells;
size_t unboundCells(const void *buffer) const;
@@ -61,7 +70,7 @@ private:
setDenseTensor(const TensorType &tensor);
datastore::Handle<char> allocRawBuffer(size_t numCells);
size_t alignedSize(size_t numCells) const {
- return RefType::align(numCells * _cellSize + unboundDimSizesSize());
+ return _bufferType.align(numCells * _tensorSizeCalc._cellSize + unboundDimSizesSize());
}
public:
@@ -71,7 +80,7 @@ public:
const ValueType &type() const { return _type; }
uint32_t unboundDimSizesSize() const { return _bufferType.unboundDimSizesSize(); }
size_t getNumCells(const void *buffer) const;
- uint32_t getCellSize() const { return _cellSize; }
+ uint32_t getCellSize() const { return _tensorSizeCalc._cellSize; }
const void *getRawBuffer(RefType ref) const;
datastore::Handle<char> allocRawBuffer(size_t numCells, const std::vector<uint32_t> &unboundDimSizes);
void holdTensor(EntryRef ref) override;
@@ -79,6 +88,8 @@ public:
std::unique_ptr<Tensor> getTensor(EntryRef ref) const;
void getTensor(EntryRef ref, vespalib::tensor::MutableDenseTensorView &tensor) const;
EntryRef setTensor(const Tensor &tensor);
+ // The following method is meant to be used only for unit tests.
+ uint32_t getClusterSize() const { return _bufferType.getClusterSize(); }
};
}