Diffstat (limited to 'searchlib')
-rw-r--r--  searchlib/CMakeLists.txt | 1
-rwxr-xr-x  searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/rule/OperationNode.java | 2
-rw-r--r--  searchlib/src/tests/memoryindex/datastore/feature_store_test.cpp | 6
-rw-r--r--  searchlib/src/tests/memoryindex/datastore/word_store_test.cpp | 12
-rw-r--r--  searchlib/src/tests/tensor/tensor_buffer_store/CMakeLists.txt | 9
-rw-r--r--  searchlib/src/tests/tensor/tensor_buffer_store/tensor_buffer_store_test.cpp | 164
-rw-r--r--  searchlib/src/vespa/searchlib/memoryindex/feature_store.cpp | 34
-rw-r--r--  searchlib/src/vespa/searchlib/memoryindex/feature_store.h | 9
-rw-r--r--  searchlib/src/vespa/searchlib/memoryindex/word_store.cpp | 7
-rw-r--r--  searchlib/src/vespa/searchlib/memoryindex/word_store.h | 7
-rw-r--r--  searchlib/src/vespa/searchlib/tensor/CMakeLists.txt | 4
-rw-r--r--  searchlib/src/vespa/searchlib/tensor/large_subspaces_buffer_type.cpp | 86
-rw-r--r--  searchlib/src/vespa/searchlib/tensor/large_subspaces_buffer_type.h | 40
-rw-r--r--  searchlib/src/vespa/searchlib/tensor/serialized_fast_value_attribute.cpp | 8
-rw-r--r--  searchlib/src/vespa/searchlib/tensor/small_subspaces_buffer_type.cpp | 67
-rw-r--r--  searchlib/src/vespa/searchlib/tensor/small_subspaces_buffer_type.h | 40
-rw-r--r--  searchlib/src/vespa/searchlib/tensor/streamed_value_saver.cpp | 2
-rw-r--r--  searchlib/src/vespa/searchlib/tensor/streamed_value_store.cpp | 11
-rw-r--r--  searchlib/src/vespa/searchlib/tensor/streamed_value_store.h | 5
-rw-r--r--  searchlib/src/vespa/searchlib/tensor/tensor_buffer_store.cpp | 97
-rw-r--r--  searchlib/src/vespa/searchlib/tensor/tensor_buffer_store.h | 37
-rw-r--r--  searchlib/src/vespa/searchlib/tensor/tensor_buffer_type_mapper.cpp | 47
-rw-r--r--  searchlib/src/vespa/searchlib/tensor/tensor_buffer_type_mapper.h | 35
-rw-r--r--  searchlib/src/vespa/searchlib/test/fakedata/fakememtreeocc.cpp | 11
24 files changed, 687 insertions, 54 deletions
diff --git a/searchlib/CMakeLists.txt b/searchlib/CMakeLists.txt
index 76dadc5605e..a7d831aa623 100644
--- a/searchlib/CMakeLists.txt
+++ b/searchlib/CMakeLists.txt
@@ -224,6 +224,7 @@ vespa_define_module(
src/tests/tensor/hnsw_index
src/tests/tensor/hnsw_saver
src/tests/tensor/tensor_buffer_operations
+ src/tests/tensor/tensor_buffer_store
src/tests/transactionlog
src/tests/transactionlogstress
src/tests/true
diff --git a/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/rule/OperationNode.java b/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/rule/OperationNode.java
index 0512e1dad2f..1c66686a9fe 100755
--- a/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/rule/OperationNode.java
+++ b/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/rule/OperationNode.java
@@ -80,7 +80,7 @@ public final class OperationNode extends CompositeNode {
@Override
public TensorType type(TypeContext<Reference> context) {
- // Compute type using tensor types as arithmetic operators are supported on tensors
+ // Compute type using tensor types as operation operators are supported on tensors
// and is correct also in the special case of doubles.
// As all our functions are type-commutative, we don't need to take operator precedence into account
TensorType type = children.get(0).type(context);
diff --git a/searchlib/src/tests/memoryindex/datastore/feature_store_test.cpp b/searchlib/src/tests/memoryindex/datastore/feature_store_test.cpp
index 34f9f7d27a9..564824031a6 100644
--- a/searchlib/src/tests/memoryindex/datastore/feature_store_test.cpp
+++ b/searchlib/src/tests/memoryindex/datastore/feature_store_test.cpp
@@ -90,8 +90,7 @@ TEST_F(FeatureStoreTest, features_can_be_added_and_retrieved)
r = fs.addFeatures(0, f);
r1 = r.first;
EXPECT_TRUE(r.second > 0);
- EXPECT_EQ(FeatureStore::RefType::align(1u),
- FeatureStore::RefType(r1).offset());
+ EXPECT_EQ(1u, FeatureStore::RefType(r1).offset());
EXPECT_EQ(0u, FeatureStore::RefType(r1).bufferId());
LOG(info,
"bits(%" PRIu64 "), ref.offset(%zu), ref.bufferId(%u)",
@@ -131,8 +130,7 @@ TEST_F(FeatureStoreTest, next_words_are_working)
r = fs.addFeatures(0, f);
r1 = r.first;
EXPECT_TRUE(r.second > 0);
- EXPECT_EQ(FeatureStore::RefType::align(1u),
- FeatureStore::RefType(r1).offset());
+ EXPECT_EQ(1u, FeatureStore::RefType(r1).offset());
EXPECT_EQ(0u, FeatureStore::RefType(r1).bufferId());
LOG(info,
"bits(%" PRIu64 "), ref.offset(%zu), ref.bufferId(%u)",
diff --git a/searchlib/src/tests/memoryindex/datastore/word_store_test.cpp b/searchlib/src/tests/memoryindex/datastore/word_store_test.cpp
index 698780a1dc2..1ca87467fc6 100644
--- a/searchlib/src/tests/memoryindex/datastore/word_store_test.cpp
+++ b/searchlib/src/tests/memoryindex/datastore/word_store_test.cpp
@@ -18,14 +18,14 @@ TEST(WordStoreTest, words_can_be_added_and_retrieved)
EntryRef r1 = ws.addWord(w1);
EntryRef r2 = ws.addWord(w2);
EntryRef r3 = ws.addWord(w3);
- uint32_t invp = WordStore::RefType::align(1); // Reserved as invalid
+ uint32_t invp = WordStore::buffer_array_size; // Reserved as invalid
uint32_t w1s = w1.size() + 1;
- uint32_t w1p = WordStore::RefType::pad(w1s);
+ uint32_t w1p = WordStore::calc_pad(w1s);
uint32_t w2s = w2.size() + 1;
- uint32_t w2p = WordStore::RefType::pad(w2s);
- EXPECT_EQ(invp, WordStore::RefType(r1).offset());
- EXPECT_EQ(invp + w1s + w1p, WordStore::RefType(r2).offset());
- EXPECT_EQ(invp + w1s + w1p + w2s + w2p, WordStore::RefType(r3).offset());
+ uint32_t w2p = WordStore::calc_pad(w2s);
+ EXPECT_EQ(invp, WordStore::RefType(r1).offset() * WordStore::buffer_array_size);
+ EXPECT_EQ(invp + w1s + w1p, WordStore::RefType(r2).offset() * WordStore::buffer_array_size);
+ EXPECT_EQ(invp + w1s + w1p + w2s + w2p, WordStore::RefType(r3).offset() * WordStore::buffer_array_size);
EXPECT_EQ(0u, WordStore::RefType(r1).bufferId());
EXPECT_EQ(0u, WordStore::RefType(r2).bufferId());
EXPECT_EQ(0u, WordStore::RefType(r3).bufferId());
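
The rewritten expectations above capture the new addressing scheme: a WordStore EntryRef offset now counts fixed-size arrays of buffer_array_size (4) chars rather than byte-aligned positions, so the byte position of a word is RefType(ref).offset() * buffer_array_size, and each stored word is padded with calc_pad so the next allocation starts on an array boundary. A minimal standalone sketch of that arithmetic, using a hypothetical 7-character word rather than the test data:

#include <cassert>
#include <cstdint>

constexpr uint32_t buffer_array_size = 4;   // chars per datastore array
constexpr uint32_t calc_pad(uint32_t val) { return -val & (buffer_array_size - 1); }

int main() {
    uint32_t invalid_bytes = buffer_array_size;          // the first array is reserved as "invalid"
    uint32_t word_bytes = 7 + 1;                         // hypothetical word plus '\0' terminator
    uint32_t next_byte_offset = invalid_bytes + word_bytes + calc_pad(word_bytes);
    assert(next_byte_offset == 12);                      // 4 + 8 + 0
    assert(next_byte_offset % buffer_array_size == 0);   // always lands on an array boundary
    assert(next_byte_offset / buffer_array_size == 3);   // what RefType::offset() would hold
    return 0;
}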
diff --git a/searchlib/src/tests/tensor/tensor_buffer_store/CMakeLists.txt b/searchlib/src/tests/tensor/tensor_buffer_store/CMakeLists.txt
new file mode 100644
index 00000000000..749d38a1383
--- /dev/null
+++ b/searchlib/src/tests/tensor/tensor_buffer_store/CMakeLists.txt
@@ -0,0 +1,9 @@
+# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(searchlib_tensor_buffer_store_test_app TEST
+ SOURCES
+ tensor_buffer_store_test.cpp
+ DEPENDS
+ searchlib
+ GTest::GTest
+)
+vespa_add_test(NAME searchlib_tensor_buffer_store_test_app COMMAND searchlib_tensor_buffer_store_test_app)
diff --git a/searchlib/src/tests/tensor/tensor_buffer_store/tensor_buffer_store_test.cpp b/searchlib/src/tests/tensor/tensor_buffer_store/tensor_buffer_store_test.cpp
new file mode 100644
index 00000000000..101b84e01aa
--- /dev/null
+++ b/searchlib/src/tests/tensor/tensor_buffer_store/tensor_buffer_store_test.cpp
@@ -0,0 +1,164 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/searchlib/tensor/tensor_buffer_store.h>
+#include <vespa/eval/eval/simple_value.h>
+#include <vespa/eval/eval/tensor_spec.h>
+#include <vespa/eval/eval/value.h>
+#include <vespa/vespalib/gtest/gtest.h>
+
+using search::tensor::TensorBufferStore;
+using vespalib::datastore::EntryRef;
+using vespalib::eval::SimpleValue;
+using vespalib::eval::TensorSpec;
+using vespalib::eval::Value;
+using vespalib::eval::ValueType;
+
+const vespalib::string tensor_type_spec("tensor(x{})");
+
+class TensorBufferStoreTest : public testing::Test
+{
+protected:
+ ValueType _tensor_type;
+ TensorBufferStore _store;
+ TensorBufferStoreTest();
+ ~TensorBufferStoreTest() override;
+ EntryRef store_tensor(const Value& tensor);
+ EntryRef store_tensor(const TensorSpec& spec);
+ std::unique_ptr<Value> load_tensor(EntryRef ref);
+ TensorSpec load_tensor_spec(EntryRef ref);
+ vespalib::nbostream encode_stored_tensor(EntryRef ref);
+ void assert_store_load(const TensorSpec& tensor_spec);
+ void assert_store_load_many(const TensorSpec& tensor_spec);
+ void assert_store_move_load(const TensorSpec& tensor_spec);
+ void assert_store_encode_store_encoded_load(const TensorSpec& tensor_spec);
+};
+
+TensorBufferStoreTest::TensorBufferStoreTest()
+ : testing::Test(),
+ _tensor_type(ValueType::from_spec(tensor_type_spec)),
+ _store(_tensor_type, {}, 4)
+{
+}
+
+TensorBufferStoreTest::~TensorBufferStoreTest() = default;
+
+EntryRef
+TensorBufferStoreTest::store_tensor(const Value& tensor)
+{
+ EXPECT_EQ(_tensor_type, tensor.type());
+ return _store.store_tensor(tensor);
+}
+
+EntryRef
+TensorBufferStoreTest::store_tensor(const TensorSpec& spec)
+{
+ auto tensor = SimpleValue::from_spec(spec);
+ return store_tensor(*tensor);
+}
+
+std::unique_ptr<Value>
+TensorBufferStoreTest::load_tensor(EntryRef ref)
+{
+ return _store.get_tensor(ref);
+}
+
+vespalib::nbostream
+TensorBufferStoreTest::encode_stored_tensor(EntryRef ref)
+{
+ vespalib::nbostream out;
+ _store.encode_stored_tensor(ref, out);
+ return out;
+}
+
+TensorSpec
+TensorBufferStoreTest::load_tensor_spec(EntryRef ref)
+{
+ auto loaded = load_tensor(ref);
+ return TensorSpec::from_value(*loaded);
+}
+
+void
+TensorBufferStoreTest::assert_store_load(const TensorSpec& tensor_spec)
+{
+ auto ref = store_tensor(tensor_spec);
+ auto loaded_spec = load_tensor_spec(ref);
+ _store.holdTensor(ref);
+ EXPECT_EQ(tensor_spec, loaded_spec);
+}
+
+void
+TensorBufferStoreTest::assert_store_load_many(const TensorSpec& tensor_spec)
+{
+ constexpr uint32_t cnt = 2000;
+ std::vector<EntryRef> refs;
+ for (uint32_t i = 0; i < cnt; ++i) {
+ refs.emplace_back(store_tensor(tensor_spec));
+ }
+ for (auto ref : refs) {
+ auto loaded_spec = load_tensor_spec(ref);
+ _store.holdTensor(ref);
+ EXPECT_EQ(tensor_spec, loaded_spec);
+ }
+}
+
+void
+TensorBufferStoreTest::assert_store_move_load(const TensorSpec& tensor_spec)
+{
+ auto ref = store_tensor(tensor_spec);
+ auto ref2 = _store.move(ref);
+ EXPECT_NE(ref, ref2);
+ auto loaded_spec = load_tensor_spec(ref2);
+ _store.holdTensor(ref2);
+ EXPECT_EQ(tensor_spec, loaded_spec);
+}
+
+void
+TensorBufferStoreTest::assert_store_encode_store_encoded_load(const TensorSpec& tensor_spec)
+{
+ auto ref = store_tensor(tensor_spec);
+ auto encoded = encode_stored_tensor(ref);
+ _store.holdTensor(ref);
+ auto ref2 = _store.store_encoded_tensor(encoded);
+ EXPECT_NE(ref, ref2);
+ auto loaded_spec = load_tensor_spec(ref2);
+ _store.holdTensor(ref2);
+ EXPECT_EQ(tensor_spec, loaded_spec);
+}
+
+std::vector<TensorSpec> tensor_specs = {
+ TensorSpec(tensor_type_spec),
+ TensorSpec(tensor_type_spec).add({{"x", "a"}}, 4.5),
+ TensorSpec(tensor_type_spec).add({{"x", "a"}}, 4.5).add({{"x", "b"}}, 5.5),
+ TensorSpec(tensor_type_spec).add({{"x", "a"}}, 4.5).add({{"x", "b"}}, 5.5).add({{"x", "c"}}, 6.5),
+ TensorSpec(tensor_type_spec).add({{"x", "a"}}, 4.5).add({{"x", "b"}}, 5.5).add({{"x", "c"}}, 6.5).add({{"x", "d"}}, 7.5)
+};
+
+TEST_F(TensorBufferStoreTest, tensor_can_be_stored_and_loaded)
+{
+ for (auto& tensor_spec : tensor_specs) {
+ assert_store_load(tensor_spec);
+ }
+}
+
+TEST_F(TensorBufferStoreTest, tensor_can_be_stored_and_loaded_many_times)
+{
+ for (auto& tensor_spec : tensor_specs) {
+ assert_store_load_many(tensor_spec);
+ }
+}
+
+TEST_F(TensorBufferStoreTest, stored_tensor_can_be_copied)
+{
+ for (auto& tensor_spec : tensor_specs) {
+ assert_store_move_load(tensor_spec);
+ }
+}
+
+TEST_F(TensorBufferStoreTest, stored_tensor_can_be_encoded_and_stored_as_encoded_and_loaded)
+{
+ for (auto& tensor_spec : tensor_specs) {
+ assert_store_encode_store_encoded_load(tensor_spec);
+ }
+}
+
+GTEST_MAIN_RUN_ALL_TESTS()
diff --git a/searchlib/src/vespa/searchlib/memoryindex/feature_store.cpp b/searchlib/src/vespa/searchlib/memoryindex/feature_store.cpp
index b37300375a8..b5810d06047 100644
--- a/searchlib/src/vespa/searchlib/memoryindex/feature_store.cpp
+++ b/searchlib/src/vespa/searchlib/memoryindex/feature_store.cpp
@@ -9,6 +9,7 @@ namespace search::memoryindex {
constexpr size_t MIN_BUFFER_ARRAYS = 1024u;
using index::SchemaUtil;
+using vespalib::datastore::EntryRef;
uint64_t
FeatureStore::writeFeatures(uint32_t packedIndex, const DocIdAndFeatures &features)
@@ -26,10 +27,10 @@ FeatureStore::writeFeatures(uint32_t packedIndex, const DocIdAndFeatures &featur
return oldOffset;
}
-vespalib::datastore::EntryRef
+EntryRef
FeatureStore::addFeatures(const uint8_t *src, uint64_t byteLen)
{
- uint32_t pad = RefType::pad(byteLen);
+ uint32_t pad = calc_pad(byteLen);
auto result = _store.rawAllocator<uint8_t>(_typeId).alloc(byteLen + pad, DECODE_SAFETY);
uint8_t *dst = result.data;
memcpy(dst, src, byteLen);
@@ -42,7 +43,7 @@ FeatureStore::addFeatures(const uint8_t *src, uint64_t byteLen)
return result.ref;
}
-std::pair<vespalib::datastore::EntryRef, uint64_t>
+std::pair<EntryRef, uint64_t>
FeatureStore::addFeatures(uint64_t beginOffset, uint64_t endOffset)
{
uint64_t bitLen = (endOffset - beginOffset);
@@ -52,18 +53,18 @@ FeatureStore::addFeatures(uint64_t beginOffset, uint64_t endOffset)
assert(wordLen > 0);
assert(byteLen > 0);
const uint8_t *src = reinterpret_cast<const uint8_t *>(_f._valI - wordLen);
- RefType ref = addFeatures(src, byteLen);
+ EntryRef ref = addFeatures(src, byteLen);
return std::make_pair(ref, bitLen);
}
-vespalib::datastore::EntryRef
-FeatureStore::moveFeatures(vespalib::datastore::EntryRef ref, uint64_t bitLen)
+EntryRef
+FeatureStore::moveFeatures(EntryRef ref, uint64_t bitLen)
{
const uint8_t *src = getBits(ref);
uint64_t byteLen = (bitLen + 7) / 8;
- RefType newRef = addFeatures(src, byteLen);
+ EntryRef newRef = addFeatures(src, byteLen);
// Mark old features as dead
- _store.incDead(ref, byteLen + RefType::pad(byteLen));
+ _store.incDead(ref, byteLen + calc_pad(byteLen));
return newRef;
}
@@ -74,8 +75,7 @@ FeatureStore::FeatureStore(const Schema &schema)
_d(nullptr),
_fieldsParams(),
_schema(schema),
- _type(RefType::align(1u), MIN_BUFFER_ARRAYS,
- RefType::offsetSize() / RefType::align(1u)),
+ _type(buffer_array_size, MIN_BUFFER_ARRAYS, RefType::offsetSize()),
_typeId(0)
{
_f.setWriteContext(&_fctx);
@@ -96,7 +96,7 @@ FeatureStore::~FeatureStore()
_store.dropBuffers();
}
-std::pair<vespalib::datastore::EntryRef, uint64_t>
+std::pair<EntryRef, uint64_t>
FeatureStore::addFeatures(uint32_t packedIndex, const DocIdAndFeatures &features)
{
uint64_t oldOffset = writeFeatures(packedIndex, features);
@@ -109,14 +109,14 @@ void
FeatureStore::add_features_guard_bytes()
{
uint32_t len = DECODE_SAFETY;
- uint32_t pad = RefType::pad(len);
- auto result = _store.rawAllocator<int8_t>(_typeId).alloc(len + pad);
+ uint32_t pad = calc_pad(len);
+ auto result = _store.rawAllocator<uint8_t>(_typeId).alloc(len + pad);
memset(result.data, 0, len + pad);
_store.incDead(result.ref, len + pad);
}
void
-FeatureStore::getFeatures(uint32_t packedIndex, vespalib::datastore::EntryRef ref, DocIdAndFeatures &features)
+FeatureStore::getFeatures(uint32_t packedIndex, EntryRef ref, DocIdAndFeatures &features)
{
setupForField(packedIndex, _d);
setupForReadFeatures(ref, _d);
@@ -124,7 +124,7 @@ FeatureStore::getFeatures(uint32_t packedIndex, vespalib::datastore::EntryRef re
}
size_t
-FeatureStore::bitSize(uint32_t packedIndex, vespalib::datastore::EntryRef ref)
+FeatureStore::bitSize(uint32_t packedIndex, EntryRef ref)
{
setupForField(packedIndex, _d);
setupForUnpackFeatures(ref, _d);
@@ -136,8 +136,8 @@ FeatureStore::bitSize(uint32_t packedIndex, vespalib::datastore::EntryRef ref)
return bitLen;
}
-vespalib::datastore::EntryRef
-FeatureStore::moveFeatures(uint32_t packedIndex, vespalib::datastore::EntryRef ref)
+EntryRef
+FeatureStore::moveFeatures(uint32_t packedIndex, EntryRef ref)
{
uint64_t bitLen = bitSize(packedIndex, ref);
return moveFeatures(ref, bitLen);
diff --git a/searchlib/src/vespa/searchlib/memoryindex/feature_store.h b/searchlib/src/vespa/searchlib/memoryindex/feature_store.h
index a96ae9a8f2d..b1d975d0926 100644
--- a/searchlib/src/vespa/searchlib/memoryindex/feature_store.h
+++ b/searchlib/src/vespa/searchlib/memoryindex/feature_store.h
@@ -14,11 +14,14 @@ namespace search::memoryindex {
*/
class FeatureStore {
public:
- using DataStoreType = vespalib::datastore::DataStoreT<vespalib::datastore::AlignedEntryRefT<22, 2>>;
+ using DataStoreType = vespalib::datastore::DataStoreT<vespalib::datastore::EntryRefT<22>>;
using RefType = DataStoreType::RefType;
using EncodeContext = bitcompression::EG2PosOccEncodeContext<true>;
using DecodeContextCooked = bitcompression::EG2PosOccDecodeContextCooked<true>;
using generation_t = vespalib::GenerationHandler::generation_t;
+ static constexpr uint32_t buffer_array_size = 4u; // Must be a power of 2
+ static constexpr uint32_t pad_constant = buffer_array_size - 1u;
+ static uint32_t calc_pad(uint32_t val) { return (-val & pad_constant); }
private:
using Schema = index::Schema;
@@ -154,7 +157,7 @@ public:
uint32_t bufferId = RefType(ref).bufferId();
const vespalib::datastore::BufferState &state = _store.getBufferState(bufferId);
decoder.setEnd(
- ((_store.getEntry<uint8_t>(RefType(0, bufferId)) + state.size() -
+ ((_store.getEntryArray<uint8_t>(RefType(0, bufferId), buffer_array_size) + state.size() -
bits) + 7) / 8,
false);
}
@@ -188,7 +191,7 @@ public:
*/
const uint8_t *getBits(vespalib::datastore::EntryRef ref) const {
RefType iRef(ref);
- return _store.getEntry<uint8_t>(iRef);
+ return _store.getEntryArray<uint8_t>(iRef, buffer_array_size);
}
/**
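
Dropping AlignedEntryRefT<22, 2> in favour of plain EntryRefT<22> means alignment is no longer baked into the reference type; instead the buffer type is declared with an explicit array size of 4 chars, and padding is computed with calc_pad. Because buffer_array_size is a power of two, -val & pad_constant is exactly the number of bytes needed to round val up to the next multiple of buffer_array_size. A few worked values (a sketch with calc_pad marked constexpr for the static_asserts; the class member is a plain static function):

#include <cstdint>

constexpr uint32_t buffer_array_size = 4u;               // must be a power of 2
constexpr uint32_t pad_constant = buffer_array_size - 1u;
constexpr uint32_t calc_pad(uint32_t val) { return -val & pad_constant; }

static_assert(calc_pad(1u) == 3u, "1 + 3 = 4");
static_assert(calc_pad(4u) == 0u, "already a multiple of 4");
static_assert(calc_pad(5u) == 3u, "5 + 3 = 8");
static_assert(calc_pad(10u) == 2u, "10 + 2 = 12, i.e. byteLen + pad in addFeatures()");

int main() { return 0; }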
diff --git a/searchlib/src/vespa/searchlib/memoryindex/word_store.cpp b/searchlib/src/vespa/searchlib/memoryindex/word_store.cpp
index e5ec4ab7808..441587eb718 100644
--- a/searchlib/src/vespa/searchlib/memoryindex/word_store.cpp
+++ b/searchlib/src/vespa/searchlib/memoryindex/word_store.cpp
@@ -10,16 +10,13 @@ constexpr size_t MIN_BUFFER_ARRAYS = 1024;
WordStore::WordStore()
: _store(),
_numWords(0),
- _type(RefType::align(1),
- MIN_BUFFER_ARRAYS,
- RefType::offsetSize() / RefType::align(1)),
+ _type(buffer_array_size, MIN_BUFFER_ARRAYS, RefType::offsetSize()),
_typeId(0)
{
_store.addType(&_type);
_store.init_primary_buffers();
}
-
WordStore::~WordStore()
{
_store.dropBuffers();
@@ -29,7 +26,7 @@ vespalib::datastore::EntryRef
WordStore::addWord(const vespalib::stringref word)
{
size_t wordSize = word.size() + 1;
- size_t bufferSize = RefType::align(wordSize);
+ size_t bufferSize = wordSize + calc_pad(wordSize);
auto result = _store.rawAllocator<char>(_typeId).alloc(bufferSize);
char *be = result.data;
for (size_t i = 0; i < word.size(); ++i) {
diff --git a/searchlib/src/vespa/searchlib/memoryindex/word_store.h b/searchlib/src/vespa/searchlib/memoryindex/word_store.h
index b27ae65d776..913f6bc3ea5 100644
--- a/searchlib/src/vespa/searchlib/memoryindex/word_store.h
+++ b/searchlib/src/vespa/searchlib/memoryindex/word_store.h
@@ -9,8 +9,11 @@ namespace search::memoryindex {
class WordStore {
public:
- using DataStoreType = vespalib::datastore::DataStoreT<vespalib::datastore::AlignedEntryRefT<22, 2>>;
+ using DataStoreType = vespalib::datastore::DataStoreT<vespalib::datastore::EntryRefT<22>>;
using RefType = DataStoreType::RefType;
+ static constexpr uint32_t buffer_array_size = 4u; // Must be a power of 2
+ static constexpr uint32_t pad_constant = buffer_array_size - 1u;
+ static uint32_t calc_pad(uint32_t val) { return (-val & pad_constant); }
private:
DataStoreType _store;
@@ -24,7 +27,7 @@ public:
vespalib::datastore::EntryRef addWord(const vespalib::stringref word);
const char *getWord(vespalib::datastore::EntryRef ref) const {
RefType internalRef(ref);
- return _store.getEntry<char>(internalRef);
+ return _store.getEntryArray<char>(internalRef, buffer_array_size);
}
vespalib::MemoryUsage getMemoryUsage() const {
diff --git a/searchlib/src/vespa/searchlib/tensor/CMakeLists.txt b/searchlib/src/vespa/searchlib/tensor/CMakeLists.txt
index 7815ef7e770..46bfc0909aa 100644
--- a/searchlib/src/vespa/searchlib/tensor/CMakeLists.txt
+++ b/searchlib/src/vespa/searchlib/tensor/CMakeLists.txt
@@ -24,13 +24,17 @@ vespa_add_library(searchlib_tensor OBJECT
imported_tensor_attribute_vector_read_guard.cpp
inner_product_distance.cpp
inv_log_level_generator.cpp
+ large_subspaces_buffer_type.cpp
nearest_neighbor_index.cpp
nearest_neighbor_index_saver.cpp
serialized_fast_value_attribute.cpp
+ small_subspaces_buffer_type.cpp
streamed_value_saver.cpp
streamed_value_store.cpp
tensor_attribute.cpp
tensor_buffer_operations.cpp
+ tensor_buffer_store.cpp
+ tensor_buffer_type_mapper.cpp
tensor_deserialize.cpp
tensor_store.cpp
reusable_set_visited_tracker.cpp
diff --git a/searchlib/src/vespa/searchlib/tensor/large_subspaces_buffer_type.cpp b/searchlib/src/vespa/searchlib/tensor/large_subspaces_buffer_type.cpp
new file mode 100644
index 00000000000..cdd4d35c1df
--- /dev/null
+++ b/searchlib/src/vespa/searchlib/tensor/large_subspaces_buffer_type.cpp
@@ -0,0 +1,86 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include "large_subspaces_buffer_type.h"
+#include "tensor_buffer_operations.h"
+#include "tensor_buffer_type_mapper.h"
+#include <vespa/vespalib/datastore/buffer_type.hpp>
+#include <vespa/vespalib/util/array.hpp>
+#include <vespa/vespalib/util/arrayref.h>
+
+using vespalib::alloc::MemoryAllocator;
+
+namespace search::tensor {
+
+LargeSubspacesBufferType::LargeSubspacesBufferType(const AllocSpec& spec, std::shared_ptr<MemoryAllocator> memory_allocator, TensorBufferTypeMapper& type_mapper) noexcept
+ : ParentType(1u, spec.minArraysInBuffer, spec.maxArraysInBuffer, spec.numArraysForNewBuffer, spec.allocGrowFactor),
+ _memory_allocator(std::move(memory_allocator)),
+ _ops(type_mapper.get_tensor_buffer_operations())
+{
+}
+
+LargeSubspacesBufferType::~LargeSubspacesBufferType() = default;
+
+void
+LargeSubspacesBufferType::cleanHold(void* buffer, size_t offset, ElemCount numElems, CleanContext cleanCtx)
+{
+ auto elem = static_cast<ArrayType*>(buffer) + offset;
+ for (size_t i = 0; i < numElems; ++i) {
+ if (!elem->empty()) {
+ cleanCtx.extraBytesCleaned(elem->size());
+ _ops.reclaim_labels({elem->data(), elem->size()});
+ ArrayType().swap(*elem);
+ }
+ ++elem;
+ }
+}
+
+void
+LargeSubspacesBufferType::destroyElements(void *buffer, ElemCount numElems)
+{
+ auto elem = static_cast<ArrayType*>(buffer);
+ for (size_t i = 0; i < numElems; ++i) {
+ if (!elem->empty()) {
+ _ops.reclaim_labels({elem->data(), elem->size()});
+ ArrayType().swap(*elem);
+ }
+ ++elem;
+ }
+}
+
+void
+LargeSubspacesBufferType::fallbackCopy(void *newBuffer, const void *oldBuffer, ElemCount numElems)
+{
+ auto old_elems = static_cast<const ArrayType*>(oldBuffer);
+ auto new_elems = static_cast<ArrayType*>(newBuffer);
+ for (size_t i = 0; i < numElems; ++i) {
+ auto& old_elem = old_elems[i];
+ new (new_elems + i) ArrayType(old_elem);
+ if (!old_elem.empty()) {
+ _ops.copied_labels({old_elem.data(), old_elem.size()});
+ }
+ }
+}
+
+void
+LargeSubspacesBufferType::initializeReservedElements(void *buffer, ElemCount reservedElements)
+{
+ auto new_elems = static_cast<ArrayType*>(buffer);
+ const auto& empty = empty_entry();
+ for (size_t i = 0; i < reservedElements; ++i) {
+ new (new_elems + i) ArrayType(empty);
+ }
+}
+
+const vespalib::alloc::MemoryAllocator*
+LargeSubspacesBufferType::get_memory_allocator() const
+{
+ return _memory_allocator.get();
+}
+
+}
+
+namespace vespalib::datastore {
+
+template class BufferType<Array<char>>;
+
+}
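
cleanHold() and destroyElements() above rely on the swap-with-an-empty-temporary idiom (ArrayType().swap(*elem)) so that the entry's heap allocation is actually released, not just cleared. A tiny standalone demonstration of the idiom, using std::vector<char> as a stand-in for vespalib::Array<char>:

#include <cassert>
#include <vector>

int main() {
    std::vector<char> buf(1024, 'x');      // stand-in for one large tensor buffer entry
    buf.clear();                           // size() becomes 0, but the heap capacity is kept
    assert(buf.capacity() >= 1024);
    std::vector<char>().swap(buf);         // swap with an empty temporary: the allocation is freed
    assert(buf.capacity() < 1024);         // typically 0 after the swap
    return 0;
}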
diff --git a/searchlib/src/vespa/searchlib/tensor/large_subspaces_buffer_type.h b/searchlib/src/vespa/searchlib/tensor/large_subspaces_buffer_type.h
new file mode 100644
index 00000000000..cfab8ef20af
--- /dev/null
+++ b/searchlib/src/vespa/searchlib/tensor/large_subspaces_buffer_type.h
@@ -0,0 +1,40 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include <vespa/vespalib/datastore/array_store_config.h>
+#include <vespa/vespalib/datastore/buffer_type.h>
+#include <vespa/vespalib/util/array.h>
+#include <memory>
+
+namespace vespalib::alloc { class MemoryAllocator; }
+
+namespace search::tensor {
+
+class TensorBufferOperations;
+class TensorBufferTypeMapper;
+
+/*
+ * Class representing buffer type for tensors with a large number of
+ * subspaces in array store. Tensor buffers are externally allocated
+ * (cf. vespalib::Array).
+ */
+class LargeSubspacesBufferType : public vespalib::datastore::BufferType<vespalib::Array<char>>
+{
+ using AllocSpec = vespalib::datastore::ArrayStoreConfig::AllocSpec;
+ using ArrayType = vespalib::Array<char>;
+ using ParentType = vespalib::datastore::BufferType<ArrayType>;
+ using CleanContext = typename ParentType::CleanContext;
+ std::shared_ptr<vespalib::alloc::MemoryAllocator> _memory_allocator;
+ TensorBufferOperations& _ops;
+public:
+ LargeSubspacesBufferType(const AllocSpec& spec, std::shared_ptr<vespalib::alloc::MemoryAllocator> memory_allocator, TensorBufferTypeMapper& type_mapper) noexcept;
+ ~LargeSubspacesBufferType() override;
+ void cleanHold(void* buffer, size_t offset, ElemCount numElems, CleanContext cleanCtx) override;
+ void destroyElements(void *buffer, ElemCount numElems) override;
+ void fallbackCopy(void *newBuffer, const void *oldBuffer, ElemCount numElems) override;
+ void initializeReservedElements(void *buffer, ElemCount reservedElements) override;
+ const vespalib::alloc::MemoryAllocator* get_memory_allocator() const override;
+};
+
+}
diff --git a/searchlib/src/vespa/searchlib/tensor/serialized_fast_value_attribute.cpp b/searchlib/src/vespa/searchlib/tensor/serialized_fast_value_attribute.cpp
index 2233eb77e89..3e9f41c812c 100644
--- a/searchlib/src/vespa/searchlib/tensor/serialized_fast_value_attribute.cpp
+++ b/searchlib/src/vespa/searchlib/tensor/serialized_fast_value_attribute.cpp
@@ -48,13 +48,7 @@ SerializedFastValueAttribute::getTensor(DocId docId) const
if (docId < getCommittedDocIdLimit()) {
ref = acquire_entry_ref(docId);
}
- if (!ref.valid()) {
- return {};
- }
- if (const auto * ptr = _streamedValueStore.get_tensor_entry(ref)) {
- return ptr->create_fast_value_view(_tensor_type);
- }
- return {};
+ return _streamedValueStore.get_tensor(ref);
}
bool
diff --git a/searchlib/src/vespa/searchlib/tensor/small_subspaces_buffer_type.cpp b/searchlib/src/vespa/searchlib/tensor/small_subspaces_buffer_type.cpp
new file mode 100644
index 00000000000..adbd3dee2b7
--- /dev/null
+++ b/searchlib/src/vespa/searchlib/tensor/small_subspaces_buffer_type.cpp
@@ -0,0 +1,67 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include "small_subspaces_buffer_type.h"
+#include "tensor_buffer_operations.h"
+#include "tensor_buffer_type_mapper.h"
+#include <vespa/vespalib/util/arrayref.h>
+
+using vespalib::alloc::MemoryAllocator;
+
+namespace search::tensor {
+
+SmallSubspacesBufferType::SmallSubspacesBufferType(uint32_t array_size, const AllocSpec& spec, std::shared_ptr<MemoryAllocator> memory_allocator, TensorBufferTypeMapper& type_mapper) noexcept
+ : ParentType(array_size, spec.minArraysInBuffer, spec.maxArraysInBuffer, spec.numArraysForNewBuffer, spec.allocGrowFactor),
+ _memory_allocator(std::move(memory_allocator)),
+ _ops(type_mapper.get_tensor_buffer_operations())
+{
+}
+
+SmallSubspacesBufferType::~SmallSubspacesBufferType() = default;
+
+void
+SmallSubspacesBufferType::cleanHold(void* buffer, size_t offset, ElemCount numElems, CleanContext)
+{
+ char* elem = static_cast<char *>(buffer) + offset;
+ while (numElems >= getArraySize()) {
+ _ops.reclaim_labels(vespalib::ArrayRef<char>(elem, getArraySize()));
+ elem += getArraySize();
+ numElems -= getArraySize();
+ }
+}
+
+void
+SmallSubspacesBufferType::destroyElements(void *buffer, ElemCount numElems)
+{
+ char* elem = static_cast<char *>(buffer);
+ while (numElems >= getArraySize()) {
+ _ops.reclaim_labels(vespalib::ArrayRef<char>(elem, getArraySize()));
+ elem += getArraySize();
+ numElems -= getArraySize();
+ }
+}
+
+void
+SmallSubspacesBufferType::fallbackCopy(void *newBuffer, const void *oldBuffer, ElemCount numElems)
+{
+ memcpy(newBuffer, oldBuffer, numElems);
+ const char *elem = static_cast<const char *>(oldBuffer);
+ while (numElems >= getArraySize()) {
+ _ops.copied_labels(vespalib::ConstArrayRef<char>(elem, getArraySize()));
+ elem += getArraySize();
+ numElems -= getArraySize();
+ }
+}
+
+void
+SmallSubspacesBufferType::initializeReservedElements(void *buffer, ElemCount reservedElements)
+{
+ memset(buffer, 0, reservedElements);
+}
+
+const vespalib::alloc::MemoryAllocator*
+SmallSubspacesBufferType::get_memory_allocator() const
+{
+ return _memory_allocator.get();
+}
+
+}
diff --git a/searchlib/src/vespa/searchlib/tensor/small_subspaces_buffer_type.h b/searchlib/src/vespa/searchlib/tensor/small_subspaces_buffer_type.h
new file mode 100644
index 00000000000..a778183c5a2
--- /dev/null
+++ b/searchlib/src/vespa/searchlib/tensor/small_subspaces_buffer_type.h
@@ -0,0 +1,40 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include <vespa/vespalib/datastore/array_store_config.h>
+#include <vespa/vespalib/datastore/buffer_type.h>
+#include <memory>
+
+namespace vespalib::alloc { class MemoryAllocator; }
+
+namespace search::tensor {
+
+class TensorBufferOperations;
+class TensorBufferTypeMapper;
+
+/*
+ * Class representing buffer type for tensors with a small number of
+ * subspaces in array store. Tensor buffers are internal in data store buffer.
+ */
+class SmallSubspacesBufferType : public vespalib::datastore::BufferType<char>
+{
+ using AllocSpec = vespalib::datastore::ArrayStoreConfig::AllocSpec;
+ using ParentType = vespalib::datastore::BufferType<char>;
+ std::shared_ptr<vespalib::alloc::MemoryAllocator> _memory_allocator;
+ TensorBufferOperations& _ops;
+public:
+ SmallSubspacesBufferType(const SmallSubspacesBufferType&) = delete;
+ SmallSubspacesBufferType& operator=(const SmallSubspacesBufferType&) = delete;
+ SmallSubspacesBufferType(SmallSubspacesBufferType&&) noexcept = default;
+ SmallSubspacesBufferType& operator=(SmallSubspacesBufferType&&) noexcept = default;
+ SmallSubspacesBufferType(uint32_t array_size, const AllocSpec& spec, std::shared_ptr<vespalib::alloc::MemoryAllocator> memory_allocator, TensorBufferTypeMapper& type_mapper) noexcept;
+ ~SmallSubspacesBufferType() override;
+ void cleanHold(void* buffer, size_t offset, ElemCount numElems, CleanContext cleanCtx) override;
+ void destroyElements(void *buffer, ElemCount numElems) override;
+ void fallbackCopy(void *newBuffer, const void *oldBuffer, ElemCount numElems) override;
+ void initializeReservedElements(void *buffer, ElemCount reservedElements) override;
+ const vespalib::alloc::MemoryAllocator* get_memory_allocator() const override;
+};
+
+}
diff --git a/searchlib/src/vespa/searchlib/tensor/streamed_value_saver.cpp b/searchlib/src/vespa/searchlib/tensor/streamed_value_saver.cpp
index f7b93654c33..25d3901d761 100644
--- a/searchlib/src/vespa/searchlib/tensor/streamed_value_saver.cpp
+++ b/searchlib/src/vespa/searchlib/tensor/streamed_value_saver.cpp
@@ -31,7 +31,7 @@ StreamedValueSaver::onSave(IAttributeSaveTarget &saveTarget)
const uint32_t docIdLimit(_refs.size());
vespalib::nbostream stream;
for (uint32_t lid = 0; lid < docIdLimit; ++lid) {
- if (_tensorStore.encode_tensor(_refs[lid], stream)) {
+ if (_tensorStore.encode_stored_tensor(_refs[lid], stream)) {
uint32_t sz = stream.size();
datWriter->write(&sz, sizeof(sz));
datWriter->write(stream.peek(), stream.size());
diff --git a/searchlib/src/vespa/searchlib/tensor/streamed_value_store.cpp b/searchlib/src/vespa/searchlib/tensor/streamed_value_store.cpp
index 58e625e6aca..763486f82e2 100644
--- a/searchlib/src/vespa/searchlib/tensor/streamed_value_store.cpp
+++ b/searchlib/src/vespa/searchlib/tensor/streamed_value_store.cpp
@@ -204,6 +204,15 @@ StreamedValueStore::get_tensor_entry(EntryRef ref) const
return entry.get();
}
+std::unique_ptr<vespalib::eval::Value>
+StreamedValueStore::get_tensor(EntryRef ref) const
+{
+ if (const auto * ptr = get_tensor_entry(ref)) {
+ return ptr->create_fast_value_view(_tensor_type);
+ }
+ return {};
+}
+
void
StreamedValueStore::holdTensor(EntryRef ref)
{
@@ -229,7 +238,7 @@ StreamedValueStore::move(EntryRef ref)
}
bool
-StreamedValueStore::encode_tensor(EntryRef ref, vespalib::nbostream &target) const
+StreamedValueStore::encode_stored_tensor(EntryRef ref, vespalib::nbostream &target) const
{
if (const auto * entry = get_tensor_entry(ref)) {
entry->encode_value(_tensor_type, target);
diff --git a/searchlib/src/vespa/searchlib/tensor/streamed_value_store.h b/searchlib/src/vespa/searchlib/tensor/streamed_value_store.h
index 29201dc0e61..9c5c5a91d18 100644
--- a/searchlib/src/vespa/searchlib/tensor/streamed_value_store.h
+++ b/searchlib/src/vespa/searchlib/tensor/streamed_value_store.h
@@ -60,6 +60,7 @@ private:
TensorStoreType _concrete_store;
const vespalib::eval::ValueType _tensor_type;
EntryRef add_entry(TensorEntry::SP tensor);
+ const TensorEntry* get_tensor_entry(EntryRef ref) const;
public:
StreamedValueStore(const vespalib::eval::ValueType &tensor_type);
~StreamedValueStore() override;
@@ -69,8 +70,8 @@ public:
void holdTensor(EntryRef ref) override;
EntryRef move(EntryRef ref) override;
- const TensorEntry * get_tensor_entry(EntryRef ref) const;
- bool encode_tensor(EntryRef ref, vespalib::nbostream &target) const;
+ std::unique_ptr<vespalib::eval::Value> get_tensor(EntryRef ref) const;
+ bool encode_stored_tensor(EntryRef ref, vespalib::nbostream &target) const;
EntryRef store_tensor(const vespalib::eval::Value &tensor);
EntryRef store_encoded_tensor(vespalib::nbostream &encoded);
diff --git a/searchlib/src/vespa/searchlib/tensor/tensor_buffer_store.cpp b/searchlib/src/vespa/searchlib/tensor/tensor_buffer_store.cpp
new file mode 100644
index 00000000000..34454e9f780
--- /dev/null
+++ b/searchlib/src/vespa/searchlib/tensor/tensor_buffer_store.cpp
@@ -0,0 +1,97 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include "tensor_buffer_store.h"
+#include <vespa/eval/eval/value_codec.h>
+#include <vespa/eval/streamed/streamed_value_builder_factory.h>
+#include <vespa/vespalib/datastore/array_store.hpp>
+#include <vespa/vespalib/datastore/buffer_type.hpp>
+#include <vespa/vespalib/datastore/datastore.hpp>
+#include <vespa/vespalib/util/size_literals.h>
+
+using vespalib::alloc::MemoryAllocator;
+using vespalib::datastore::EntryRef;
+using vespalib::eval::StreamedValueBuilderFactory;
+using vespalib::eval::Value;
+using vespalib::eval::ValueType;
+
+namespace search::tensor {
+
+namespace {
+
+constexpr float ALLOC_GROW_FACTOR = 0.2;
+
+}
+
+TensorBufferStore::TensorBufferStore(const ValueType& tensor_type, std::shared_ptr<MemoryAllocator> allocator, uint32_t max_small_subspaces_type_id)
+ : TensorStore(ArrayStoreType::get_data_store_base(_array_store)),
+ _tensor_type(tensor_type),
+ _ops(_tensor_type),
+ _array_store(ArrayStoreType::optimizedConfigForHugePage(max_small_subspaces_type_id,
+ TensorBufferTypeMapper(max_small_subspaces_type_id, &_ops),
+ MemoryAllocator::HUGEPAGE_SIZE, 4_Ki, 8_Ki, ALLOC_GROW_FACTOR),
+ std::move(allocator), TensorBufferTypeMapper(max_small_subspaces_type_id, &_ops))
+{
+}
+
+TensorBufferStore::~TensorBufferStore() = default;
+
+void
+TensorBufferStore::holdTensor(EntryRef ref)
+{
+ _array_store.remove(ref);
+}
+
+EntryRef
+TensorBufferStore::move(EntryRef ref)
+{
+ if (!ref.valid()) {
+ return EntryRef();
+ }
+ auto buf = _array_store.get(ref);
+ auto new_ref = _array_store.add(buf);
+ _ops.copied_labels(buf);
+ _array_store.remove(ref);
+ return new_ref;
+}
+
+EntryRef
+TensorBufferStore::store_tensor(const Value &tensor)
+{
+ uint32_t num_subspaces = tensor.index().size();
+ auto array_size = _ops.get_array_size(num_subspaces);
+ auto ref = _array_store.allocate(array_size);
+ auto buf = _array_store.get_writable(ref);
+ _ops.store_tensor(buf, tensor);
+ return ref;
+}
+
+EntryRef
+TensorBufferStore::store_encoded_tensor(vespalib::nbostream &encoded)
+{
+ const auto &factory = StreamedValueBuilderFactory::get();
+ auto val = vespalib::eval::decode_value(encoded, factory);
+ return store_tensor(*val);
+}
+
+std::unique_ptr<Value>
+TensorBufferStore::get_tensor(EntryRef ref) const
+{
+ if (!ref.valid()) {
+ return {};
+ }
+ auto buf = _array_store.get(ref);
+ return _ops.make_fast_view(buf, _tensor_type);
+}
+
+bool
+TensorBufferStore::encode_stored_tensor(EntryRef ref, vespalib::nbostream &target) const
+{
+ if (!ref.valid()) {
+ return false;
+ }
+ auto buf = _array_store.get(ref);
+ _ops.encode_stored_tensor(buf, _tensor_type, target);
+ return true;
+}
+
+}
diff --git a/searchlib/src/vespa/searchlib/tensor/tensor_buffer_store.h b/searchlib/src/vespa/searchlib/tensor/tensor_buffer_store.h
new file mode 100644
index 00000000000..18b98efa8fa
--- /dev/null
+++ b/searchlib/src/vespa/searchlib/tensor/tensor_buffer_store.h
@@ -0,0 +1,37 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include "tensor_store.h"
+#include "tensor_buffer_operations.h"
+#include "tensor_buffer_type_mapper.h"
+#include "large_subspaces_buffer_type.h"
+#include "small_subspaces_buffer_type.h"
+#include <vespa/eval/eval/value_type.h>
+#include <vespa/vespalib/datastore/array_store.h>
+
+namespace search::tensor {
+
+/**
+ * Class for storing tensor buffers in memory and making tensor views
+ * based on stored tensor buffer.
+ */
+class TensorBufferStore : public TensorStore
+{
+ using RefType = vespalib::datastore::EntryRefT<19>;
+ using ArrayStoreType = vespalib::datastore::ArrayStore<char, RefType, TensorBufferTypeMapper>;
+ vespalib::eval::ValueType _tensor_type;
+ TensorBufferOperations _ops;
+ ArrayStoreType _array_store;
+public:
+ TensorBufferStore(const vespalib::eval::ValueType& tensor_type, std::shared_ptr<vespalib::alloc::MemoryAllocator> allocator, uint32_t max_small_subspaces_type_id);
+ ~TensorBufferStore();
+ void holdTensor(EntryRef ref) override;
+ EntryRef move(EntryRef ref) override;
+ EntryRef store_tensor(const vespalib::eval::Value &tensor);
+ EntryRef store_encoded_tensor(vespalib::nbostream &encoded);
+ std::unique_ptr<vespalib::eval::Value> get_tensor(EntryRef ref) const;
+ bool encode_stored_tensor(EntryRef ref, vespalib::nbostream &target) const;
+};
+
+}
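
A minimal usage sketch of the new store, mirroring what the unit test added in this change exercises (the constructor arguments follow the test fixture: tensor type, default allocator, max_small_subspaces_type_id = 4):

#include <vespa/searchlib/tensor/tensor_buffer_store.h>
#include <vespa/eval/eval/simple_value.h>
#include <vespa/eval/eval/tensor_spec.h>
#include <vespa/eval/eval/value.h>
#include <cassert>

using search::tensor::TensorBufferStore;
using namespace vespalib::eval;

int main() {
    auto tensor_type = ValueType::from_spec("tensor(x{})");
    TensorBufferStore store(tensor_type, {}, 4);
    auto tensor = SimpleValue::from_spec(TensorSpec("tensor(x{})").add({{"x", "a"}}, 4.5));
    auto ref = store.store_tensor(*tensor);            // serialize into the array store
    auto loaded = store.get_tensor(ref);               // fast value view over the stored buffer
    assert(TensorSpec::from_value(*loaded) == TensorSpec::from_value(*tensor));
    store.holdTensor(ref);                             // hand the buffer back for later reuse
    return 0;
}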
diff --git a/searchlib/src/vespa/searchlib/tensor/tensor_buffer_type_mapper.cpp b/searchlib/src/vespa/searchlib/tensor/tensor_buffer_type_mapper.cpp
new file mode 100644
index 00000000000..b4b0b9bbc79
--- /dev/null
+++ b/searchlib/src/vespa/searchlib/tensor/tensor_buffer_type_mapper.cpp
@@ -0,0 +1,47 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include "tensor_buffer_type_mapper.h"
+#include "tensor_buffer_operations.h"
+#include <algorithm>
+
+namespace search::tensor {
+
+TensorBufferTypeMapper::TensorBufferTypeMapper()
+ : _array_sizes(),
+ _ops(nullptr)
+{
+}
+
+TensorBufferTypeMapper::TensorBufferTypeMapper(uint32_t max_small_subspaces_type_id, TensorBufferOperations* ops)
+ : _array_sizes(),
+ _ops(ops)
+{
+ _array_sizes.reserve(max_small_subspaces_type_id + 1);
+ _array_sizes.emplace_back(0); // type id 0 uses LargeSubspacesBufferType
+ for (uint32_t type_id = 1; type_id <= max_small_subspaces_type_id; ++type_id) {
+ auto num_subspaces = type_id - 1;
+ _array_sizes.emplace_back(_ops->get_array_size(num_subspaces));
+ }
+}
+
+TensorBufferTypeMapper::~TensorBufferTypeMapper() = default;
+
+uint32_t
+TensorBufferTypeMapper::get_type_id(size_t array_size) const
+{
+ assert(!_array_sizes.empty());
+ auto result = std::lower_bound(_array_sizes.begin() + 1, _array_sizes.end(), array_size);
+ if (result == _array_sizes.end()) {
+ return 0; // type id 0 uses LargeSubspacesBufferType
+ }
+ return result - _array_sizes.begin();
+}
+
+size_t
+TensorBufferTypeMapper::get_array_size(uint32_t type_id) const
+{
+ assert(type_id > 0 && type_id < _array_sizes.size());
+ return _array_sizes[type_id];
+}
+
+}
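
The mapper is effectively a sorted lookup table: index 0 is a sentinel for the large-subspaces type, and index i (i >= 1) holds the array size needed for i - 1 subspaces. get_type_id() returns the first type id whose array size is large enough, or 0 when the request exceeds every small size. A standalone sketch of that lookup with made-up array sizes (the real values come from TensorBufferOperations::get_array_size and are not part of this change):

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

int main() {
    // Hypothetical array sizes for type ids 0..4; index 0 is the large-buffer sentinel.
    std::vector<size_t> array_sizes = {0, 8, 40, 72, 104};

    auto get_type_id = [&](size_t array_size) -> uint32_t {
        auto it = std::lower_bound(array_sizes.begin() + 1, array_sizes.end(), array_size);
        return (it == array_sizes.end()) ? 0u : static_cast<uint32_t>(it - array_sizes.begin());
    };

    assert(get_type_id(8) == 1);     // exact fit
    assert(get_type_id(9) == 2);     // rounded up to the next small type
    assert(get_type_id(104) == 4);   // largest small type id
    assert(get_type_id(105) == 0);   // too large: handled by LargeSubspacesBufferType
    return 0;
}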
diff --git a/searchlib/src/vespa/searchlib/tensor/tensor_buffer_type_mapper.h b/searchlib/src/vespa/searchlib/tensor/tensor_buffer_type_mapper.h
new file mode 100644
index 00000000000..1e02c1cb608
--- /dev/null
+++ b/searchlib/src/vespa/searchlib/tensor/tensor_buffer_type_mapper.h
@@ -0,0 +1,35 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include <cstdint>
+#include <vector>
+
+namespace search::tensor {
+
+class LargeSubspacesBufferType;
+class SmallSubspacesBufferType;
+class TensorBufferOperations;
+
+/*
+ * This class provides mapping between type ids and array sizes needed for
+ * storing a tensor.
+ */
+class TensorBufferTypeMapper
+{
+ std::vector<size_t> _array_sizes;
+ TensorBufferOperations* _ops;
+public:
+ using SmallBufferType = SmallSubspacesBufferType;
+ using LargeBufferType = LargeSubspacesBufferType;
+
+ TensorBufferTypeMapper();
+ TensorBufferTypeMapper(uint32_t max_small_subspaces_type_id, TensorBufferOperations* ops);
+ ~TensorBufferTypeMapper();
+
+ uint32_t get_type_id(size_t array_size) const;
+ size_t get_array_size(uint32_t type_id) const;
+ TensorBufferOperations& get_tensor_buffer_operations() const noexcept { return *_ops; }
+};
+
+}
diff --git a/searchlib/src/vespa/searchlib/test/fakedata/fakememtreeocc.cpp b/searchlib/src/vespa/searchlib/test/fakedata/fakememtreeocc.cpp
index 433f543ab92..11b6a1e3020 100644
--- a/searchlib/src/vespa/searchlib/test/fakedata/fakememtreeocc.cpp
+++ b/searchlib/src/vespa/searchlib/test/fakedata/fakememtreeocc.cpp
@@ -199,14 +199,14 @@ FakeMemTreeOccMgr::sync()
void
FakeMemTreeOccMgr::add(uint32_t wordIdx, index::DocIdAndFeatures &features)
{
- typedef FeatureStore::RefType RefType;
-
const FakeWord *fw = _fakeWords[wordIdx];
std::pair<EntryRef, uint64_t> r =
_featureStore.addFeatures(fw->getPackedIndex(), features);
+ size_t feature_size = (r.second + 7) / 8;
+ feature_size += FeatureStore::calc_pad(feature_size);
- _featureSizes[wordIdx] += RefType::align((r.second + 7) / 8) * 8;
+ _featureSizes[wordIdx] += feature_size * 8;
_unflushed.push_back(PendingOp(wordIdx, features.doc_id(), r.first));
@@ -240,7 +240,6 @@ FakeMemTreeOccMgr::sortUnflushed()
void
FakeMemTreeOccMgr::flush()
{
- typedef FeatureStore::RefType RefType;
typedef std::vector<PendingOp>::iterator I;
if (_unflushed.empty())
@@ -264,7 +263,9 @@ FakeMemTreeOccMgr::flush()
if (i->getRemove()) {
if (itr.valid() && itr.getKey() == docId) {
uint64_t bits = _featureStore.bitSize(fw->getPackedIndex(), EntryRef(itr.getData().get_features_relaxed()));
- _featureSizes[wordIdx] -= RefType::align((bits + 7) / 8) * 8;
+ size_t feature_size = (bits + 7) / 8;
+ feature_size += FeatureStore::calc_pad(feature_size);
+ _featureSizes[wordIdx] -= feature_size * 8;
tree.remove(itr);
}
} else {