author    Tor Egge <Tor.Egge@online.no>  2022-09-30 10:41:25 +0200
committer Tor Egge <Tor.Egge@online.no>  2022-09-30 10:41:25 +0200
commit    1668220ebc309ac482279d5823dcbb4928b7c3e4 (patch)
tree      c172ed916013b16718f0a4be9b8a1a8fd7f4eb34
parent    2e6bbf5f13775f6649aec913ff3b6725ca74ed25 (diff)
Add tensor buffer operations.
-rw-r--r--  searchlib/CMakeLists.txt                                                              |   1
-rw-r--r--  searchlib/src/tests/tensor/tensor_buffer_operations/CMakeLists.txt                    |   9
-rw-r--r--  searchlib/src/tests/tensor/tensor_buffer_operations/tensor_buffer_operations_test.cpp | 188
-rw-r--r--  searchlib/src/vespa/searchlib/tensor/CMakeLists.txt                                   |   1
-rw-r--r--  searchlib/src/vespa/searchlib/tensor/tensor_buffer_operations.cpp                     | 188
-rw-r--r--  searchlib/src/vespa/searchlib/tensor/tensor_buffer_operations.h                       |  81
-rw-r--r--  vespalib/src/vespa/vespalib/util/shared_string_repo.h                                 |   4
-rw-r--r--  vespalib/src/vespa/vespalib/util/string_id.h                                          |   1
8 files changed, 473 insertions(+), 0 deletions(-)
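
The commit introduces TensorBufferOperations, which stores a serialized tensor (subspace count, mapped-dimension labels, and cells) in a flat char buffer, creates fast value views over that buffer, and manages the reference counts of the stored labels. A minimal usage sketch follows; it is not part of the commit itself, it mirrors the store/load flow exercised by the new unit test, and the tensor type and values are purely illustrative:

// Sketch only, under the assumptions above; see tensor_buffer_operations.h below for the full API.
using search::tensor::TensorBufferOperations;
using vespalib::eval::SimpleValue;
using vespalib::eval::TensorSpec;
using vespalib::eval::ValueType;

auto type = ValueType::from_spec("tensor(x{})");
TensorBufferOperations ops(type);
auto tensor = SimpleValue::from_spec(TensorSpec("tensor(x{})").add({{"x", "a"}}, 4.5));
uint32_t num_subspaces = tensor->index().size();
std::vector<char> buf(ops.get_array_size(num_subspaces)); // buffer sized for this subspace count
ops.store_tensor(buf, *tensor);                           // writes subspace count, labels and cells
auto view = ops.make_fast_view(buf, type);                // fast view backed by the stored buffer
ops.reclaim_labels(buf);                                  // release label refs before discarding buf
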
diff --git a/searchlib/CMakeLists.txt b/searchlib/CMakeLists.txt
index 80a02b0d928..76dadc5605e 100644
--- a/searchlib/CMakeLists.txt
+++ b/searchlib/CMakeLists.txt
@@ -223,6 +223,7 @@ vespa_define_module(
src/tests/tensor/distance_functions
src/tests/tensor/hnsw_index
src/tests/tensor/hnsw_saver
+ src/tests/tensor/tensor_buffer_operations
src/tests/transactionlog
src/tests/transactionlogstress
src/tests/true
diff --git a/searchlib/src/tests/tensor/tensor_buffer_operations/CMakeLists.txt b/searchlib/src/tests/tensor/tensor_buffer_operations/CMakeLists.txt
new file mode 100644
index 00000000000..075434ec0c0
--- /dev/null
+++ b/searchlib/src/tests/tensor/tensor_buffer_operations/CMakeLists.txt
@@ -0,0 +1,9 @@
+# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(searchlib_tensor_buffer_operations_test_app TEST
+ SOURCES
+ tensor_buffer_operations_test.cpp
+ DEPENDS
+ searchlib
+ GTest::GTest
+)
+vespa_add_test(NAME searchlib_tensor_buffer_operations_test_app COMMAND searchlib_tensor_buffer_operations_test_app)
diff --git a/searchlib/src/tests/tensor/tensor_buffer_operations/tensor_buffer_operations_test.cpp b/searchlib/src/tests/tensor/tensor_buffer_operations/tensor_buffer_operations_test.cpp
new file mode 100644
index 00000000000..04cc7a8a1ea
--- /dev/null
+++ b/searchlib/src/tests/tensor/tensor_buffer_operations/tensor_buffer_operations_test.cpp
@@ -0,0 +1,188 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/searchlib/tensor/tensor_buffer_operations.h>
+#include <vespa/eval/eval/simple_value.h>
+#include <vespa/eval/eval/tensor_spec.h>
+#include <vespa/eval/eval/value.h>
+#include <vespa/eval/eval/value_codec.h>
+#include <vespa/eval/streamed/streamed_value_builder_factory.h>
+#include <vespa/vespalib/gtest/gtest.h>
+
+using search::tensor::TensorBufferOperations;
+using vespalib::eval::SimpleValue;
+using vespalib::eval::StreamedValueBuilderFactory;
+using vespalib::eval::TensorSpec;
+using vespalib::eval::Value;
+using vespalib::eval::ValueType;
+using vespalib::eval::TypedCells;
+
+const vespalib::string tensor_type_spec("tensor(x{})");
+const vespalib::string tensor_type_2d_spec("tensor(x{},y{})");
+const vespalib::string tensor_type_2d_mixed_spec("tensor(x{},y[2])");
+const vespalib::string float_tensor_type_spec("tensor<float>(y{})");
+
+struct TestParam
+{
+ vespalib::string _name;
+ std::vector<size_t> _array_sizes;
+ TensorSpec _tensor_spec;
+ TestParam(vespalib::string name, std::vector<size_t> array_sizes, TensorSpec tensor_spec)
+ : _name(std::move(name)),
+ _array_sizes(std::move(array_sizes)),
+ _tensor_spec(std::move(tensor_spec))
+ {
+ }
+ ~TestParam();
+};
+
+TestParam::~TestParam() = default;
+
+std::ostream& operator<<(std::ostream& os, const TestParam& param)
+{
+ os << param._name;
+ return os;
+}
+
+class TensorBufferOperationsTest : public testing::TestWithParam<TestParam>
+{
+protected:
+ ValueType _tensor_type;
+ TensorBufferOperations _ops;
+ TensorBufferOperationsTest();
+ ~TensorBufferOperationsTest() override;
+ std::vector<size_t> get_array_sizes(uint32_t max_subspaces);
+ std::vector<char> store_tensor(const Value& tensor);
+ std::vector<char> store_tensor(const TensorSpec& spec);
+ std::unique_ptr<Value> load_tensor(vespalib::ConstArrayRef<char> buf);
+ TensorSpec load_tensor_spec(vespalib::ConstArrayRef<char> buf);
+ vespalib::nbostream encode_stored_tensor(vespalib::ConstArrayRef<char> buf);
+ void assert_store_load(const TensorSpec& tensor_spec);
+ void assert_store_copy_load(const TensorSpec& tensor_spec);
+ void assert_store_encode_decode(const TensorSpec& tensor_spec);
+};
+
+TensorBufferOperationsTest::TensorBufferOperationsTest()
+ : testing::TestWithParam<TestParam>(),
+ _tensor_type(ValueType::from_spec(GetParam()._tensor_spec.type())),
+ _ops(_tensor_type)
+{
+}
+
+TensorBufferOperationsTest::~TensorBufferOperationsTest() = default;
+
+std::vector<size_t>
+TensorBufferOperationsTest::get_array_sizes(uint32_t max_subspaces)
+{
+ std::vector<size_t> array_sizes;
+ for (uint32_t num_subspaces = 0; num_subspaces < max_subspaces; ++num_subspaces) {
+ array_sizes.emplace_back(_ops.get_array_size(num_subspaces));
+ }
+ return array_sizes;
+}
+
+std::vector<char>
+TensorBufferOperationsTest::store_tensor(const Value& tensor)
+{
+ EXPECT_EQ(_tensor_type, tensor.type());
+ uint32_t num_subspaces = tensor.index().size();
+ auto array_size = _ops.get_array_size(num_subspaces);
+ std::vector<char> buf;
+ buf.resize(array_size);
+ _ops.store_tensor(buf, tensor);
+ return buf;
+}
+
+std::vector<char>
+TensorBufferOperationsTest::store_tensor(const TensorSpec& spec)
+{
+ auto tensor = SimpleValue::from_spec(spec);
+ return store_tensor(*tensor);
+}
+
+std::unique_ptr<Value>
+TensorBufferOperationsTest::load_tensor(vespalib::ConstArrayRef<char> buf)
+{
+ return _ops.make_fast_view(buf, _tensor_type);
+}
+
+vespalib::nbostream
+TensorBufferOperationsTest::encode_stored_tensor(vespalib::ConstArrayRef<char> buf)
+{
+ vespalib::nbostream out;
+ _ops.encode_stored_tensor(buf, _tensor_type, out);
+ return out;
+}
+
+TensorSpec
+TensorBufferOperationsTest::load_tensor_spec(vespalib::ConstArrayRef<char> buf)
+{
+ auto loaded = load_tensor(buf);
+ return TensorSpec::from_value(*loaded);
+}
+
+void
+TensorBufferOperationsTest::assert_store_load(const TensorSpec& tensor_spec)
+{
+ auto buf = store_tensor(tensor_spec);
+ auto loaded_spec = load_tensor_spec(buf);
+ _ops.reclaim_labels(buf);
+ EXPECT_EQ(tensor_spec, loaded_spec);
+}
+
+void
+TensorBufferOperationsTest::assert_store_copy_load(const TensorSpec& tensor_spec)
+{
+ auto buf = store_tensor(tensor_spec);
+ auto buf2 = buf;
+ _ops.copied_labels(buf2);
+ EXPECT_EQ(buf, buf2);
+ _ops.reclaim_labels(buf);
+ EXPECT_NE(buf, buf2);
+ buf.clear();
+ auto loaded_spec = load_tensor_spec(buf2);
+ _ops.reclaim_labels(buf2);
+ EXPECT_EQ(tensor_spec, loaded_spec);
+}
+
+void
+TensorBufferOperationsTest::assert_store_encode_decode(const TensorSpec& tensor_spec)
+{
+ auto buf = store_tensor(tensor_spec);
+ auto encoded = encode_stored_tensor(buf);
+ _ops.reclaim_labels(buf);
+ const auto& factory = StreamedValueBuilderFactory::get();
+ auto decoded = vespalib::eval::decode_value(encoded, factory);
+ auto decoded_spec = TensorSpec::from_value(*decoded);
+ EXPECT_EQ(tensor_spec, decoded_spec);
+}
+
+VESPA_GTEST_INSTANTIATE_TEST_SUITE_P(TensorBufferOperationsMultiTest,
+ TensorBufferOperationsTest,
+ testing::Values(TestParam("1d", {8, 16, 32, 40, 64}, TensorSpec(tensor_type_spec).add({{"x", "a"}}, 4.5)),
+ TestParam("1dmulti", {8, 16, 32, 40, 64}, TensorSpec(tensor_type_spec).add({{"x", "a"}}, 4.5).add({{"x", "c"}}, 4.25)),
+ TestParam("1dfloat", {4, 12, 20, 28, 36}, TensorSpec(float_tensor_type_spec).add({{"y", "aa"}}, 4.25)),
+ TestParam("2d", {8, 24, 40, 56, 80}, TensorSpec(tensor_type_2d_spec).add({{"x", "a"},{"y", "aa"}}, 4.75)),
+ TestParam("2dmixed", {8, 24, 48, 64, 96}, TensorSpec(tensor_type_2d_mixed_spec).add({{"x", "a"},{"y", 0}}, 4.5).add({{"x", "a"},{"y", 1}}, 4.25))),
+ testing::PrintToStringParamName());
+
+TEST_P(TensorBufferOperationsTest, array_sizes_are_calculated)
+{
+ EXPECT_EQ(GetParam()._array_sizes, get_array_sizes(5));
+}
+
+TEST_P(TensorBufferOperationsTest, tensor_can_be_stored_and_loaded)
+{
+ assert_store_load(GetParam()._tensor_spec);
+}
+
+TEST_P(TensorBufferOperationsTest, tensor_buffer_can_be_copied)
+{
+ assert_store_copy_load(GetParam()._tensor_spec);
+}
+
+TEST_P(TensorBufferOperationsTest, tensor_buffer_can_be_encoded)
+{
+ assert_store_encode_decode(GetParam()._tensor_spec);
+}
+
+GTEST_MAIN_RUN_ALL_TESTS()
diff --git a/searchlib/src/vespa/searchlib/tensor/CMakeLists.txt b/searchlib/src/vespa/searchlib/tensor/CMakeLists.txt
index 9e0ccb8d37a..7815ef7e770 100644
--- a/searchlib/src/vespa/searchlib/tensor/CMakeLists.txt
+++ b/searchlib/src/vespa/searchlib/tensor/CMakeLists.txt
@@ -30,6 +30,7 @@ vespa_add_library(searchlib_tensor OBJECT
streamed_value_saver.cpp
streamed_value_store.cpp
tensor_attribute.cpp
+ tensor_buffer_operations.cpp
tensor_deserialize.cpp
tensor_store.cpp
reusable_set_visited_tracker.cpp
diff --git a/searchlib/src/vespa/searchlib/tensor/tensor_buffer_operations.cpp b/searchlib/src/vespa/searchlib/tensor/tensor_buffer_operations.cpp
new file mode 100644
index 00000000000..ba8b550a779
--- /dev/null
+++ b/searchlib/src/vespa/searchlib/tensor/tensor_buffer_operations.cpp
@@ -0,0 +1,188 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include "tensor_buffer_operations.h"
+#include <vespa/eval/eval/fast_value.hpp>
+#include <vespa/eval/eval/value.h>
+#include <vespa/eval/eval/value_codec.h>
+#include <vespa/eval/eval/value_type.h>
+#include <vespa/eval/streamed/streamed_value_view.h>
+#include <vespa/vespalib/util/arrayref.h>
+#include <vespa/vespalib/util/shared_string_repo.h>
+#include <algorithm>
+
+using vespalib::ArrayRef;
+using vespalib::ConstArrayRef;
+using vespalib::MemoryUsage;
+using vespalib::SharedStringRepo;
+using vespalib::StringIdVector;
+using vespalib::eval::FastAddrMap;
+using vespalib::eval::FastValueIndex;
+using vespalib::eval::StreamedValueView;
+using vespalib::eval::TypedCells;
+using vespalib::eval::Value;
+using vespalib::eval::ValueType;
+using vespalib::eval::self_memory_usage;
+using vespalib::string_id;
+
+namespace search::tensor {
+
+namespace {
+
+uint32_t
+adjust_min_alignment(size_t min_alignment)
+{
+ // Also apply alignment for num_subspaces and labels
+ return std::max(std::max(sizeof(uint32_t), sizeof(string_id)), min_alignment);
+}
+
+struct MyFastValueView final : Value {
+ const ValueType& _type;
+ StringIdVector _labels;
+ FastValueIndex _index;
+ TypedCells _cells;
+ MyFastValueView(const ValueType& type, ConstArrayRef<string_id> labels, TypedCells cells, size_t num_mapped_dimensions, size_t num_subspaces);
+ const ValueType& type() const override { return _type; }
+ const Value::Index& index() const override { return _index; }
+ TypedCells cells() const override { return _cells; }
+ MemoryUsage get_memory_usage() const override {
+ MemoryUsage usage = self_memory_usage<MyFastValueView>();
+ usage.merge(_index.map.estimate_extra_memory_usage());
+ return usage;
+ }
+};
+
+MyFastValueView::MyFastValueView(const ValueType& type, ConstArrayRef<string_id> labels, TypedCells cells, size_t num_mapped_dimensions, size_t num_subspaces)
+ : Value(),
+ _type(type),
+ _labels(labels.begin(), labels.end()),
+ _index(num_mapped_dimensions, _labels, num_subspaces),
+ _cells(cells)
+{
+ for (size_t i = 0; i < num_subspaces; ++i) {
+ ConstArrayRef<string_id> addr(_labels.data() + (i * num_mapped_dimensions), num_mapped_dimensions);
+ _index.map.add_mapping(FastAddrMap::hash_labels(addr));
+ }
+ assert(_index.map.size() == num_subspaces);
+}
+
+}
+
+TensorBufferOperations::TensorBufferOperations(const vespalib::eval::ValueType& tensor_type)
+ : _num_mapped_dimensions(tensor_type.count_mapped_dimensions()),
+ _cell_mem_size(vespalib::eval::CellTypeUtils::mem_size(tensor_type.cell_type(), 1u)),
+ _min_alignment(adjust_min_alignment(vespalib::eval::CellTypeUtils::alignment(tensor_type.cell_type()))),
+ _dense_subspace_size(tensor_type.dense_subspace_size()),
+ _cell_type(tensor_type.cell_type()),
+ _addr(_num_mapped_dimensions),
+ _addr_refs()
+{
+ _addr_refs.reserve(_addr.size());
+ for (auto& label : _addr) {
+ _addr_refs.push_back(&label);
+ }
+}
+
+TensorBufferOperations::~TensorBufferOperations() = default;
+
+uint32_t
+TensorBufferOperations::get_num_subspaces(ConstArrayRef<char> buf) const noexcept
+{
+ assert(buf.size() >= get_num_subspaces_size());
+ return *reinterpret_cast<const uint32_t*>(buf.data());
+}
+
+void
+TensorBufferOperations::store_tensor(ArrayRef<char> buf, const vespalib::eval::Value& tensor)
+{
+ uint32_t num_subspaces = tensor.index().size();
+ auto labels_end_offset = get_labels_offset() + get_labels_mem_size(num_subspaces);
+ auto cells_size = num_subspaces * _dense_subspace_size;
+ auto cells_mem_size = cells_size * _cell_mem_size; // Size measured in bytes
+ auto alignment = select_alignment(cells_mem_size);
+ auto cells_start_offset = calc_aligned(labels_end_offset, alignment);
+ auto cells_end_offset = cells_start_offset + cells_mem_size;
+ auto store_end = calc_aligned(cells_end_offset, alignment);
+ assert(store_end == get_array_size(num_subspaces));
+ assert(buf.size() >= store_end);
+ *reinterpret_cast<uint32_t*>(buf.data()) = num_subspaces;
+ auto labels = reinterpret_cast<string_id*>(buf.data() + get_labels_offset());
+ size_t subspace = 0;
+ size_t num_subspaces_visited = 0;
+ auto view = tensor.index().create_view({});
+ view->lookup({});
+ while (view->next_result(_addr_refs, subspace)) {
+ assert(subspace < num_subspaces);
+ auto subspace_labels = labels + subspace * _num_mapped_dimensions;
+ for (auto& label : _addr) {
+ SharedStringRepo::unsafe_copy(label); // tensor has an existing ref
+ *subspace_labels = label;
+ ++subspace_labels;
+ }
+ ++num_subspaces_visited;
+ }
+ assert(num_subspaces_visited == num_subspaces);
+ if (labels_end_offset != cells_start_offset) {
+ memset(buf.data() + labels_end_offset, 0, cells_start_offset - labels_end_offset);
+ }
+ auto cells = tensor.cells();
+ assert(cells_size == cells.size);
+ memcpy(buf.data() + cells_start_offset, cells.data, cells_mem_size);
+ if (cells_end_offset != store_end) {
+ memset(buf.data() + cells_end_offset, 0, store_end - cells_end_offset);
+ }
+}
+
+std::unique_ptr<vespalib::eval::Value>
+TensorBufferOperations::make_fast_view(ConstArrayRef<char> buf, const vespalib::eval::ValueType& tensor_type) const
+{
+ auto num_subspaces = get_num_subspaces(buf);
+ assert(buf.size() >= get_array_size(num_subspaces));
+ ConstArrayRef<string_id> labels(reinterpret_cast<const string_id*>(buf.data() + get_labels_offset()), num_subspaces * _num_mapped_dimensions);
+ auto cells_size = num_subspaces * _dense_subspace_size;
+ auto cells_mem_size = cells_size * _cell_mem_size; // Size measured in bytes
+ auto alignment = select_alignment(cells_mem_size);
+ auto cells_start_offset = get_cells_offset(num_subspaces, alignment);
+ TypedCells cells(buf.data() + cells_start_offset, _cell_type, cells_size);
+ assert(cells_start_offset + cells_mem_size <= buf.size());
+ return std::make_unique<MyFastValueView>(tensor_type, labels, cells, _num_mapped_dimensions, num_subspaces);
+}
+
+void
+TensorBufferOperations::copied_labels(ConstArrayRef<char> buf) const
+{
+ auto num_subspaces = get_num_subspaces(buf);
+ ConstArrayRef<string_id> labels(reinterpret_cast<const string_id*>(buf.data() + get_labels_offset()), num_subspaces * _num_mapped_dimensions);
+ for (auto& label : labels) {
+ SharedStringRepo::unsafe_copy(label); // Source buffer has an existing ref
+ }
+}
+
+void
+TensorBufferOperations::reclaim_labels(ArrayRef<char> buf) const
+{
+ auto num_subspaces = get_num_subspaces(buf);
+ ArrayRef<string_id> labels(reinterpret_cast<string_id*>(buf.data() + get_labels_offset()), num_subspaces * _num_mapped_dimensions);
+ for (auto& label : labels) {
+ SharedStringRepo::unsafe_reclaim(label);
+ label = string_id(); // Clear label to avoid double reclaim
+ }
+}
+
+void
+TensorBufferOperations::encode_stored_tensor(ConstArrayRef<char> buf, const vespalib::eval::ValueType& tensor_type, vespalib::nbostream& target) const
+{
+ auto num_subspaces = get_num_subspaces(buf);
+ assert(buf.size() >= get_array_size(num_subspaces));
+ ConstArrayRef<string_id> labels(reinterpret_cast<const string_id*>(buf.data() + get_labels_offset()), num_subspaces * _num_mapped_dimensions);
+ auto cells_size = num_subspaces * _dense_subspace_size;
+ auto cells_mem_size = cells_size * _cell_mem_size; // Size measured in bytes
+ auto alignment = select_alignment(cells_mem_size);
+ auto cells_start_offset = get_cells_offset(num_subspaces, alignment);
+ TypedCells cells(buf.data() + cells_start_offset, _cell_type, cells_size);
+ assert(cells_start_offset + cells_mem_size <= buf.size());
+ StringIdVector labels_copy(labels.begin(), labels.end());
+ StreamedValueView streamed_value_view(tensor_type, _num_mapped_dimensions, cells, num_subspaces, labels_copy);
+ vespalib::eval::encode_value(streamed_value_view, target);
+}
+
+}
diff --git a/searchlib/src/vespa/searchlib/tensor/tensor_buffer_operations.h b/searchlib/src/vespa/searchlib/tensor/tensor_buffer_operations.h
new file mode 100644
index 00000000000..72b00e79426
--- /dev/null
+++ b/searchlib/src/vespa/searchlib/tensor/tensor_buffer_operations.h
@@ -0,0 +1,81 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include <vespa/eval/eval/cell_type.h>
+#include <vespa/vespalib/util/string_id.h>
+#include <cstddef>
+#include <memory>
+
+namespace vespalib {
+template <typename T> class ArrayRef;
+template <typename T> class ConstArrayRef;
+class nbostream;
+}
+
+namespace vespalib::eval {
+struct Value;
+class ValueType;
+}
+
+namespace search::tensor {
+
+/*
+ * Class used to store a tensor in a buffer and make tensor views based on
+ * buffer content.
+ */
+class TensorBufferOperations
+{
+ uint32_t _num_mapped_dimensions;
+ uint32_t _cell_mem_size;
+ uint32_t _min_alignment;
+ size_t _dense_subspace_size;
+ vespalib::eval::CellType _cell_type;
+ std::vector<vespalib::string_id> _addr;
+ std::vector<vespalib::string_id*> _addr_refs;
+
+ static constexpr size_t CELLS_ALIGNMENT = 16;
+ static constexpr size_t CELLS_ALIGNMENT_MEM_SIZE_MIN = 32;
+
+ static constexpr size_t get_num_subspaces_size() noexcept { return sizeof(uint32_t); }
+ static constexpr size_t get_labels_offset() noexcept { return get_num_subspaces_size(); }
+ static size_t calc_aligned(size_t unaligned, size_t alignment) noexcept {
+ return (unaligned + alignment - 1) & (- alignment);
+ }
+ size_t get_cells_mem_size(uint32_t num_subspaces) const noexcept {
+ return _dense_subspace_size * _cell_mem_size * num_subspaces;
+ }
+ size_t select_alignment(size_t cells_mem_size) const noexcept {
+ return (cells_mem_size < CELLS_ALIGNMENT_MEM_SIZE_MIN) ? _min_alignment : CELLS_ALIGNMENT;
+ }
+ size_t get_labels_mem_size(uint32_t num_subspaces) const noexcept {
+ return sizeof(vespalib::string_id) * _num_mapped_dimensions * num_subspaces;
+ }
+ size_t get_cells_offset(uint32_t num_subspaces, size_t alignment) const noexcept {
+ return calc_aligned(get_labels_offset() + get_labels_mem_size(num_subspaces), alignment);
+ }
+ uint32_t get_num_subspaces(vespalib::ConstArrayRef<char> buf) const noexcept;
+public:
+ size_t get_array_size(uint32_t num_subspaces) const noexcept {
+ auto cells_mem_size = get_cells_mem_size(num_subspaces);
+ auto alignment = select_alignment(cells_mem_size);
+ return get_cells_offset(num_subspaces, alignment) + calc_aligned(cells_mem_size, alignment);
+ }
+ TensorBufferOperations(const vespalib::eval::ValueType& tensor_type);
+ ~TensorBufferOperations();
+ TensorBufferOperations(const TensorBufferOperations&) = delete;
+ TensorBufferOperations(TensorBufferOperations&&) = delete;
+ TensorBufferOperations& operator=(const TensorBufferOperations&) = delete;
+ TensorBufferOperations& operator=(TensorBufferOperations&&) = delete;
+ void store_tensor(vespalib::ArrayRef<char> buf, const vespalib::eval::Value& tensor);
+ std::unique_ptr<vespalib::eval::Value> make_fast_view(vespalib::ConstArrayRef<char> buf, const vespalib::eval::ValueType& tensor_type) const;
+
+ // Increase reference counts for labels after copying tensor buffer
+ void copied_labels(vespalib::ConstArrayRef<char> buf) const;
+ // Decrease reference counts for labels and invalidate them
+ void reclaim_labels(vespalib::ArrayRef<char> buf) const;
+ // Serialize stored tensor to target (used when saving attribute)
+ void encode_stored_tensor(vespalib::ConstArrayRef<char> buf, const vespalib::eval::ValueType& type, vespalib::nbostream& target) const;
+};
+
+}
diff --git a/vespalib/src/vespa/vespalib/util/shared_string_repo.h b/vespalib/src/vespa/vespalib/util/shared_string_repo.h
index 7ed50bf0858..1479220f1b4 100644
--- a/vespalib/src/vespa/vespalib/util/shared_string_repo.h
+++ b/vespalib/src/vespa/vespalib/util/shared_string_repo.h
@@ -329,6 +329,10 @@ public:
}
const StringIdVector &view() const { return _handles; }
};
+
+ // Used by search::tensor::TensorBufferOperations
+ static string_id unsafe_copy(string_id id) { return _repo.copy(id); }
+ static void unsafe_reclaim(string_id id) { return _repo.reclaim(id); }
};
}
diff --git a/vespalib/src/vespa/vespalib/util/string_id.h b/vespalib/src/vespa/vespalib/util/string_id.h
index 7fec1da0bb8..4655a18be1e 100644
--- a/vespalib/src/vespa/vespalib/util/string_id.h
+++ b/vespalib/src/vespa/vespalib/util/string_id.h
@@ -3,6 +3,7 @@
#pragma once
#include <vespa/vespalib/stllike/allocator.h>
+#include <cstdint>
#include <vector>
namespace vespalib {