author    Tor Egge <Tor.Egge@online.no>    2022-10-07 15:11:11 +0200
committer Tor Egge <Tor.Egge@online.no>    2022-10-07 15:11:11 +0200
commit    8207d6537a4a4a05f4416c0cf8b6291cada00a56
tree      2a78353c66dd3755cab05d3085f0700e63a14724
parent    2ab9d0d8486e1bce09574ac91507b7e4ea22e6bc
Remove search::tensor::StreamedValueStore.
-rw-r--r--  searchlib/src/vespa/searchlib/tensor/CMakeLists.txt            |   1
-rw-r--r--  searchlib/src/vespa/searchlib/tensor/streamed_value_store.cpp  | 288
-rw-r--r--  searchlib/src/vespa/searchlib/tensor/streamed_value_store.h    |  84
3 files changed, 0 insertions, 373 deletions
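For context, a hypothetical usage sketch (not part of this patch) of the public API that disappears with this commit. It assumes the pre-removal searchlib and eval headers are on the include path; the tensor type string, labels, and main() wrapper are invented for illustration only.

#include <vespa/searchlib/tensor/streamed_value_store.h>  // file removed by this commit
#include <vespa/eval/eval/fast_value.h>
#include <vespa/eval/eval/tensor_spec.h>
#include <vespa/eval/eval/value_codec.h>
#include <cassert>

using search::tensor::StreamedValueStore;
using namespace vespalib::eval;

int main() {
    // The store is bound to one tensor type at construction time.
    ValueType type = ValueType::from_spec("tensor(x{})");
    StreamedValueStore store(type);

    // Build a mapped tensor value and hand it to the store; the returned
    // EntryRef is the compact handle an attribute vector would keep per document.
    auto value = value_from_spec(TensorSpec("tensor(x{})")
                                     .add({{"x", "a"}}, 1.0)
                                     .add({{"x", "b"}}, 2.0),
                                 FastValueBuilderFactory::get());
    auto ref = store.store_tensor(*value);

    // Read it back as a fast-value view over the stored labels and cells.
    auto read_back = store.get_tensor(ref);
    assert(read_back && read_back->index().size() == 2);

    // Release the entry again (normally done by the owning attribute).
    store.holdTensor(ref);
    return 0;
}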
diff --git a/searchlib/src/vespa/searchlib/tensor/CMakeLists.txt b/searchlib/src/vespa/searchlib/tensor/CMakeLists.txt
index 46bfc0909aa..75f453ddcbc 100644
--- a/searchlib/src/vespa/searchlib/tensor/CMakeLists.txt
+++ b/searchlib/src/vespa/searchlib/tensor/CMakeLists.txt
@@ -30,7 +30,6 @@ vespa_add_library(searchlib_tensor OBJECT
serialized_fast_value_attribute.cpp
small_subspaces_buffer_type.cpp
streamed_value_saver.cpp
- streamed_value_store.cpp
tensor_attribute.cpp
tensor_buffer_operations.cpp
tensor_buffer_store.cpp
diff --git a/searchlib/src/vespa/searchlib/tensor/streamed_value_store.cpp b/searchlib/src/vespa/searchlib/tensor/streamed_value_store.cpp
deleted file mode 100644
index e8752a3145a..00000000000
--- a/searchlib/src/vespa/searchlib/tensor/streamed_value_store.cpp
+++ /dev/null
@@ -1,288 +0,0 @@
-// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#include "streamed_value_store.h"
-#include <vespa/eval/eval/value.h>
-#include <vespa/eval/eval/value_codec.h>
-#include <vespa/eval/eval/fast_value.hpp>
-#include <vespa/eval/streamed/streamed_value_builder_factory.h>
-#include <vespa/eval/streamed/streamed_value_view.h>
-#include <vespa/vespalib/datastore/buffer_type.hpp>
-#include <vespa/vespalib/datastore/compacting_buffers.h>
-#include <vespa/vespalib/datastore/compaction_context.h>
-#include <vespa/vespalib/datastore/compaction_strategy.h>
-#include <vespa/vespalib/datastore/datastore.hpp>
-#include <vespa/vespalib/objects/nbostream.h>
-#include <vespa/vespalib/util/size_literals.h>
-#include <vespa/vespalib/util/typify.h>
-#include <vespa/log/log.h>
-
-LOG_SETUP(".searchlib.tensor.streamed_value_store");
-
-using vespalib::datastore::CompactionContext;
-using vespalib::datastore::CompactionSpec;
-using vespalib::datastore::CompactionStrategy;
-using vespalib::datastore::EntryRef;
-using vespalib::datastore::Handle;
-using vespalib::datastore::ICompactionContext;
-using namespace vespalib::eval;
-using vespalib::ConstArrayRef;
-using vespalib::MemoryUsage;
-using vespalib::string_id;
-using vespalib::StringIdVector;
-
-namespace search::tensor {
-
-//-----------------------------------------------------------------------------
-
-namespace {
-
-template <typename CT, typename F>
-void each_subspace(const Value &value, size_t num_mapped, size_t dense_size, F f) {
- size_t subspace;
- std::vector<string_id> addr(num_mapped);
- std::vector<string_id*> refs;
- refs.reserve(addr.size());
- for (string_id &label: addr) {
- refs.push_back(&label);
- }
- auto cells = value.cells().typify<CT>();
- auto view = value.index().create_view({});
- view->lookup({});
- while (view->next_result(refs, subspace)) {
- size_t offset = subspace * dense_size;
- f(ConstArrayRef<string_id>(addr), ConstArrayRef<CT>(cells.begin() + offset, dense_size));
- }
-}
-
-using TensorEntry = StreamedValueStore::TensorEntry;
-
-struct CreateTensorEntry {
- template <typename CT>
- static TensorEntry::SP invoke(const Value &value, size_t num_mapped, size_t dense_size) {
- using EntryImpl = StreamedValueStore::TensorEntryImpl<CT>;
- return std::make_shared<EntryImpl>(value, num_mapped, dense_size);
- }
-};
-
-struct MyFastValueView final : Value {
- const ValueType &my_type;
- FastValueIndex my_index;
- TypedCells my_cells;
- MyFastValueView(const ValueType &type_ref, const StringIdVector &handle_view, TypedCells cells, size_t num_mapped, size_t num_spaces)
- : my_type(type_ref),
- my_index(num_mapped, handle_view, num_spaces),
- my_cells(cells)
- {
- const StringIdVector &labels = handle_view;
- for (size_t i = 0; i < num_spaces; ++i) {
- ConstArrayRef<string_id> addr(labels.data() + (i * num_mapped), num_mapped);
- my_index.map.add_mapping(FastAddrMap::hash_labels(addr));
- }
- assert(my_index.map.size() == num_spaces);
- }
- const ValueType &type() const override { return my_type; }
- const Value::Index &index() const override { return my_index; }
- TypedCells cells() const override { return my_cells; }
- MemoryUsage get_memory_usage() const override {
- MemoryUsage usage = self_memory_usage<MyFastValueView>();
- usage.merge(my_index.map.estimate_extra_memory_usage());
- return usage;
- }
-};
-
-} // <unnamed>
-
-//-----------------------------------------------------------------------------
-
-StreamedValueStore::TensorEntry::~TensorEntry() = default;
-
-StreamedValueStore::TensorEntry::SP
-StreamedValueStore::TensorEntry::create_shared_entry(const Value &value)
-{
- size_t num_mapped = value.type().count_mapped_dimensions();
- size_t dense_size = value.type().dense_subspace_size();
- return vespalib::typify_invoke<1,TypifyCellType,CreateTensorEntry>(value.type().cell_type(), value, num_mapped, dense_size);
-}
-
-template <typename CT>
-StreamedValueStore::TensorEntryImpl<CT>::TensorEntryImpl(const Value &value, size_t num_mapped, size_t dense_size)
- : handles(),
- cells()
-{
- handles.reserve(num_mapped * value.index().size());
- cells.reserve(dense_size * value.index().size());
- auto store_subspace = [&](auto addr, auto data) {
- for (string_id label: addr) {
- handles.push_back(label);
- }
- for (CT entry: data) {
- cells.push_back(entry);
- }
- };
- each_subspace<CT>(value, num_mapped, dense_size, store_subspace);
-}
-
-template <typename CT>
-Value::UP
-StreamedValueStore::TensorEntryImpl<CT>::create_fast_value_view(const ValueType &type_ref) const
-{
- size_t num_mapped = type_ref.count_mapped_dimensions();
- size_t dense_size = type_ref.dense_subspace_size();
- size_t num_spaces = cells.size() / dense_size;
- assert(dense_size * num_spaces == cells.size());
- assert(num_mapped * num_spaces == handles.view().size());
- return std::make_unique<MyFastValueView>(type_ref, handles.view(), TypedCells(cells), num_mapped, num_spaces);
-}
-
-template <typename CT>
-void
-StreamedValueStore::TensorEntryImpl<CT>::encode_value(const ValueType &type, vespalib::nbostream &target) const
-{
- size_t num_mapped = type.count_mapped_dimensions();
- size_t dense_size = type.dense_subspace_size();
- size_t num_spaces = cells.size() / dense_size;
- assert(dense_size * num_spaces == cells.size());
- assert(num_mapped * num_spaces == handles.view().size());
- StreamedValueView my_value(type, num_mapped, TypedCells(cells), num_spaces, handles.view());
- ::vespalib::eval::encode_value(my_value, target);
-}
-
-template <typename CT>
-MemoryUsage
-StreamedValueStore::TensorEntryImpl<CT>::get_memory_usage() const
-{
- MemoryUsage usage = self_memory_usage<TensorEntryImpl<CT>>();
- usage.merge(vector_extra_memory_usage(handles.view()));
- usage.merge(vector_extra_memory_usage(cells));
- return usage;
-}
-
-template <typename CT>
-StreamedValueStore::TensorEntryImpl<CT>::~TensorEntryImpl() = default;
-
-//-----------------------------------------------------------------------------
-
-constexpr size_t MIN_BUFFER_ARRAYS = 8_Ki;
-
-StreamedValueStore::TensorBufferType::TensorBufferType() noexcept
- : ParentType(1, MIN_BUFFER_ARRAYS, TensorStoreType::RefType::offsetSize())
-{
-}
-
-void
-StreamedValueStore::TensorBufferType::cleanHold(void* buffer, size_t offset, ElemCount num_elems, CleanContext clean_ctx)
-{
- TensorEntry::SP* elem = static_cast<TensorEntry::SP*>(buffer) + offset;
- const auto& empty = empty_entry();
- for (size_t i = 0; i < num_elems; ++i) {
- clean_ctx.extraBytesCleaned((*elem)->get_memory_usage().allocatedBytes());
- *elem = empty;
- ++elem;
- }
-}
-
-StreamedValueStore::StreamedValueStore(const ValueType &tensor_type)
- : TensorStore(_concrete_store),
- _concrete_store(std::make_unique<TensorBufferType>()),
- _tensor_type(tensor_type)
-{
- _concrete_store.enableFreeLists();
-}
-
-StreamedValueStore::~StreamedValueStore() = default;
-
-EntryRef
-StreamedValueStore::add_entry(TensorEntry::SP tensor)
-{
- auto ref = _concrete_store.addEntry(tensor);
- auto& state = _concrete_store.getBufferState(RefType(ref).bufferId());
- state.stats().inc_extra_used_bytes(tensor->get_memory_usage().allocatedBytes());
- return ref;
-}
-
-const StreamedValueStore::TensorEntry *
-StreamedValueStore::get_tensor_entry(EntryRef ref) const
-{
- if (!ref.valid()) {
- return nullptr;
- }
- const auto& entry = _concrete_store.getEntry(ref);
- assert(entry);
- return entry.get();
-}
-
-std::unique_ptr<vespalib::eval::Value>
-StreamedValueStore::get_tensor(EntryRef ref) const
-{
- if (const auto * ptr = get_tensor_entry(ref)) {
- return ptr->create_fast_value_view(_tensor_type);
- }
- return {};
-}
-
-void
-StreamedValueStore::holdTensor(EntryRef ref)
-{
- if (!ref.valid()) {
- return;
- }
- const auto& tensor = _concrete_store.getEntry(ref);
- assert(tensor);
- _concrete_store.holdElem(ref, 1, tensor->get_memory_usage().allocatedBytes());
-}
-
-TensorStore::EntryRef
-StreamedValueStore::move(EntryRef ref)
-{
- if (!ref.valid()) {
- return EntryRef();
- }
- const auto& old_tensor = _concrete_store.getEntry(ref);
- assert(old_tensor);
- auto new_ref = add_entry(old_tensor);
- _concrete_store.holdElem(ref, 1, old_tensor->get_memory_usage().allocatedBytes());
- return new_ref;
-}
-
-vespalib::MemoryUsage
-StreamedValueStore::update_stat(const CompactionStrategy& compaction_strategy)
-{
- auto memory_usage = _store.getMemoryUsage();
- _compaction_spec = CompactionSpec(compaction_strategy.should_compact_memory(memory_usage), false);
- return memory_usage;
-}
-
-std::unique_ptr<ICompactionContext>
-StreamedValueStore::start_compact(const CompactionStrategy& compaction_strategy)
-{
- auto compacting_buffers = _store.start_compact_worst_buffers(_compaction_spec, compaction_strategy);
- return std::make_unique<CompactionContext>(*this, std::move(compacting_buffers));
-}
-
-bool
-StreamedValueStore::encode_stored_tensor(EntryRef ref, vespalib::nbostream &target) const
-{
- if (const auto * entry = get_tensor_entry(ref)) {
- entry->encode_value(_tensor_type, target);
- return true;
- } else {
- return false;
- }
-}
-
-TensorStore::EntryRef
-StreamedValueStore::store_tensor(const Value &tensor)
-{
- assert(tensor.type() == _tensor_type);
- return add_entry(TensorEntry::create_shared_entry(tensor));
-}
-
-TensorStore::EntryRef
-StreamedValueStore::store_encoded_tensor(vespalib::nbostream &encoded)
-{
- const auto &factory = StreamedValueBuilderFactory::get();
- auto val = vespalib::eval::decode_value(encoded, factory);
- return store_tensor(*val);
-}
-
-}
diff --git a/searchlib/src/vespa/searchlib/tensor/streamed_value_store.h b/searchlib/src/vespa/searchlib/tensor/streamed_value_store.h
deleted file mode 100644
index 58137e316dd..00000000000
--- a/searchlib/src/vespa/searchlib/tensor/streamed_value_store.h
+++ /dev/null
@@ -1,84 +0,0 @@
-// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#pragma once
-
-#include "tensor_store.h"
-#include <vespa/eval/eval/value_type.h>
-#include <vespa/eval/eval/value.h>
-#include <vespa/eval/streamed/streamed_value.h>
-#include <vespa/vespalib/datastore/datastore.h>
-#include <vespa/vespalib/objects/nbostream.h>
-#include <vespa/vespalib/util/shared_string_repo.h>
-
-namespace search::tensor {
-
-/**
- * Class for StreamedValue tensors in memory.
- */
-class StreamedValueStore : public TensorStore {
-public:
- using Value = vespalib::eval::Value;
- using ValueType = vespalib::eval::ValueType;
- using Handles = vespalib::SharedStringRepo::Handles;
- using MemoryUsage = vespalib::MemoryUsage;
-
- // interface for tensor entries
- struct TensorEntry {
- using SP = std::shared_ptr<TensorEntry>;
- virtual Value::UP create_fast_value_view(const ValueType &type_ref) const = 0;
- virtual void encode_value(const ValueType &type, vespalib::nbostream &target) const = 0;
- virtual MemoryUsage get_memory_usage() const = 0;
- virtual ~TensorEntry();
- static TensorEntry::SP create_shared_entry(const Value &value);
- };
-
- // implementation of tensor entries
- template <typename CT>
- struct TensorEntryImpl : public TensorEntry {
- Handles handles;
- std::vector<CT> cells;
- TensorEntryImpl(const Value &value, size_t num_mapped, size_t dense_size);
- Value::UP create_fast_value_view(const ValueType &type_ref) const override;
- void encode_value(const ValueType &type, vespalib::nbostream &target) const override;
- MemoryUsage get_memory_usage() const override;
- ~TensorEntryImpl() override;
- };
-
-private:
- // Note: Must use SP (instead of UP) because of fallbackCopy() and initializeReservedElements() in BufferType,
- // and implementation of move().
- using TensorStoreType = vespalib::datastore::DataStore<TensorEntry::SP>;
-
- class TensorBufferType : public vespalib::datastore::BufferType<TensorEntry::SP> {
- private:
- using ParentType = BufferType<TensorEntry::SP>;
- using ParentType::empty_entry;
- using CleanContext = typename ParentType::CleanContext;
- public:
- TensorBufferType() noexcept;
- void cleanHold(void* buffer, size_t offset, ElemCount num_elems, CleanContext clean_ctx) override;
- };
- TensorStoreType _concrete_store;
- const vespalib::eval::ValueType _tensor_type;
- EntryRef add_entry(TensorEntry::SP tensor);
- const TensorEntry* get_tensor_entry(EntryRef ref) const;
-public:
- StreamedValueStore(const vespalib::eval::ValueType &tensor_type);
- ~StreamedValueStore() override;
-
- using RefType = TensorStoreType::RefType;
-
- void holdTensor(EntryRef ref) override;
- EntryRef move(EntryRef ref) override;
- vespalib::MemoryUsage update_stat(const vespalib::datastore::CompactionStrategy& compaction_strategy) override;
- std::unique_ptr<vespalib::datastore::ICompactionContext> start_compact(const vespalib::datastore::CompactionStrategy& compaction_strategy) override;
-
- std::unique_ptr<vespalib::eval::Value> get_tensor(EntryRef ref) const;
- bool encode_stored_tensor(EntryRef ref, vespalib::nbostream &target) const;
-
- EntryRef store_tensor(const vespalib::eval::Value &tensor);
- EntryRef store_encoded_tensor(vespalib::nbostream &encoded);
-};
-
-
-}
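The deleted header's note about using TensorEntry::SP instead of a unique_ptr reflects how move() and cleanHold() in the deleted .cpp behave: during compaction the entry is copied into a new buffer slot while the old slot is only put on hold, so both slots must share ownership for a while. Below is a minimal, self-contained sketch of that pattern using only standard library types; Entry and the two buffers are invented stand-ins, not Vespa's datastore.

#include <cassert>
#include <memory>
#include <vector>

struct Entry { int payload; };
using EntrySP = std::shared_ptr<Entry>;

int main() {
    std::vector<EntrySP> old_buffer{std::make_shared<Entry>(Entry{42})};
    std::vector<EntrySP> new_buffer;

    // "move": copy the shared pointer into the new buffer ...
    new_buffer.push_back(old_buffer[0]);
    // ... while the old slot is merely put on hold, not cleared yet, so
    // readers still holding the old reference continue to see a valid entry.
    assert(old_buffer[0]->payload == 42 && new_buffer[0]->payload == 42);

    // Later, when the hold is released (cleanHold in the deleted code),
    // the old slot is reset; the entry survives via the new buffer.
    old_buffer[0].reset();
    assert(new_buffer[0]->payload == 42);
    return 0;
}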