path: root/searchlib/src/vespa/searchlib/tensor/streamed_value_store.h
// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.

#pragma once

#include "tensor_store.h"
#include <vespa/eval/eval/value_type.h>
#include <vespa/eval/eval/value.h>
#include <vespa/eval/streamed/streamed_value.h>
#include <vespa/vespalib/objects/nbostream.h>
#include <vespa/vespalib/util/shared_string_repo.h>

namespace search::tensor {

/**
 * Class for storing tensors in memory as StreamedValue objects.
 * Each tensor is wrapped in a shared TensorEntry kept in a data store
 * and referenced through an EntryRef.
 */
class StreamedValueStore : public TensorStore {
public:
    using Value = vespalib::eval::Value;
    using ValueType = vespalib::eval::ValueType;
    using Handles = vespalib::SharedStringRepo::StrongHandles;
    using MemoryUsage = vespalib::MemoryUsage;

    // interface for tensor entries
    struct TensorEntry {
        using SP = std::shared_ptr<TensorEntry>;
        virtual Value::UP create_fast_value_view(const ValueType &type_ref) const = 0;
        virtual void encode_value(const ValueType &type, vespalib::nbostream &target) const = 0;
        virtual MemoryUsage get_memory_usage() const = 0;
        virtual ~TensorEntry();
        static TensorEntry::SP create_shared_entry(const Value &value);
    };

    // implementation of tensor entries
    template <typename CT>
    struct TensorEntryImpl : public TensorEntry {
        Handles handles;
        std::vector<CT> cells;
        TensorEntryImpl(const Value &value, size_t num_mapped, size_t dense_size);
        Value::UP create_fast_value_view(const ValueType &type_ref) const override;
        void encode_value(const ValueType &type, vespalib::nbostream &target) const override;
        MemoryUsage get_memory_usage() const override;
        ~TensorEntryImpl() override;
    };

private:
    // Note: Must use SP (rather than UP) because fallbackCopy() and initializeReservedElements()
    //       in BufferType, as well as the implementation of move(), need to copy entries.
    using TensorStoreType = vespalib::datastore::DataStore<TensorEntry::SP>;

    class TensorBufferType : public vespalib::datastore::BufferType<TensorEntry::SP> {
    private:
        using ParentType = BufferType<TensorEntry::SP>;
        using ParentType::_emptyEntry;
        using CleanContext = typename ParentType::CleanContext;
    public:
        TensorBufferType();
        void cleanHold(void* buffer, size_t offset, size_t num_elems, CleanContext clean_ctx) override;
    };
    TensorStoreType _concrete_store;
    const vespalib::eval::ValueType _tensor_type;
    EntryRef add_entry(TensorEntry::SP tensor);
public:
    StreamedValueStore(const vespalib::eval::ValueType &tensor_type);
    ~StreamedValueStore() override;

    using RefType = TensorStoreType::RefType;

    void holdTensor(EntryRef ref) override;
    EntryRef move(EntryRef ref) override;

    // Access the stored entry behind a tensor reference.
    const TensorEntry * get_tensor_entry(EntryRef ref) const;
    // Serialize the referenced tensor in binary format into target.
    bool encode_tensor(EntryRef ref, vespalib::nbostream &target) const;

    // Store a tensor value, returning a reference to the stored entry.
    EntryRef store_tensor(const vespalib::eval::Value &tensor);
    // Decode a binary-encoded tensor and store the result.
    EntryRef store_encoded_tensor(vespalib::nbostream &encoded);
};
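
/*
 * Illustrative usage sketch, not part of the upstream header: it shows roughly
 * how a tensor could be stored and round-tripped through this store. The tensor
 * spec string and the `some_value` variable (a vespalib::eval::Value) are
 * assumptions made for the example only; ValueType::from_spec() is assumed to be
 * available via the eval headers included above.
 *
 *   StreamedValueStore store(vespalib::eval::ValueType::from_spec("tensor(x{},y[3])"));
 *   auto ref = store.store_tensor(some_value);       // some_value: assumed vespalib::eval::Value
 *   vespalib::nbostream buf;
 *   if (store.encode_tensor(ref, buf)) {             // binary-encode the stored tensor
 *       auto copy = store.store_encoded_tensor(buf); // decode and store it again
 *   }
 */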


}