// Copyright Vespa.ai. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.

#pragma once

#include "i_document_meta_store_context.h"
#include <vespa/vespalib/stllike/hash_map.h>
#include <vespa/vespalib/stllike/string.h>
#include <atomic>
#include <memory>
#include <shared_mutex>

namespace search { class BitVector; }
namespace vespalib { class MemoryUsage; }

namespace search::attribute {

/**
 * Class that caches posting lists (as bit vectors) for a set of search terms.
 *
 * Lifetime of cached bit vectors is controlled by calling clear() at regular intervals.
 */
class BitVectorSearchCache {
public:
    using BitVectorSP = std::shared_ptr<BitVector>;
    using ReadGuardSP = IDocumentMetaStoreContext::IReadGuard::SP;

    struct Entry {
        // We need to keep a document meta store read guard to ensure that no lids that are cached
        // in the bit vector are re-used until the guard is released.
        ReadGuardSP dmsReadGuard;
        BitVectorSP bitVector;
        uint32_t docIdLimit;
        Entry(ReadGuardSP dmsReadGuard_, BitVectorSP bitVector_, uint32_t docIdLimit_) noexcept
            : dmsReadGuard(std::move(dmsReadGuard_)),
              bitVector(std::move(bitVector_)),
              docIdLimit(docIdLimit_)
        {}
    };

private:
    using Cache = vespalib::hash_map<vespalib::string, std::shared_ptr<Entry>>;

    mutable std::shared_mutex _mutex;
    std::atomic<size_t>       _size;
    size_t                    _entries_extra_memory_usage;
    Cache                     _cache;

public:
    BitVectorSearchCache();
    ~BitVectorSearchCache();
    void insert(const vespalib::string &term, std::shared_ptr<Entry> entry);
    std::shared_ptr<Entry> find(const vespalib::string &term) const;
    size_t size() const { return _size.load(std::memory_order_relaxed); }
    vespalib::MemoryUsage get_memory_usage() const;
    void clear();
};

}
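
// Illustrative usage sketch (not part of the original header): how a caller might
// consult the cache before computing a posting list for a term. The helper
// build_entry() and the names `cache` and `term` are hypothetical placeholders for
// whatever the surrounding attribute read path provides.
//
//   std::shared_ptr<search::attribute::BitVectorSearchCache::Entry>
//   lookup_or_build(search::attribute::BitVectorSearchCache &cache, const vespalib::string &term) {
//       if (auto hit = cache.find(term)) {
//           return hit;                      // cache hit: reuse the shared bit vector
//       }
//       auto entry = build_entry(term);      // builds the bit vector and takes a DMS read guard
//       cache.insert(term, entry);           // the guard keeps cached lids from being re-used
//       return entry;
//   }
//
// clear() is expected to be called at regular intervals (see the class comment above),
// dropping all entries and thereby releasing their document meta store read guards.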