diff options
Diffstat (limited to 'searchlib')
-rw-r--r-- | searchlib/src/vespa/searchlib/tensor/hnsw_index.cpp | 3
-rw-r--r-- | searchlib/src/vespa/searchlib/tensor/hnsw_index.h | 3
-rw-r--r-- | searchlib/src/vespa/searchlib/tensor/tensor_attribute.cpp | 14
3 files changed, 10 insertions, 10 deletions
diff --git a/searchlib/src/vespa/searchlib/tensor/hnsw_index.cpp b/searchlib/src/vespa/searchlib/tensor/hnsw_index.cpp index 03759e8a5cc..d22f24cc7da 100644 --- a/searchlib/src/vespa/searchlib/tensor/hnsw_index.cpp +++ b/searchlib/src/vespa/searchlib/tensor/hnsw_index.cpp @@ -451,8 +451,7 @@ template <HnswIndexType type> void HnswIndex<type>::internal_prepare_add_node(typename HnswIndex::PreparedAddDoc& op, TypedCells input_vector, const typename GraphType::EntryNode& entry) const { - // TODO: Add capping on num_levels - int node_max_level = _level_generator->max_level(); + int node_max_level = std::min(_level_generator->max_level(), max_max_level); std::vector<typename PreparedAddNode::Links> connections(node_max_level + 1); if (entry.nodeid == 0) { // graph has no entry point diff --git a/searchlib/src/vespa/searchlib/tensor/hnsw_index.h b/searchlib/src/vespa/searchlib/tensor/hnsw_index.h index 272c3df5f2f..bcec0838edf 100644 --- a/searchlib/src/vespa/searchlib/tensor/hnsw_index.h +++ b/searchlib/src/vespa/searchlib/tensor/hnsw_index.h @@ -75,6 +75,9 @@ protected: } } + // Clamp level generator member function max_level() return value + static constexpr uint32_t max_max_level = 29; + GraphType _graph; const DocVectorAccess& _vectors; DistanceFunction::UP _distance_func; diff --git a/searchlib/src/vespa/searchlib/tensor/tensor_attribute.cpp b/searchlib/src/vespa/searchlib/tensor/tensor_attribute.cpp index b63034acfc4..9ee8d9fdf46 100644 --- a/searchlib/src/vespa/searchlib/tensor/tensor_attribute.cpp +++ b/searchlib/src/vespa/searchlib/tensor/tensor_attribute.cpp @@ -83,9 +83,10 @@ uint32_t TensorAttribute::clearDoc(DocId docId) { consider_remove_from_index(docId); - EntryRef oldRef(_refVector[docId].load_relaxed()); updateUncommittedDocIdLimit(docId); - _refVector[docId] = AtomicEntryRef(); + auto& elem_ref = _refVector[docId]; + EntryRef oldRef(elem_ref.load_relaxed()); + elem_ref.store_relaxed(EntryRef()); if (oldRef.valid()) { _tensorStore.holdTensor(oldRef); 
return 1u; @@ -96,7 +97,6 @@ TensorAttribute::clearDoc(DocId docId) void TensorAttribute::onCommit() { - // Note: Cost can be reduced if unneeded generation increments are dropped incGeneration(); if (_tensorStore.consider_compact()) { auto context = _tensorStore.start_compact(getConfig().getCompactionStrategy()); @@ -178,11 +178,9 @@ TensorAttribute::setTensorRef(DocId docId, EntryRef ref) { assert(docId < _refVector.size()); updateUncommittedDocIdLimit(docId); - // TODO: validate if following fence is sufficient. - std::atomic_thread_fence(std::memory_order_release); - // TODO: Check if refVector must consist of std::atomic<EntryRef> - EntryRef oldRef(_refVector[docId].load_relaxed()); - _refVector[docId].store_release(ref); + auto& elem_ref = _refVector[docId]; + EntryRef oldRef(elem_ref.load_relaxed()); + elem_ref.store_release(ref); if (oldRef.valid()) { _tensorStore.holdTensor(oldRef); }