From 34570bf83e8fd5ca1f4000929ea4735235b33367 Mon Sep 17 00:00:00 2001 From: Geir Storli Date: Wed, 2 Sep 2020 07:36:44 +0000 Subject: Extra used bytes must also be accounted in allocated bytes when creating memory stats. --- .../tests/datastore/datastore/datastore_test.cpp | 39 ++++++++++++---------- .../src/vespa/vespalib/datastore/datastorebase.cpp | 35 ++++++++++--------- 2 files changed, 41 insertions(+), 33 deletions(-) (limited to 'vespalib') diff --git a/vespalib/src/tests/datastore/datastore/datastore_test.cpp b/vespalib/src/tests/datastore/datastore/datastore_test.cpp index 00da8835c06..448df2d24bd 100644 --- a/vespalib/src/tests/datastore/datastore/datastore_test.cpp +++ b/vespalib/src/tests/datastore/datastore/datastore_test.cpp @@ -23,25 +23,15 @@ private: using ParentType::_activeBufferIds; public: MyStore() {} - - void - holdBuffer(uint32_t bufferId) - { + void holdBuffer(uint32_t bufferId) { ParentType::holdBuffer(bufferId); } - - void - holdElem(EntryRef ref, uint64_t len) - { + void holdElem(EntryRef ref, uint64_t len) { ParentType::holdElem(ref, len); } - - void - transferHoldLists(generation_t generation) - { + void transferHoldLists(generation_t generation) { ParentType::transferHoldLists(generation); } - void trimElemHoldList(generation_t usedGen) override { ParentType::trimElemHoldList(usedGen); } @@ -54,13 +44,13 @@ public: void enableFreeLists() { ParentType::enableFreeLists(); } - - void - switchActiveBuffer() - { + void switchActiveBuffer() { ParentType::switchActiveBuffer(0, 0u); } size_t activeBufferId() const { return _activeBufferIds[0]; } + BufferState& get_active_buffer_state() { + return ParentType::getBufferState(activeBufferId()); + } }; @@ -454,6 +444,21 @@ TEST(DataStoreTest, require_that_memory_stats_are_calculated) m._freeBuffers = MyRef::numBuffers() - 1; m._holdBuffers = 0; assertMemStats(m, s.getMemStats()); + + { // increase extra used bytes + auto prev_stats = s.getMemStats(); + s.get_active_buffer_state().incExtraUsedBytes(50); + auto curr_stats = s.getMemStats(); + EXPECT_EQ(prev_stats._allocBytes + 50, curr_stats._allocBytes); + EXPECT_EQ(prev_stats._usedBytes + 50, curr_stats._usedBytes); + } + + { // increase extra hold bytes + auto prev_stats = s.getMemStats(); + s.get_active_buffer_state().incExtraHoldBytes(30); + auto curr_stats = s.getMemStats(); + EXPECT_EQ(prev_stats._holdBytes + 30, curr_stats._holdBytes); + } } TEST(DataStoreTest, require_that_memory_usage_is_calculated) diff --git a/vespalib/src/vespa/vespalib/datastore/datastorebase.cpp b/vespalib/src/vespa/vespalib/datastore/datastorebase.cpp index 4e68b10d76a..15135af67b5 100644 --- a/vespalib/src/vespa/vespalib/datastore/datastorebase.cpp +++ b/vespalib/src/vespa/vespalib/datastore/datastorebase.cpp @@ -271,6 +271,23 @@ DataStoreBase::disableElemHoldList() } } +namespace { + +void +add_buffer_state_to_mem_stats(const BufferState& state, size_t elementSize, DataStoreBase::MemStats& stats) +{ + size_t extra_used_bytes = state.getExtraUsedBytes(); + stats._allocElems += state.capacity(); + stats._usedElems += state.size(); + stats._deadElems += state.getDeadElems(); + stats._holdElems += state.getHoldElems(); + stats._allocBytes += (state.capacity() * elementSize) + extra_used_bytes; + stats._usedBytes += (state.size() * elementSize) + extra_used_bytes; + stats._deadBytes += state.getDeadElems() * elementSize; + stats._holdBytes += (state.getHoldElems() * elementSize) + state.getExtraHoldBytes(); +} + +} DataStoreBase::MemStats DataStoreBase::getMemStats() const @@ -285,25 +302,11 
@@ DataStoreBase::getMemStats() const } else if (state == BufferState::ACTIVE) { size_t elementSize = typeHandler->elementSize(); ++stats._activeBuffers; - stats._allocElems += bState.capacity(); - stats._usedElems += bState.size(); - stats._deadElems += bState.getDeadElems(); - stats._holdElems += bState.getHoldElems(); - stats._allocBytes += bState.capacity() * elementSize; - stats._usedBytes += (bState.size() * elementSize) + bState.getExtraUsedBytes(); - stats._deadBytes += bState.getDeadElems() * elementSize; - stats._holdBytes += (bState.getHoldElems() * elementSize) + bState.getExtraHoldBytes(); + add_buffer_state_to_mem_stats(bState, elementSize, stats); } else if (state == BufferState::HOLD) { size_t elementSize = typeHandler->elementSize(); ++stats._holdBuffers; - stats._allocElems += bState.capacity(); - stats._usedElems += bState.size(); - stats._deadElems += bState.getDeadElems(); - stats._holdElems += bState.getHoldElems(); - stats._allocBytes += bState.capacity() * elementSize; - stats._usedBytes += (bState.size() * elementSize) + bState.getExtraUsedBytes(); - stats._deadBytes += bState.getDeadElems() * elementSize; - stats._holdBytes += (bState.getHoldElems() * elementSize) + bState.getExtraHoldBytes(); + add_buffer_state_to_mem_stats(bState, elementSize, stats); } else { LOG_ABORT("should not be reached"); } -- cgit v1.2.3 From 16e05ad4e7c5792db9808cc00fec9e499afd09c4 Mon Sep 17 00:00:00 2001 From: Geir Storli Date: Wed, 2 Sep 2020 09:45:37 +0000 Subject: Track heap allocated memory for tensors in DirectTensorStore. --- .../direct_tensor_store_test.cpp | 18 ++++++++- .../vespa/searchlib/tensor/direct_tensor_store.cpp | 46 ++++++++++++++++------ .../vespa/searchlib/tensor/direct_tensor_store.h | 18 +++++++-- vespalib/src/vespa/vespalib/datastore/datastore.h | 9 +++-- .../src/vespa/vespalib/datastore/datastore.hpp | 8 ++-- 5 files changed, 76 insertions(+), 23 deletions(-) (limited to 'vespalib') diff --git a/searchlib/src/tests/tensor/direct_tensor_store/direct_tensor_store_test.cpp b/searchlib/src/tests/tensor/direct_tensor_store/direct_tensor_store_test.cpp index 1932e6d7d1f..600ead15bdb 100644 --- a/searchlib/src/tests/tensor/direct_tensor_store/direct_tensor_store_test.cpp +++ b/searchlib/src/tests/tensor/direct_tensor_store/direct_tensor_store_test.cpp @@ -55,6 +55,18 @@ TEST_F(DirectTensorStoreTest, can_set_and_get_tensor) expect_tensor(exp, ref); } +TEST_F(DirectTensorStoreTest, heap_allocated_memory_is_tracked) +{ + store.store_tensor(make_tensor(5)); + auto mem_1 = store.getMemoryUsage(); + auto ref = store.store_tensor(make_tensor(10)); + size_t tensor_memory_used = store.get_tensor(ref)->count_memory_used(); + auto mem_2 = store.getMemoryUsage(); + EXPECT_GT(tensor_memory_used, 100); + EXPECT_GE(mem_2.allocatedBytes(), mem_1.allocatedBytes() + tensor_memory_used); + EXPECT_GT(mem_2.usedBytes(), mem_1.usedBytes() + tensor_memory_used); +} + TEST_F(DirectTensorStoreTest, invalid_ref_returns_nullptr) { const auto* t = store.get_tensor(EntryRef()); @@ -64,16 +76,18 @@ TEST_F(DirectTensorStoreTest, invalid_ref_returns_nullptr) TEST_F(DirectTensorStoreTest, hold_adds_entry_to_hold_list) { auto ref = store.store_tensor(make_tensor(5)); + size_t tensor_memory_used = store.get_tensor(ref)->count_memory_used(); auto mem_1 = store.getMemoryUsage(); store.holdTensor(ref); auto mem_2 = store.getMemoryUsage(); - EXPECT_GT(mem_2.allocatedBytesOnHold(), mem_1.allocatedBytesOnHold()); + EXPECT_GT(mem_2.allocatedBytesOnHold(), mem_1.allocatedBytesOnHold() + tensor_memory_used); } 
 TEST_F(DirectTensorStoreTest, move_allocates_new_entry_and_puts_old_entry_on_hold)
 {
     auto t = make_tensor(5);
     auto* exp = t.get();
+    size_t tensor_memory_used = exp->count_memory_used();
     auto ref_1 = store.store_tensor(std::move(t));
     auto mem_1 = store.getMemoryUsage();
@@ -82,7 +96,7 @@ TEST_F(DirectTensorStoreTest, move_allocates_new_entry_and_puts_old_entry_on_hol
     EXPECT_NE(ref_1, ref_2);
     expect_tensor(exp, ref_1);
     expect_tensor(exp, ref_2);
-    EXPECT_GT(mem_2.allocatedBytesOnHold(), mem_1.allocatedBytesOnHold());
+    EXPECT_GT(mem_2.allocatedBytesOnHold(), mem_1.allocatedBytesOnHold() + tensor_memory_used);
 }
 
 GTEST_MAIN_RUN_ALL_TESTS()
diff --git a/searchlib/src/vespa/searchlib/tensor/direct_tensor_store.cpp b/searchlib/src/vespa/searchlib/tensor/direct_tensor_store.cpp
index 13ec9228fe6..938f9706dc5 100644
--- a/searchlib/src/vespa/searchlib/tensor/direct_tensor_store.cpp
+++ b/searchlib/src/vespa/searchlib/tensor/direct_tensor_store.cpp
@@ -10,9 +10,34 @@ namespace search::tensor {
 
 constexpr size_t MIN_BUFFER_ARRAYS = 8192;
 
+DirectTensorStore::TensorBufferType::TensorBufferType()
+    : ParentType(1, MIN_BUFFER_ARRAYS, TensorStoreType::RefType::offsetSize())
+{
+}
+
+void
+DirectTensorStore::TensorBufferType::cleanHold(void* buffer, size_t offset, size_t num_elems, CleanContext clean_ctx)
+{
+    TensorSP* elem = static_cast<TensorSP*>(buffer) + offset;
+    for (size_t i = 0; i < num_elems; ++i) {
+        clean_ctx.extraBytesCleaned((*elem)->count_memory_used());
+        *elem = _emptyEntry;
+        ++elem;
+    }
+}
+
+EntryRef
+DirectTensorStore::add_entry(TensorSP tensor)
+{
+    auto ref = _tensor_store.addEntry(tensor);
+    auto& state = _tensor_store.getBufferState(RefType(ref).bufferId());
+    state.incExtraUsedBytes(tensor->count_memory_used());
+    return ref;
+}
+
 DirectTensorStore::DirectTensorStore()
-    : TensorStore(_concrete_store),
-      _concrete_store(MIN_BUFFER_ARRAYS)
+    : TensorStore(_tensor_store),
+      _tensor_store(std::make_unique<TensorBufferType>())
 {
 }
 
@@ -22,7 +47,7 @@ DirectTensorStore::get_tensor(EntryRef ref) const
 {
     if (!ref.valid()) {
         return nullptr;
     }
-    auto entry = _concrete_store.getEntry(ref);
+    const auto& entry = _tensor_store.getEntry(ref);
     assert(entry);
     return entry.get();
 }
@@ -31,8 +56,7 @@ EntryRef
 DirectTensorStore::store_tensor(std::unique_ptr<Tensor> tensor)
 {
     assert(tensor);
-    // TODO: Account for heap allocated memory
-    return _concrete_store.addEntry(TensorSP(tensor.release()));
+    return add_entry(TensorSP(tensor.release()));
 }
 
 void
@@ -41,8 +65,9 @@ DirectTensorStore::holdTensor(EntryRef ref)
 {
     if (!ref.valid()) {
         return;
     }
-    // TODO: Account for heap allocated memory
-    _concrete_store.holdElem(ref, 1);
+    const auto& tensor = _tensor_store.getEntry(ref);
+    assert(tensor);
+    _tensor_store.holdElem(ref, 1, tensor->count_memory_used());
 }
 
 EntryRef
@@ -51,11 +76,10 @@ DirectTensorStore::move(EntryRef ref)
 {
     if (!ref.valid()) {
         return EntryRef();
     }
-    auto old_tensor = _concrete_store.getEntry(ref);
+    const auto& old_tensor = _tensor_store.getEntry(ref);
     assert(old_tensor);
-    // TODO: Account for heap allocated memory (regular + hold)
-    auto new_ref = _concrete_store.addEntry(old_tensor);
-    _concrete_store.holdElem(ref, 1);
+    auto new_ref = add_entry(old_tensor);
+    _tensor_store.holdElem(ref, 1, old_tensor->count_memory_used());
     return new_ref;
 }
 
diff --git a/searchlib/src/vespa/searchlib/tensor/direct_tensor_store.h b/searchlib/src/vespa/searchlib/tensor/direct_tensor_store.h
index 5b9c72ce89c..9b68a3bec12 100644
--- a/searchlib/src/vespa/searchlib/tensor/direct_tensor_store.h
+++ b/searchlib/src/vespa/searchlib/tensor/direct_tensor_store.h
@@ -17,13 +17,25 @@ private:
     // Note: Must use SP (instead of UP) because of fallbackCopy() and initializeReservedElements() in BufferType,
     // and implementation of move().
     using TensorSP = std::shared_ptr<Tensor>;
-    using DataStoreType = vespalib::datastore::DataStore<TensorSP>;
+    using TensorStoreType = vespalib::datastore::DataStore<TensorSP>;
 
-    DataStoreType _concrete_store;
+    class TensorBufferType : public vespalib::datastore::BufferType<TensorSP> {
+    private:
+        using ParentType = BufferType<TensorSP>;
+        using ParentType::_emptyEntry;
+        using CleanContext = typename ParentType::CleanContext;
+    public:
+        TensorBufferType();
+        virtual void cleanHold(void* buffer, size_t offset, size_t num_elems, CleanContext clean_ctx) override;
+    };
+
+    TensorStoreType _tensor_store;
+
+    EntryRef add_entry(TensorSP tensor);
 
 public:
     DirectTensorStore();
 
-    using RefType = DataStoreType::RefType;
+    using RefType = TensorStoreType::RefType;
     const Tensor* get_tensor(EntryRef ref) const;
     EntryRef store_tensor(std::unique_ptr<Tensor> tensor);
diff --git a/vespalib/src/vespa/vespalib/datastore/datastore.h b/vespalib/src/vespa/vespalib/datastore/datastore.h
index f437616af2a..e67d9049f0b 100644
--- a/vespalib/src/vespa/vespalib/datastore/datastore.h
+++ b/vespalib/src/vespa/vespalib/datastore/datastore.h
@@ -98,14 +98,17 @@ protected:
     using ParentType::dropBuffers;
     using ParentType::initActiveBuffers;
     using ParentType::addType;
+    using BufferTypeUP = std::unique_ptr<BufferType<EntryType>>;
+
+    BufferTypeUP _type;
+
-    BufferType<EntryType> _type;
 public:
-    typedef typename ParentType::RefType RefType;
+    using RefType = typename ParentType::RefType;
 
     DataStore(const DataStore &rhs) = delete;
     DataStore &operator=(const DataStore &rhs) = delete;
     DataStore();
-    DataStore(uint32_t min_arrays);
+    DataStore(BufferTypeUP type);
     ~DataStore();
 
     EntryRef addEntry(const EntryType &e);
diff --git a/vespalib/src/vespa/vespalib/datastore/datastore.hpp b/vespalib/src/vespa/vespalib/datastore/datastore.hpp
index 71cc52a6838..b66a3b78603 100644
--- a/vespalib/src/vespa/vespalib/datastore/datastore.hpp
+++ b/vespalib/src/vespa/vespalib/datastore/datastore.hpp
@@ -133,16 +133,16 @@ DataStoreT<RefT>::freeListRawAllocator(uint32_t typeId)
 
 template <typename EntryType, typename RefT>
 DataStore<EntryType, RefT>::DataStore()
-    : DataStore(RefType::offsetSize())
+    : DataStore(std::make_unique<BufferType<EntryType>>(1, RefType::offsetSize(), RefType::offsetSize()))
 {
 }
 
 template <typename EntryType, typename RefT>
-DataStore<EntryType, RefT>::DataStore(uint32_t min_arrays)
+DataStore<EntryType, RefT>::DataStore(BufferTypeUP type)
     : ParentType(),
-      _type(1, min_arrays, RefType::offsetSize())
+      _type(std::move(type))
 {
-    addType(&_type);
+    addType(_type.get());
     initActiveBuffers();
 }
-- cgit v1.2.3
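
Note (not part of the patches): the intent of the first commit can be illustrated with a minimal sketch against the MyStore fixture from datastore_test.cpp above. It assumes MyStore's entry type is int (as in that test file); everything else uses only members visible in the diff.

// Sketch only: after the first patch, extra used bytes raise both allocated
// and used bytes, and extra hold bytes raise hold bytes, in getMemStats().
TEST(DataStoreTest, sketch_extra_bytes_are_reflected_in_mem_stats)
{
    MyStore s;
    s.addEntry(10);                                        // make the active buffer non-empty
    auto before = s.getMemStats();
    s.get_active_buffer_state().incExtraUsedBytes(50);     // e.g. heap memory owned by an entry
    s.get_active_buffer_state().incExtraHoldBytes(30);     // held share of that heap memory
    auto after = s.getMemStats();
    EXPECT_EQ(before._allocBytes + 50, after._allocBytes); // previously only _usedBytes grew
    EXPECT_EQ(before._usedBytes + 50, after._usedBytes);
    EXPECT_EQ(before._holdBytes + 30, after._holdBytes);
}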
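Similarly, the second commit's accounting can be sketched from the caller's point of view. This is an illustration only, using calls visible in the diffs above; make_tensor() is the helper from direct_tensor_store_test.cpp, and the MemoryUsage getters are assumed to behave as in that test.

// Sketch only: lifecycle of the heap bytes owned by a stored tensor.
DirectTensorStore store;
auto t = make_tensor(5);                                   // unit-test helper returning a Tensor
size_t heap_bytes = t->count_memory_used();
auto ref = store.store_tensor(std::move(t));               // add_entry() -> incExtraUsedBytes(heap_bytes)
// getMemoryUsage().usedBytes() and allocatedBytes() now include heap_bytes.
store.holdTensor(ref);                                     // holdElem(ref, 1, heap_bytes)
// heap_bytes is reported via allocatedBytesOnHold() until the hold lists are trimmed,
// at which point TensorBufferType::cleanHold() subtracts it again via extraBytesCleaned().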