summary | refs | log | tree | commit | diff | stats
path: root/vespamalloc
diff options
context:
space:
mode:
author	Henning Baldersheim <balder@yahoo-inc.com>	2023-12-05 08:46:57 +0100
committer	GitHub <noreply@github.com>	2023-12-05 08:46:57 +0100
commit	022448ee7265264b12f418381cca099d5eefb34d (patch)
tree	a360f3f0ed902c08dd8ffd4e077f3bd7ba9a9d9b /vespamalloc
parent	05ad919551ee4e6139e8ac03228c57f8a3a3912f (diff)
parent	efc97902cfa043bd4c636fbaddaf67fa8ac39e4f (diff)
Merge pull request #28857 from vespa-engine/balder/add-sanity-checks
Balder/add sanity checks
Diffstat (limited to 'vespamalloc')
-rw-r--r--	vespamalloc/src/vespamalloc/malloc/allocchunk.cpp	14
-rw-r--r--	vespamalloc/src/vespamalloc/malloc/globalpool.h	5
-rw-r--r--	vespamalloc/src/vespamalloc/malloc/globalpool.hpp	11
3 files changed, 23 insertions, 7 deletions
diff --git a/vespamalloc/src/vespamalloc/malloc/allocchunk.cpp b/vespamalloc/src/vespamalloc/malloc/allocchunk.cpp
index 818a10541ce..f519e768b72 100644
--- a/vespamalloc/src/vespamalloc/malloc/allocchunk.cpp
+++ b/vespamalloc/src/vespamalloc/malloc/allocchunk.cpp
@@ -3,26 +3,29 @@
namespace vespamalloc {
-
-void AFListBase::linkInList(AtomicHeadPtr & head, AFListBase * list) noexcept
+void
+AFListBase::linkInList(AtomicHeadPtr & head, AFListBase * list) noexcept
{
AFListBase * tail;
for (tail = list; tail->_next != nullptr ;tail = tail->_next) { }
linkIn(head, list, tail);
}
-void AFListBase::linkIn(AtomicHeadPtr & head, AFListBase * csl, AFListBase * tail) noexcept
+void
+AFListBase::linkIn(AtomicHeadPtr & head, AFListBase * csl, AFListBase * tail) noexcept
{
HeadPtr oldHead = head.load(std::memory_order_relaxed);
HeadPtr newHead(csl, oldHead._tag + 1);
tail->_next = static_cast<AFListBase *>(oldHead._ptr);
+ // linkIn/linkOut performs a release/acquire pair
while ( __builtin_expect(! head.compare_exchange_weak(oldHead, newHead, std::memory_order_release, std::memory_order_relaxed), false) ) {
- newHead._tag = oldHead._tag + 1;
+ newHead._tag = oldHead._tag + 1;
tail->_next = static_cast<AFListBase *>(oldHead._ptr);
}
}
-AFListBase * AFListBase::linkOut(AtomicHeadPtr & head) noexcept
+AFListBase *
+AFListBase::linkOut(AtomicHeadPtr & head) noexcept
{
HeadPtr oldHead = head.load(std::memory_order_relaxed);
auto *csl = static_cast<AFListBase *>(oldHead._ptr);
@@ -30,6 +33,7 @@ AFListBase * AFListBase::linkOut(AtomicHeadPtr & head) noexcept
return nullptr;
}
HeadPtr newHead(csl->_next, oldHead._tag + 1);
+ // linkIn/linkOut performs a release/acquire pair
while ( __builtin_expect(! head.compare_exchange_weak(oldHead, newHead, std::memory_order_acquire, std::memory_order_relaxed), false) ) {
csl = static_cast<AFListBase *>(oldHead._ptr);
if (csl == nullptr) {
diff --git a/vespamalloc/src/vespamalloc/malloc/globalpool.h b/vespamalloc/src/vespamalloc/malloc/globalpool.h
index fea8f8ffff8..97360e0d006 100644
--- a/vespamalloc/src/vespamalloc/malloc/globalpool.h
+++ b/vespamalloc/src/vespamalloc/malloc/globalpool.h
@@ -17,6 +17,8 @@ class AllocPoolT
public:
typedef AFList<MemBlockPtrT> ChunkSList;
AllocPoolT(DataSegment & ds);
+ AllocPoolT(const AllocPoolT & ap) = delete;
+ AllocPoolT & operator = (const AllocPoolT & ap) = delete;
~AllocPoolT();
ChunkSList *getFree(SizeClassT sc, size_t minBlocks);
@@ -40,8 +42,7 @@ private:
ChunkSList * malloc(const Guard & guard, SizeClassT sc) __attribute__((noinline));
ChunkSList * getChunks(const Guard & guard, size_t numChunks) __attribute__((noinline));
ChunkSList * allocChunkList(const Guard & guard) __attribute__((noinline));
- AllocPoolT(const AllocPoolT & ap);
- AllocPoolT & operator = (const AllocPoolT & ap);
+ void validate(const void * ptr) const noexcept;
class AllocFree
{
diff --git a/vespamalloc/src/vespamalloc/malloc/globalpool.hpp b/vespamalloc/src/vespamalloc/malloc/globalpool.hpp
index e68944f118d..2f7ff589492 100644
--- a/vespamalloc/src/vespamalloc/malloc/globalpool.hpp
+++ b/vespamalloc/src/vespamalloc/malloc/globalpool.hpp
@@ -87,6 +87,7 @@ typename AllocPoolT<MemBlockPtrT>::ChunkSList *
AllocPoolT<MemBlockPtrT>::getFree(SizeClassT sc, size_t UNUSED(minBlocks))
{
ChunkSList * csl = getFree(sc);
+ validate(csl);
USE_STAT2(_stat[sc]._getFree.fetch_add(1, std::memory_order_relaxed));
return csl;
}
@@ -97,8 +98,10 @@ AllocPoolT<MemBlockPtrT>::exchangeFree(SizeClassT sc, typename AllocPoolT<MemBlo
{
PARANOID_CHECK1( if (csl->empty() || (csl->count() > ChunkSList::NumBlocks)) { *(int*)0 = 0; } );
AllocFree & af = _scList[sc];
+ validate(af._full.load(std::memory_order_relaxed)._ptr);
ChunkSList::linkIn(af._full, csl, csl);
ChunkSList *ncsl = getFree(sc);
+ validate(ncsl);
USE_STAT2(_stat[sc]._exchangeFree.fetch_add(1, std::memory_order_relaxed));
return ncsl;
}
@@ -109,14 +112,22 @@ AllocPoolT<MemBlockPtrT>::exchangeAlloc(SizeClassT sc, typename AllocPoolT<MemBl
{
PARANOID_CHECK1( if ( ! csl->empty()) { *(int*)0 = 0; } );
AllocFree & af = _scList[sc];
+ validate(af._empty.load(std::memory_order_relaxed)._ptr);
ChunkSList::linkIn(af._empty, csl, csl);
ChunkSList * ncsl = getAlloc(sc);
+ validate(ncsl);
USE_STAT2(_stat[sc]._exchangeAlloc.fetch_add(1, std::memory_order_relaxed));
PARANOID_CHECK1( if (ncsl->empty() || (ncsl->count() > ChunkSList::NumBlocks)) { *(int*)0 = 0; } );
return ncsl;
}
template <typename MemBlockPtrT>
+void
+AllocPoolT<MemBlockPtrT>::validate(const void * ptr) const noexcept {
+ assert((ptr == nullptr) || _dataSegment.containsPtr(ptr));
+}
+
+template <typename MemBlockPtrT>
typename AllocPoolT<MemBlockPtrT>::ChunkSList *
AllocPoolT<MemBlockPtrT>::exactAlloc(size_t exactSize, SizeClassT sc,
typename AllocPoolT<MemBlockPtrT>::ChunkSList * csl)