author    Henning Baldersheim <balder@yahoo-inc.com>  2020-04-19 01:37:24 +0000
committer Henning Baldersheim <balder@yahoo-inc.com>  2020-04-19 01:37:24 +0000
commit    499c47d54b37b067e764821e232f78ab643a5b98 (patch)
tree      5012b9a3a56cce013abaa3c7bf0a56a3b4eda1ea /vespamalloc
parent    6a0b64f4be7faaa20265c57e99912abb24eeede2 (diff)
Since gcc does not provide lock-free 16-byte access, we must do so ourselves.
Diffstat (limited to 'vespamalloc')
-rw-r--r--  vespamalloc/src/vespamalloc/malloc/allocchunk.cpp | 16
-rw-r--r--  vespamalloc/src/vespamalloc/malloc/allocchunk.h   | 36
2 files changed, 42 insertions(+), 10 deletions(-)
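
A minimal standalone sketch (not part of the commit) of what the commit message refers to: with gcc, a std::atomic wrapped around a 16-byte {pointer, tag} pair reports itself as not lock free and routes operations through libatomic helpers, which is why the patch hand-rolls cmpxchg16b on x86-64. The TaggedPtr16 name and the build command are illustrative assumptions, not vespamalloc code.

// Sketch only: shows how gcc treats a 16-byte atomic struct.
// Build (assumption): g++ -std=c++17 -mcx16 check16.cpp -latomic
#include <atomic>
#include <cstddef>
#include <cstdio>

struct TaggedPtr16 {            // illustrative stand-in for vespamalloc's TaggedPtr
    void       *ptr;
    std::size_t tag;
};

int main() {
    std::atomic<TaggedPtr16> head{};
    // With current gcc this typically prints 0 and 0: 16-byte atomics are
    // handled by libatomic rather than an inlined, lock-free cmpxchg16b.
    std::printf("is_lock_free: %d\n", (int)head.is_lock_free());
    std::printf("is_always_lock_free: %d\n",
                (int)std::atomic<TaggedPtr16>::is_always_lock_free);
    return 0;
}

Because the standard atomic path is not lock free at this size, the header change below switches the 16-byte {ptr, tag} pair to a hand-written lock cmpxchg16b on x86-64 and keeps std::atomic<TaggedPtr> as the fallback on other platforms.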
diff --git a/vespamalloc/src/vespamalloc/malloc/allocchunk.cpp b/vespamalloc/src/vespamalloc/malloc/allocchunk.cpp
index 41165244e5c..fb95018e94e 100644
--- a/vespamalloc/src/vespamalloc/malloc/allocchunk.cpp
+++ b/vespamalloc/src/vespamalloc/malloc/allocchunk.cpp
@@ -7,7 +7,7 @@ namespace vespamalloc {
void AFListBase::linkInList(AtomicHeadPtr & head, AFListBase * list)
{
AFListBase * tail;
- for (tail = list; tail->_next != NULL ;tail = tail->_next) { }
+ for (tail = list; tail->_next != nullptr ;tail = tail->_next) { }
linkIn(head, list, tail);
}
@@ -16,7 +16,7 @@ void AFListBase::linkIn(AtomicHeadPtr & head, AFListBase * csl, AFListBase * tai
HeadPtr oldHead = head.load(std::memory_order_relaxed);
HeadPtr newHead(csl, oldHead._tag + 1);
tail->_next = static_cast<AFListBase *>(oldHead._ptr);
- while ( ! head.compare_exchange_weak(oldHead, newHead, std::memory_order_release, std::memory_order_relaxed) ) {
+ while ( __builtin_expect(! head.compare_exchange_weak(oldHead, newHead, std::memory_order_release, std::memory_order_relaxed), false) ) {
newHead._tag = oldHead._tag + 1;
tail->_next = static_cast<AFListBase *>(oldHead._ptr);
}
@@ -26,19 +26,19 @@ AFListBase * AFListBase::linkOut(AtomicHeadPtr & head)
{
HeadPtr oldHead = head.load(std::memory_order_relaxed);
AFListBase *csl = static_cast<AFListBase *>(oldHead._ptr);
- if (csl == NULL) {
- return NULL;
+ if (csl == nullptr) {
+ return nullptr;
}
HeadPtr newHead(csl->_next, oldHead._tag + 1);
- while ( ! head.compare_exchange_weak(oldHead, newHead, std::memory_order_acquire, std::memory_order_relaxed) ) {
+ while ( __builtin_expect(! head.compare_exchange_weak(oldHead, newHead, std::memory_order_acquire, std::memory_order_relaxed), false) ) {
csl = static_cast<AFListBase *>(oldHead._ptr);
- if (csl == NULL) {
- return NULL;
+ if (csl == nullptr) {
+ return nullptr;
}
newHead._ptr = csl->_next;
newHead._tag = oldHead._tag + 1;
}
- csl->_next = NULL;
+ csl->_next = nullptr;
return csl;
}
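
The two functions above are the tag-versioned CAS pattern: every successful exchange bumps _tag, so a node that is popped and re-pushed between the load and the compare-exchange (the ABA case) still makes the compare fail. Below is a standalone sketch of the same loops, written against std::atomic with illustrative names (Node, Head, push, pop are not the vespamalloc API); this portable form is what the code used before this commit, and it is exactly the form gcc refuses to make lock free for a 16-byte struct.

// Sketch of a tag-versioned lock-free stack; assumes nodes are never freed
// while other threads may still be traversing them, as in the original code.
#include <atomic>
#include <cstddef>

struct Node { Node *next; };

struct Head {
    Node       *ptr;
    std::size_t tag;
};

// Push a single node onto the stack.
void push(std::atomic<Head> &head, Node *n) {
    Head oldHead = head.load(std::memory_order_relaxed);
    Head newHead{n, oldHead.tag + 1};
    n->next = oldHead.ptr;
    while (!head.compare_exchange_weak(oldHead, newHead,
                                       std::memory_order_release,
                                       std::memory_order_relaxed)) {
        // On failure compare_exchange_weak reloads oldHead; refresh the
        // link and bump the tag before retrying.
        newHead.tag = oldHead.tag + 1;
        n->next = oldHead.ptr;
    }
}

// Pop one node, or return nullptr if the stack is empty.
Node *pop(std::atomic<Head> &head) {
    Head oldHead = head.load(std::memory_order_relaxed);
    Node *n = oldHead.ptr;
    if (n == nullptr) {
        return nullptr;
    }
    Head newHead{n->next, oldHead.tag + 1};
    while (!head.compare_exchange_weak(oldHead, newHead,
                                       std::memory_order_acquire,
                                       std::memory_order_relaxed)) {
        n = oldHead.ptr;
        if (n == nullptr) {
            return nullptr;
        }
        newHead.ptr = n->next;
        newHead.tag = oldHead.tag + 1;
    }
    n->next = nullptr;
    return n;
}

The __builtin_expect added in the commit only marks the retry branch as unlikely; the logic of the loops is unchanged.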
diff --git a/vespamalloc/src/vespamalloc/malloc/allocchunk.h b/vespamalloc/src/vespamalloc/malloc/allocchunk.h
index 8b6a2ce3fc8..64f45067e79 100644
--- a/vespamalloc/src/vespamalloc/malloc/allocchunk.h
+++ b/vespamalloc/src/vespamalloc/malloc/allocchunk.h
@@ -20,16 +20,48 @@ struct TaggedPtr {
TaggedPtr() noexcept : _ptr(nullptr), _tag(0) { }
TaggedPtr(void *h, size_t t) noexcept : _ptr(h), _tag(t) {}
+#if defined(__x86_64__)
+#define VESPA_USE_ATOMIC_TAGGEDPTR
+ TaggedPtr load(std::memory_order = std::memory_order_seq_cst) {
+ return *this;
+ }
+ void store(TaggedPtr ptr) {
+ *this = ptr;
+ }
+ bool
+ compare_exchange_weak(TaggedPtr & oldPtr, TaggedPtr newPtr, std::memory_order, std::memory_order) {
+ char result;
+ __asm__ volatile (
+ "lock ;"
+ "cmpxchg16b %6;"
+ "setz %1;"
+ : "+m" (*this),
+ "=q" (result),
+ "+a" (oldPtr._ptr),
+ "+d" (oldPtr._tag)
+ : "b" (newPtr._ptr),
+ "c" (newPtr._tag)
+ : "cc", "memory"
+ );
+ return result;
+ }
+#endif
+
void *_ptr;
size_t _tag;
-};
+} __attribute__ ((aligned (16)));
class AFListBase
{
public:
using HeadPtr = TaggedPtr;
+#ifdef VESPA_USE_ATOMIC_TAGGEDPTR
+ using AtomicHeadPtr = HeadPtr;
+#else
using AtomicHeadPtr = std::atomic<HeadPtr>;
- AFListBase() : _next(NULL) { }
+#endif
+
+ AFListBase() : _next(nullptr) { }
void setNext(AFListBase * csl) { _next = csl; }
static void init();
static void linkInList(AtomicHeadPtr & head, AFListBase * list);