summary refs log tree commit diff stats
path: root/vespamalloc
diff options
context:
space:
mode:
authorHenning Baldersheim <balder@yahoo-inc.com>2016-12-22 08:48:46 +0100
committerHenning Baldersheim <balder@yahoo-inc.com>2017-01-06 23:21:15 +0100
commit56b220ba1cf20f0ccb443ee684239622b3f6376b (patch)
tree07fd623d4a6d01ef0ff75436962706003116968e /vespamalloc
parentb815ae29f5a58f271114df351d4d01785413c1da (diff)
Break dependency on vespalib, use std::atomic instead.
Diffstat (limited to 'vespamalloc')
-rw-r--r--vespamalloc/CMakeLists.txt2
-rw-r--r--vespamalloc/src/vespamalloc/malloc/allocchunk.cpp6
-rw-r--r--vespamalloc/src/vespamalloc/malloc/allocchunk.h55
-rw-r--r--vespamalloc/src/vespamalloc/malloc/common.cpp4
-rw-r--r--vespamalloc/src/vespamalloc/malloc/common.h20
-rw-r--r--vespamalloc/src/vespamalloc/malloc/datasegment.h2
-rw-r--r--vespamalloc/src/vespamalloc/malloc/globalpool.h26
-rw-r--r--vespamalloc/src/vespamalloc/malloc/globalpool.hpp30
-rw-r--r--vespamalloc/src/vespamalloc/malloc/threadlist.h6
-rw-r--r--vespamalloc/src/vespamalloc/malloc/threadlist.hpp8
-rw-r--r--vespamalloc/src/vespamalloc/malloc/threadproxy.cpp11
-rw-r--r--vespamalloc/src/vespamalloc/malloc/threadproxy.h2
-rw-r--r--vespamalloc/src/vespamalloc/util/index.h12
-rw-r--r--vespamalloc/src/vespamalloc/util/osmem.h8
14 files changed, 120 insertions, 72 deletions
diff --git a/vespamalloc/CMakeLists.txt b/vespamalloc/CMakeLists.txt
index ef629698590..11bc1b60983 100644
--- a/vespamalloc/CMakeLists.txt
+++ b/vespamalloc/CMakeLists.txt
@@ -3,7 +3,7 @@ add_compile_options(-fvisibility=hidden)
add_definitions(-DPARANOID_LEVEL=0)
vespa_define_module(
- DEPENDS
+ TEST_DEPENDS
fastos
vespalib
vespalog
diff --git a/vespamalloc/src/vespamalloc/malloc/allocchunk.cpp b/vespamalloc/src/vespamalloc/malloc/allocchunk.cpp
index 1a21e6f1c14..03165398423 100644
--- a/vespamalloc/src/vespamalloc/malloc/allocchunk.cpp
+++ b/vespamalloc/src/vespamalloc/malloc/allocchunk.cpp
@@ -1,5 +1,5 @@
// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include <vespamalloc/malloc/allocchunk.h>
+#include "allocchunk.h"
namespace vespamalloc {
@@ -12,9 +12,7 @@ void AFListBase::init()
_link = new (_atomicLinkSpace)AtomicLink();
}
-AFListBase::LinkI::~LinkI()
-{
-}
+AFListBase::LinkI::~LinkI() { }
void AFListBase::linkInList(HeadPtr & head, AFListBase * list)
{
diff --git a/vespamalloc/src/vespamalloc/malloc/allocchunk.h b/vespamalloc/src/vespamalloc/malloc/allocchunk.h
index 48128e12687..383459a897c 100644
--- a/vespamalloc/src/vespamalloc/malloc/allocchunk.h
+++ b/vespamalloc/src/vespamalloc/malloc/allocchunk.h
@@ -1,15 +1,66 @@
// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#pragma once
-#include <vespamalloc/malloc/common.h>
+#include "common.h"
#include <algorithm>
namespace vespamalloc {
+#define ATOMIC_TAGGEDPTR_ALIGNMENT __attribute__ ((aligned (16)))
+
+/**
+ * Copied from vespalib to avoid code dependencies.
+ */
+class Atomic {
+public:
+ /**
+ * @brief Pointer and tag - use instead of bare pointer for cmpSwap()
+ *
+ * When making a lock-free data structure by using cmpSwap
+ * on pointers, you'll often run into the "ABA problem", see
+ * http://en.wikipedia.org/wiki/ABA_problem for details.
+ * The TaggedPtr makes it easy to do the workaround with tag bits,
+ * but requires the double-word compare-and-swap instruction.
+ * Very early AMD K7/K8 CPUs are lacking this and will fail (Illegal Instruction).
+ **/
+ struct TaggedPtr {
+ TaggedPtr() : _ptr(nullptr), _tag(0) {}
+
+ TaggedPtr(void *h, size_t t) : _ptr(h), _tag(t) {}
+
+ void *_ptr;
+ size_t _tag;
+ };
+
+ static bool cmpSwap(volatile TaggedPtr *dest, TaggedPtr newVal, TaggedPtr oldVal) {
+ char result;
+ void *ptr;
+ size_t tag;
+#if defined(__x86_64__)
+ __asm__ volatile ("lock ;"
+ "cmpxchg16b %8;"
+ "setz %1;"
+ : "=m" (*dest),
+ "=q" (result),
+ "=a" (ptr),
+ "=d" (tag)
+ : "a" (oldVal._ptr),
+ "d" (oldVal._tag),
+ "b" (newVal._ptr),
+ "c" (newVal._tag),
+ "m" (*dest)
+ : "memory");
+#else
+#error "Only supports X86_64"
+#endif
+ return result;
+ }
+};
+
class AFListBase
{
public:
- typedef Atomic::TaggedPtr HeadPtr;
+ using HeadPtr = Atomic::TaggedPtr;
AFListBase() : _next(NULL) { }
void setNext(AFListBase * csl) { _next = csl; }
static void init();
diff --git a/vespamalloc/src/vespamalloc/malloc/common.cpp b/vespamalloc/src/vespamalloc/malloc/common.cpp
index d14a0317630..ed0551cd853 100644
--- a/vespamalloc/src/vespamalloc/malloc/common.cpp
+++ b/vespamalloc/src/vespamalloc/malloc/common.cpp
@@ -1,10 +1,10 @@
// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include <vespamalloc/malloc/common.h>
+#include "common.h"
#include <pthread.h>
namespace vespamalloc {
-uint32_t Mutex::_threadCount = 0;
+std::atomic<uint32_t> Mutex::_threadCount(0);
bool Mutex::_stopRecursion = true;
void Mutex::lock()
diff --git a/vespamalloc/src/vespamalloc/malloc/common.h b/vespamalloc/src/vespamalloc/malloc/common.h
index ee08cfbafaa..428eb3c7a83 100644
--- a/vespamalloc/src/vespamalloc/malloc/common.h
+++ b/vespamalloc/src/vespamalloc/malloc/common.h
@@ -1,13 +1,10 @@
// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#pragma once
-#include <vespa/vespalib/util/atomic.h>
-#include <vespa/vespalib/util/optimized.h>
#include <new>
+#include <atomic>
#include <vespamalloc/util/osmem.h>
-using vespalib::Atomic;
-
extern "C" void MallocRecurseOnSuspend(bool recurse) __attribute__ ((noinline));
namespace vespamalloc {
@@ -58,13 +55,20 @@ typedef MmapMemory OSMemory;
typedef int SizeClassT;
+
+inline int msbIdx(uint64_t v) {
+ int64_t result;
+ __asm __volatile("bsrq %0,%0" : "=r" (result) : "0" (v));
+ return result;
+}
+
template <size_t MinClassSizeC>
class CommonT
{
public:
enum {MinClassSize = MinClassSizeC};
static inline SizeClassT sizeClass(size_t sz) {
- SizeClassT tmp(vespalib::Optimized::msbIdx(sz - 1) - (MinClassSizeC - 1));
+ SizeClassT tmp(msbIdx(sz - 1) - (MinClassSizeC - 1));
return (sz <= (1 << MinClassSizeC )) ? 0 : tmp;
}
static inline size_t classSize(SizeClassT sc) { return (size_t(1) << (sc + MinClassSizeC)); }
@@ -82,14 +86,14 @@ public:
~Mutex() { quit(); }
void lock();
void unlock();
- static void addThread() { Atomic::postInc(&_threadCount); }
- static void subThread() { Atomic::postDec(&_threadCount); }
+ static void addThread() { _threadCount++; }
+ static void subThread() { _threadCount--; }
static void stopRecursion() { _stopRecursion = true; }
static void allowRecursion() { _stopRecursion = false; }
void init();
void quit();
private:
- static uint32_t _threadCount;
+ static std::atomic<uint32_t> _threadCount;
static bool _stopRecursion;
Mutex(const Mutex & org);
Mutex & operator = (const Mutex & org);
diff --git a/vespamalloc/src/vespamalloc/malloc/datasegment.h b/vespamalloc/src/vespamalloc/malloc/datasegment.h
index c50d43dc1d8..50f19233f04 100644
--- a/vespamalloc/src/vespamalloc/malloc/datasegment.h
+++ b/vespamalloc/src/vespamalloc/malloc/datasegment.h
@@ -1,7 +1,7 @@
// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#pragma once
-#include <limits.h>
+#include <climits>
#include <memory>
#include <vespamalloc/malloc/common.h>
#include <vespamalloc/util/traceutil.h>
diff --git a/vespamalloc/src/vespamalloc/malloc/globalpool.h b/vespamalloc/src/vespamalloc/malloc/globalpool.h
index 0669780b796..41b1418ccbf 100644
--- a/vespamalloc/src/vespamalloc/malloc/globalpool.h
+++ b/vespamalloc/src/vespamalloc/malloc/globalpool.h
@@ -1,9 +1,9 @@
// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#pragma once
-#include <vespamalloc/malloc/common.h>
-#include <vespamalloc/malloc/allocchunk.h>
-#include <vespamalloc/malloc/datasegment.h>
+#include "common.h"
+#include "allocchunk.h"
+#include "datasegment.h"
#include <algorithm>
#define USE_STAT2(a) a
@@ -58,13 +58,13 @@ private:
_exchangeFree(0),
_exactAlloc(0),
_return(0),_malloc(0) { }
- size_t _getAlloc;
- size_t _getFree;
- size_t _exchangeAlloc;
- size_t _exchangeFree;
- size_t _exactAlloc;
- size_t _return;
- size_t _malloc;
+ std::atomic<size_t> _getAlloc;
+ std::atomic<size_t> _getFree;
+ std::atomic<size_t> _exchangeAlloc;
+ std::atomic<size_t> _exchangeFree;
+ std::atomic<size_t> _exactAlloc;
+ std::atomic<size_t> _return;
+ std::atomic<size_t> _malloc;
bool isUsed() const {
// Do not count _getFree.
return (_getAlloc || _exchangeAlloc || _exchangeFree || _exactAlloc || _return || _malloc);
@@ -73,11 +73,11 @@ private:
Mutex _mutex;
ChunkSList * _chunkPool;
- AllocFree _scList[NUM_SIZE_CLASSES] VESPALIB_ATOMIC_TAGGEDPTR_ALIGNMENT;
+ AllocFree _scList[NUM_SIZE_CLASSES] ATOMIC_TAGGEDPTR_ALIGNMENT;
DataSegment<MemBlockPtrT> & _dataSegment;
- size_t _getChunks;
+ std::atomic<size_t> _getChunks;
size_t _getChunksSum;
- size_t _allocChunkList;
+ std::atomic<size_t> _allocChunkList;
Stat _stat[NUM_SIZE_CLASSES];
static size_t _threadCacheLimit __attribute__((visibility("hidden")));
static size_t _alwaysReuseLimit __attribute__((visibility("hidden")));
diff --git a/vespamalloc/src/vespamalloc/malloc/globalpool.hpp b/vespamalloc/src/vespamalloc/malloc/globalpool.hpp
index b620c388fb6..c2893817ab7 100644
--- a/vespamalloc/src/vespamalloc/malloc/globalpool.hpp
+++ b/vespamalloc/src/vespamalloc/malloc/globalpool.hpp
@@ -1,12 +1,10 @@
// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#pragma once
-#include <vespamalloc/malloc/globalpool.h>
+#include "globalpool.h"
#define USE_STAT2(a) a
-using vespalib::Atomic;
-
namespace vespamalloc {
template <typename MemBlockPtrT>
@@ -76,7 +74,7 @@ AllocPoolT<MemBlockPtrT>::getAlloc(SizeClassT sc)
return NULL;
}
}
- USE_STAT2(Atomic::postInc(&_stat[sc]._getAlloc));
+ USE_STAT2(_stat[sc]._getAlloc++);
}
PARANOID_CHECK1( if (csl->empty() || (csl->count() > ChunkSList::NumBlocks)) { *(int*)0 = 0; } );
return csl;
@@ -87,7 +85,7 @@ typename AllocPoolT<MemBlockPtrT>::ChunkSList *
AllocPoolT<MemBlockPtrT>::getFree(SizeClassT sc, size_t UNUSED(minBlocks))
{
ChunkSList * csl = getFree(sc);
- USE_STAT2(Atomic::postInc(&_stat[sc]._getFree));
+ USE_STAT2(_stat[sc]._getFree.fetch_add(1));
return csl;
}
@@ -99,7 +97,7 @@ AllocPoolT<MemBlockPtrT>::exchangeFree(SizeClassT sc, typename AllocPoolT<MemBlo
AllocFree & af = _scList[sc];
ChunkSList::linkIn(af._full, csl, csl);
ChunkSList *ncsl = getFree(sc);
- USE_STAT2(Atomic::postInc(&_stat[sc]._exchangeFree));
+ USE_STAT2(_stat[sc]._exchangeFree.fetch_add(1));
return ncsl;
}
@@ -111,7 +109,7 @@ AllocPoolT<MemBlockPtrT>::exchangeAlloc(SizeClassT sc, typename AllocPoolT<MemBl
AllocFree & af = _scList[sc];
ChunkSList::linkIn(af._empty, csl, csl);
ChunkSList * ncsl = getAlloc(sc);
- USE_STAT2(Atomic::postInc(&_stat[sc]._exchangeAlloc));
+ USE_STAT2(_stat[sc]._exchangeAlloc.fetch_add(1));
PARANOID_CHECK1( if (ncsl->empty() || (ncsl->count() > ChunkSList::NumBlocks)) { *(int*)0 = 0; } );
return ncsl;
}
@@ -126,7 +124,7 @@ AllocPoolT<MemBlockPtrT>::exactAlloc(size_t exactSize, SizeClassT sc,
MemBlockPtrT mem(exactBlock, MemBlockPtrT::unAdjustSize(adjustedSize));
csl->add(mem);
ChunkSList * ncsl = csl;
- USE_STAT2(Atomic::postInc(&_stat[sc]._exactAlloc));
+ USE_STAT2(_stat[sc]._exactAlloc.fetch_add(1));
mem.logBigBlock(exactSize, mem.adjustSize(exactSize), MemBlockPtrT::classSize(sc));
PARANOID_CHECK1( if (ncsl->empty() || (ncsl->count() > ChunkSList::NumBlocks)) { *(int*)0 = 0; } );
return ncsl;
@@ -149,7 +147,7 @@ AllocPoolT<MemBlockPtrT>::returnMemory(SizeClassT sc,
}
completelyEmpty = csl;
#endif
- USE_STAT2(Atomic::postInc(&_stat[sc]._return));
+ USE_STAT2(_stat[sc]._return.fetch_add(1));
return completelyEmpty;
}
@@ -193,7 +191,7 @@ AllocPoolT<MemBlockPtrT>::malloc(const Guard & guard, SizeClassT sc)
}
}
PARANOID_CHECK1( for (ChunkSList * c(csl); c; c = c->getNext()) { if (c->empty()) { *(int*)1 = 1; } } );
- USE_STAT2(Atomic::postInc(&_stat[sc]._malloc));
+ USE_STAT2(_stat[sc]._malloc.fetch_add(1));
return csl;
}
@@ -223,7 +221,7 @@ AllocPoolT<MemBlockPtrT>::getChunks(const Guard & guard, size_t numChunks)
} else {
csl = NULL;
}
- USE_STAT2(Atomic::postInc(&_getChunks));
+ USE_STAT2(_getChunks.fetch_add(1));
USE_STAT2(_getChunksSum+=numChunks);
PARANOID_CHECK1( for (ChunkSList * c(csl); c; c = c->getNext()) { if ( ! c->empty()) { *(int*)1 = 1; } } );
return csl;
@@ -245,7 +243,7 @@ AllocPoolT<MemBlockPtrT>::allocChunkList(const Guard & guard)
}
newList[chunksInBlock-1].setNext(NULL);
}
- USE_STAT2(Atomic::postInc(&_allocChunkList));
+ USE_STAT2(_allocChunkList.fetch_add(1));
return newList;
}
@@ -254,16 +252,16 @@ void AllocPoolT<MemBlockPtrT>::info(FILE * os, size_t level)
{
if (level > 0) {
fprintf(os, "GlobalPool getChunks(%ld, %ld) allocChunksList(%ld):\n",
- _getChunks, _getChunksSum, _allocChunkList);
+ _getChunks.load(), _getChunksSum, _allocChunkList.load());
for (size_t i = 0; i < NELEMS(_stat); i++) {
const Stat & s = _stat[i];
if (s.isUsed()) {
fprintf(os, "SC %2ld(%10ld) GetAlloc(%6ld) GetFree(%6ld) "
"ExChangeAlloc(%6ld) ExChangeFree(%6ld) ExactAlloc(%6ld) "
"Returned(%6ld) Malloc(%6ld)\n",
- i, MemBlockPtrT::classSize(i), s._getAlloc, s._getFree,
- s._exchangeAlloc, s._exchangeFree, s._exactAlloc,
- s._return, s._malloc);
+ i, MemBlockPtrT::classSize(i), s._getAlloc.load(), s._getFree.load(),
+ s._exchangeAlloc.load(), s._exchangeFree.load(), s._exactAlloc.load(),
+ s._return.load(), s._malloc.load());
}
}
}
diff --git a/vespamalloc/src/vespamalloc/malloc/threadlist.h b/vespamalloc/src/vespamalloc/malloc/threadlist.h
index 9901c9f6960..875aec2942f 100644
--- a/vespamalloc/src/vespamalloc/malloc/threadlist.h
+++ b/vespamalloc/src/vespamalloc/malloc/threadlist.h
@@ -1,7 +1,7 @@
// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#pragma once
-#include <vespamalloc/malloc/threadpool.h>
+#include "threadpool.h"
namespace vespamalloc {
@@ -41,8 +41,8 @@ private:
ThreadListT & operator = (const ThreadListT & tl);
enum {ThreadStackSize=2048*1024};
volatile bool _isThreaded;
- volatile size_t _threadCount;
- volatile size_t _threadCountAccum;
+ std::atomic<size_t> _threadCount;
+ std::atomic<size_t> _threadCountAccum;
ThreadPool _threadVector[NUM_THREADS];
AllocPoolT<MemBlockPtrT> & _allocPool;
static __thread ThreadPool * _myPool TLS_LINKAGE;
diff --git a/vespamalloc/src/vespamalloc/malloc/threadlist.hpp b/vespamalloc/src/vespamalloc/malloc/threadlist.hpp
index a1ea517beed..8aa9b6a90b5 100644
--- a/vespamalloc/src/vespamalloc/malloc/threadlist.hpp
+++ b/vespamalloc/src/vespamalloc/malloc/threadlist.hpp
@@ -1,7 +1,7 @@
// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#pragma once
-#include <vespamalloc/malloc/threadlist.h>
+#include "threadlist.h"
namespace vespamalloc {
@@ -48,7 +48,7 @@ bool ThreadListT<MemBlockPtrT, ThreadStatT>::quitThisThread()
{
ThreadPool & tp = getCurrent();
tp.quit();
- Atomic::postDec(&_threadCount);
+ _threadCount.fetch_sub(1);
return true;
}
@@ -56,8 +56,8 @@ template <typename MemBlockPtrT, typename ThreadStatT>
bool ThreadListT<MemBlockPtrT, ThreadStatT>::initThisThread()
{
bool retval(true);
- Atomic::postInc(&_threadCount);
- size_t lidAccum = Atomic::postInc(&_threadCountAccum);
+ _threadCount.fetch_add(1);
+ size_t lidAccum = _threadCountAccum.fetch_add(1);
long localId(-1);
for(size_t i = 0; (localId < 0) && (i < getMaxNumThreads()); i++) {
ThreadPool & tp = _threadVector[i];
diff --git a/vespamalloc/src/vespamalloc/malloc/threadproxy.cpp b/vespamalloc/src/vespamalloc/malloc/threadproxy.cpp
index 17da09f9b35..e5ded83e5d4 100644
--- a/vespamalloc/src/vespamalloc/malloc/threadproxy.cpp
+++ b/vespamalloc/src/vespamalloc/malloc/threadproxy.cpp
@@ -1,9 +1,6 @@
// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include <vespamalloc/malloc/threadproxy.h>
+#include "threadproxy.h"
#include <dlfcn.h>
-#include <pthread.h>
-#include <cstdio>
-#include <cerrno>
namespace vespamalloc {
@@ -34,7 +31,7 @@ typedef int (*pthread_create_function) (pthread_t *thread,
int linuxthreads_pthread_getattr_np(pthread_t pid, pthread_attr_t *dst);
static void * _G_mallocThreadProxyReturnAddress = NULL;
-static volatile size_t _G_threadCount = 1; // You always have the main thread.
+static volatile std::atomic<size_t> _G_threadCount(1); // You always have the main thread.
static void cleanupThread(void * arg)
{
@@ -42,7 +39,7 @@ static void cleanupThread(void * arg)
delete ta;
vespamalloc::_G_myMemP->quitThisThread();
vespamalloc::Mutex::subThread();
- vespalib::Atomic::postDec(&_G_threadCount);
+ _G_threadCount.fetch_sub(1);
}
void * mallocThreadProxy (void * arg)
@@ -79,7 +76,7 @@ VESPA_DLL_EXPORT int local_pthread_create (pthread_t *thread,
{
size_t numThreads;
for (numThreads = _G_threadCount
- ;(numThreads < vespamalloc::_G_myMemP->getMaxNumThreads()) && ! vespalib::Atomic::cmpSwap(&_G_threadCount, numThreads+1, numThreads)
+ ;(numThreads < vespamalloc::_G_myMemP->getMaxNumThreads()) && ! _G_threadCount.compare_exchange_strong(numThreads, numThreads+1)
; numThreads = _G_threadCount) {
}
if (numThreads >= vespamalloc::_G_myMemP->getMaxNumThreads()) {
diff --git a/vespamalloc/src/vespamalloc/malloc/threadproxy.h b/vespamalloc/src/vespamalloc/malloc/threadproxy.h
index 4865e5fbd5f..0d86bef9e95 100644
--- a/vespamalloc/src/vespamalloc/malloc/threadproxy.h
+++ b/vespamalloc/src/vespamalloc/malloc/threadproxy.h
@@ -1,7 +1,7 @@
// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#pragma once
-#include <vespamalloc/malloc/common.h>
+#include "common.h"
namespace vespamalloc {
diff --git a/vespamalloc/src/vespamalloc/util/index.h b/vespamalloc/src/vespamalloc/util/index.h
index f7513114edc..59c9b3a1801 100644
--- a/vespamalloc/src/vespamalloc/util/index.h
+++ b/vespamalloc/src/vespamalloc/util/index.h
@@ -1,7 +1,7 @@
// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#pragma once
-#include <vespa/vespalib/util/atomic.h>
+#include <atomic>
#include <stdio.h>
namespace vespamalloc {
@@ -26,12 +26,12 @@ public:
typedef size_t index_t;
AtomicIndex(index_t index = 0) : _index(index) { }
operator index_t () const { return _index; }
- index_t operator ++ (int) { return vespalib::Atomic::postInc(&_index); }
- index_t operator -- (int) { return vespalib::Atomic::postDec(&_index); }
- index_t operator += (index_t v) { return _index += v; }
- index_t operator -= (index_t v) { return _index -= v; }
+ index_t operator ++ (int) { return _index.fetch_add(1); }
+ index_t operator -- (int) { return _index.fetch_sub(1); }
+ index_t operator += (index_t v) { return _index.fetch_add(v); }
+ index_t operator -= (index_t v) { return _index.fetch_sub(v); }
private:
- index_t _index;
+ std::atomic<index_t> _index;
};
}
diff --git a/vespamalloc/src/vespamalloc/util/osmem.h b/vespamalloc/src/vespamalloc/util/osmem.h
index f5c51c2000d..b95f8ac72e8 100644
--- a/vespamalloc/src/vespamalloc/util/osmem.h
+++ b/vespamalloc/src/vespamalloc/util/osmem.h
@@ -1,11 +1,11 @@
// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#pragma once
-#include <ctype.h>
-#include <stdlib.h>
+#include <cctype>
+#include <cstdlib>
#include <unistd.h>
-#include <assert.h>
-#include <string.h>
+#include <cassert>
+#include <cstring>
#include <algorithm>
namespace vespamalloc {