path: root/vespamalloc/src/vespamalloc/malloc/common.h
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#pragma once

#include <new>
#include <atomic>
#include <pthread.h>   // pthread_mutex_t used by Mutex below
#include <vespamalloc/util/osmem.h>

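// C hook exported with external linkage and marked noinline, presumably so the symbol
// stays available for tools to intercept; judging by the name, it controls whether the
// allocator is allowed to recurse while a thread is suspended.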
extern "C" void MallocRecurseOnSuspend(bool recurse) __attribute__ ((noinline));

namespace vespamalloc {

#define VESPA_DLL_EXPORT __attribute__ ((visibility("default")))

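// Number of elements in a statically sized array.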
#define NELEMS(a) (sizeof(a)/sizeof(a[0]))

#define NUM_SIZE_CLASSES 32   // Max 64G

static constexpr uint32_t NUM_THREADS = 16384;

#define UNUSED(a)
#ifdef ENABLE_DEBUG
#define DEBUG(a) a
#else
#define DEBUG(a)
#endif

#ifndef PARANOID_LEVEL
#define PARANOID_LEVEL 0
#endif

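// PARANOID_CHECKn(a) expands to 'a' only when PARANOID_LEVEL is at least n, so higher
// levels enable progressively more expensive consistency checks.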
#if (PARANOID_LEVEL >= 0)
#define PARANOID_CHECK0(a) a
#else
#define PARANOID_CHECK0(a)
#endif

#if (PARANOID_LEVEL >= 1)
#define PARANOID_CHECK1(a) a
#else
#define PARANOID_CHECK1(a)
#endif

#if (PARANOID_LEVEL >= 2)
#define PARANOID_CHECK2(a) a
#else
#define PARANOID_CHECK2(a)
#endif

#if (PARANOID_LEVEL >= 3)
#define PARANOID_CHECK3(a) a
#else
#define PARANOID_CHECK3(a)
#endif

using OSMemory = MmapMemory;
using SizeClassT = int;

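// 0x200000 = 2 MiB; blocks at or above this size are presumably always handed back for
// reuse instead of being cached per thread.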
constexpr size_t ALWAYS_REUSE_LIMIT = 0x200000ul;
   
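// Index of the most significant set bit in v. Undefined for v == 0, because
// __builtin_clzl(0) is undefined.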
inline constexpr int msbIdx(uint64_t v) {
    return (sizeof(v)*8 - 1) - __builtin_clzl(v);
}    

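// Maps allocation sizes to power-of-two size classes. Class 0 covers all sizes up to
// 2^MinClassSizeC bytes, and each subsequent class doubles the block size.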
template <size_t MinClassSizeC>
class CommonT
{
public:
    static constexpr size_t MAX_ALIGN = 0x200000ul;
    enum {MinClassSize = MinClassSizeC};
    static inline constexpr SizeClassT sizeClass(size_t sz) {
        SizeClassT tmp(msbIdx(sz - 1) - (MinClassSizeC - 1));
        return (sz <= (size_t(1) << MinClassSizeC)) ? 0 : tmp;
    }
    static inline constexpr size_t classSize(SizeClassT sc) { return (size_t(1) << (sc + MinClassSizeC)); }
};
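// A small sketch of the mapping, assuming MinClassSizeC == 5 (the actual template
// argument is chosen by the allocator configuration):
//   CommonT<5>::sizeClass(100) == 2    // sizes in (64, 128] share class 2
//   CommonT<5>::classSize(2)   == 128  // 2^(2 + 5)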

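// Deliberate write through a null pointer: forces an immediate segfault, presumably so
// that failed consistency checks terminate with a stack trace at the point of detection.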
inline void crash() { *((volatile unsigned *) nullptr) = 0; }

template <typename T>
inline void swap(T & a, T & b)      { T tmp(a); a = b; b = tmp; }

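// Thin wrapper around pthread_mutex_t. The _use flag presumably keeps lock()/unlock()
// as no-ops until init() enables real locking, and stopRecursion()/allowRecursion()
// toggle a global flag guarding against recursive use.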
class Mutex
{
public:
    Mutex() : _mutex(), _use(false) { }
    ~Mutex()           { quit(); }
    void lock();
    void unlock();
    static void addThread()      { _threadCount.fetch_add(1); }
    static void subThread()      { _threadCount.fetch_sub(1); }
    static void stopRecursion()  { _stopRecursion = true; }
    static void allowRecursion() { _stopRecursion = false; }
    void init();
    void quit();
private:
    static std::atomic<uint32_t> _threadCount;
    static bool     _stopRecursion;
    Mutex(const Mutex & org);
    Mutex & operator = (const Mutex & org);
    pthread_mutex_t  _mutex;
    bool             _use;
};

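// RAII scoped lock: takes the mutex in the constructor and releases it in the destructor.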
class Guard
{
public:
    Guard(Mutex & m);
    ~Guard()                      { _mutex->unlock(); }
private:
    Mutex * _mutex;
};

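// Interface a concrete allocator exposes for per-thread setup/teardown and for switching
// on full thread support once the process goes multi-threaded.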
class IAllocator
{
public:
    virtual ~IAllocator() {}
    virtual bool initThisThread() = 0;
    virtual bool quitThisThread() = 0;
    virtual void enableThreadSupport() = 0;
    virtual void setReturnAddressStop(const void * returnAddressStop) = 0;
    virtual size_t getMaxNumThreads() const = 0;
};

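// Presumably dumps allocator statistics and configuration; implemented by the allocator proper.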
void info();

}