aboutsummaryrefslogtreecommitdiffstats
path: root/vespamalloc/src/vespamalloc/malloc/common.h
blob: d81062d7fe372741ace88c42f9a8f8e7b9263ac4 (plain) (blame)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
// Copyright Vespa.ai. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#pragma once

#include <new>
#include <atomic>
#include <cassert>
#include <cstdio>
#include <vespamalloc/util/osmem.h>
#include <thread>

// C-linkage hook to toggle whether the allocator may recurse while suspended.
// noinline keeps it as a distinct, easily-breakpointed symbol (presumably for
// external tooling/debuggers — confirm at the definition site).
extern "C" void MallocRecurseOnSuspend(bool recurse) __attribute__ ((noinline));

namespace vespamalloc {

// Marks a symbol as exported from the shared library even when the build
// defaults to hidden visibility.
#define VESPA_DLL_EXPORT __attribute__ ((visibility("default")))

// Number of elements in a C array. Fully parenthesized so the macro composes
// safely inside larger expressions (the old unparenthesized form made
// `x / NELEMS(a)` expand to `x / sizeof(a) / sizeof(a[0])`).
#define NELEMS(a) (sizeof(a)/sizeof(a[0]))

#define NUM_SIZE_CLASSES 32   // Max 64G

// Upper bound on the number of threads the allocator tracks.
static constexpr uint32_t NUM_THREADS = 16384;

// Marks an intentionally unused macro argument.
#define UNUSED(a)
// DEBUG(a) compiles `a` in only when ENABLE_DEBUG is defined.
#ifdef ENABLE_DEBUG
#define DEBUG(a) a
#else
#define DEBUG(a)
#endif

// PARANOID_LEVEL selects how much (increasingly expensive) consistency
// checking is compiled in. PARANOID_CHECKn(a) expands to `a` only when the
// configured level is at least n; otherwise it expands to nothing.
#ifndef PARANOID_LEVEL
#define PARANOID_LEVEL 0
#endif

#if (PARANOID_LEVEL >= 0)
#define PARANOID_CHECK0(a) a
#else
#define PARANOID_CHECK0(a)
#endif

#if (PARANOID_LEVEL >= 1)
#define PARANOID_CHECK1(a) a
#else
#define PARANOID_CHECK1(a)
#endif

#if (PARANOID_LEVEL >= 2)
#define PARANOID_CHECK2(a) a
#else
#define PARANOID_CHECK2(a)
#endif

#if (PARANOID_LEVEL >= 3)
#define PARANOID_CHECK3(a) a
#else
#define PARANOID_CHECK3(a)
#endif

// OS memory backend used by the allocator; MmapMemory (from util/osmem.h)
// suggests mmap-based acquisition — see that header for the contract.
using OSMemory = MmapMemory;
// A size class is identified by a small signed index (see CommonT::sizeClass).
using SizeClassT = int;

// 1 MiB threshold related to block-reuse policy; exact semantics live at the
// use sites (NOTE(review): presumably "blocks this large are always reused" —
// confirm in the allocator implementation).
constexpr size_t ALWAYS_REUSE_LIMIT = 0x100000ul;
constexpr uint8_t MAX_PTR_BITS = 57;  // Maximum number of bits a pointer can use (Intel IceLake)
// One past the largest representable pointer value. Shift a uint64_t operand:
// `1ul` is only 32 bits on ILP32 data models, where a 57-bit shift would be
// undefined behavior.
constexpr uint64_t MAX_PTR = uint64_t(1) << MAX_PTR_BITS;

// Index of the most significant set bit, i.e. floor(log2(v)).
// Precondition: v != 0 (the clz builtins are undefined for 0).
// Uses __builtin_clzll so the operand width matches uint64_t on every data
// model; the original __builtin_clzl takes `unsigned long`, which is only
// 32 bits on ILP32 targets and would silently truncate the argument.
inline constexpr int
msbIdx(uint64_t v) {
    return (sizeof(v) * 8 - 1) - __builtin_clzll(v);
}

// Size-class arithmetic shared by the allocators: class 0 covers requests up
// to 2^MinClassSizeC bytes, and each subsequent class doubles the bound.
template<size_t MinClassSizeC>
class CommonT {
public:
    static constexpr size_t MAX_ALIGN = 0x200000ul;
    enum {
        MinClassSize = MinClassSizeC
    };
    // Maps a request size to its size class.
    // The small-size check comes first so msbIdx(sz - 1) is never evaluated
    // with a zero argument (the original computed it unconditionally, making
    // sizeClass(1) reach __builtin_clz*(0), which is undefined behavior).
    // The limit is computed as a size_t shift, matching classSize().
    static constexpr SizeClassT sizeClass(size_t sz) noexcept {
        if (sz <= (size_t(1) << MinClassSizeC)) {
            return 0;
        }
        return SizeClassT(msbIdx(sz - 1) - (MinClassSizeC - 1));
    }
    // Upper size bound (bytes) of size class sc.
    static constexpr size_t classSize(SizeClassT sc) noexcept { return (size_t(1) << (sc + MinClassSizeC)); }
};

// Thin pthread-mutex wrapper whose locking can be enabled/disabled at runtime
// (_use; presumably flipped by init()/quit() once the process goes
// multi-threaded — confirm in the .cpp, where lock/unlock/init/quit live).
// Copying is deleted: a copied pthread_mutex_t is not a valid mutex.
class Mutex {
public:
    Mutex() : _mutex(), _use(false) { }
    ~Mutex() { quit(); }
    Mutex(const Mutex &) = delete;
    Mutex &operator=(const Mutex &) = delete;
    void lock();
    void unlock();
    // Global bookkeeping of threads currently using the allocator.
    static void addThread()      { _threadCount.fetch_add(1); }
    static void subThread()      { _threadCount.fetch_sub(1); }
    // Toggle suppressing recursive use of the allocator (e.g. while logging).
    static void stopRecursion()  { _stopRecursion = true; }
    static void allowRecursion() { _stopRecursion = false; }
    void init();
    void quit();
private:
    static std::atomic<uint32_t> _threadCount;
    static bool _stopRecursion;
    pthread_mutex_t _mutex;
    bool _use;
};

// RAII guard for Mutex: the constructor (defined in the .cpp, presumably
// acquiring the lock) pairs with the unlocking destructor here.
// Non-copyable: the implicit copy the original allowed would make two guards
// unlock the same mutex, i.e. a double unlock in the second destructor.
class Guard {
public:
    Guard(Mutex & m);
    ~Guard() { _mutex->unlock(); }
    Guard(const Guard &) = delete;
    Guard &operator=(const Guard &) = delete;
private:
    Mutex *_mutex;
};

// Abstract interface implemented by the concrete allocators.
class IAllocator {
public:
    // Defaulted (rather than the original empty-body) virtual destructor:
    // same behavior, and it keeps the class trivially documented as a
    // polymorphic base that is safe to delete through.
    virtual ~IAllocator() = default;
    // Per-thread setup/teardown hooks (NOTE(review): bool presumably reports
    // success — confirm against the implementations).
    virtual bool initThisThread() = 0;
    virtual bool quitThisThread() = 0;
    virtual void enableThreadSupport() = 0;
    virtual void setReturnAddressStop(const void *returnAddressStop) = 0;
    virtual size_t getMaxNumThreads() const = 0;
};

// Diagnostic helpers, implemented elsewhere in vespamalloc.
void info();
// Records an allocation that crossed the big-block threshold; noinline
// (presumably) keeps this cold logging path out of callers' hot code.
void logBigBlock(const void *ptr, size_t exact, size_t adjusted, size_t gross) __attribute__((noinline));
// Writes the current stack trace to the log; noinline gives the trace a
// stable starting frame.
void logStackTrace() __attribute__((noinline));

// assert() variant that logs a stack trace before the assertion fires.
// Wrapped in do { } while (0) so the macro acts as a single statement: the
// original bare `{ ... }` block plus the caller's trailing `;` broke
// `if (cond) ASSERT_STACKTRACE(x); else ...` with a syntax error.
// Note `a` is evaluated a second time by assert() on the failure path only.
#define ASSERT_STACKTRACE(a) do {           \
    if ( __builtin_expect(!(a), false) ) {  \
        vespamalloc::logStackTrace();       \
        assert(a);                          \
    }                                       \
} while (0)

// Globals defined/configured elsewhere: the log output destination and the
// size threshold used by the big-block logging (NOTE(review): threshold
// semantics inferred from the names — confirm at the definition site).
extern FILE * _G_logFile;
extern size_t _G_bigBlockLimit;

} // namespace vespamalloc