path: root/vespamalloc/src/vespamalloc/malloc/threadlist.hpp
// Copyright Vespa.ai. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#pragma once
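
// Template method implementations for ThreadListT (declared in threadlist.h).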

#include "threadlist.h"
#include <malloc.h>
#include <cstdlib>

namespace vespamalloc {

namespace {
    const char * VESPA_MALLOC_MMAP_THRESHOLD = "VESPA_MALLOC_MMAP_THRESHOLD";
}

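// Thread pools are wired to the shared alloc and mmap pools up front. The mmap
// threshold can be overridden at startup through the VESPA_MALLOC_MMAP_THRESHOLD
// environment variable; otherwise MMAP_LIMIT_DEFAULT is used.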
template <typename MemBlockPtrT, typename ThreadStatT>
ThreadListT<MemBlockPtrT, ThreadStatT>::ThreadListT(AllocPool & allocPool, MMapPool & mmapPool) :
    _isThreaded(false),
    _threadCount(0),
    _threadCountAccum(0),
    _allocPool(allocPool),
    _mmapPool(mmapPool)
{
    const char * mmapThresholdS = getenv(VESPA_MALLOC_MMAP_THRESHOLD);
    int mmapThreshold = (mmapThresholdS != nullptr)
            ? strtol(mmapThresholdS, nullptr, 0)
            : MMAP_LIMIT_DEFAULT;
    for (size_t i = 0; i < getMaxNumThreads(); i++) {
        auto & thread = _threadVector[i];
        thread.setPool(_allocPool, _mmapPool);
        thread.mallopt(M_MMAP_THRESHOLD, mmapThreshold);
    }
}

template <typename MemBlockPtrT, typename ThreadStatT>
ThreadListT<MemBlockPtrT, ThreadStatT>::~ThreadListT() = default;

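// Prints a summary of active threads; when thread statistics are enabled
// (ThreadStatT is not a dummy) it also dumps per-size-class and per-thread details.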
template <typename MemBlockPtrT, typename ThreadStatT>
void ThreadListT<MemBlockPtrT, ThreadStatT>::info(FILE * os, size_t level)
{
    size_t peakThreads(0);
    size_t activeThreads(0);
    for (size_t i(0); i < getMaxNumThreads(); i++) {
        const ThreadPool & thread = _threadVector[i];
        if (thread.isActive()) {
            activeThreads++;
            peakThreads = i;
        }
    }
    fprintf(os, "#%zu active threads. Peak threads #%zu. %u threads created in total.\n",
            activeThreads, peakThreads, _threadCountAccum.load());
    if ((level > 1) && ! ThreadStatT::isDummy()) {
        for (SizeClassT sc(0); sc < NUM_SIZE_CLASSES; sc++) {
            _allocPool.dataSegment().infoThread(os, level, 0, sc, _threadCountAccum.load() + 1);
        }
    }
    for (size_t i(0); i < getMaxNumThreads(); i++) {
        const ThreadPool & thread = _threadVector[i];
        if (thread.isActive() && ! ThreadStatT::isDummy() && thread.isUsed()) {
            fprintf(os, "Thread #%u = pid # %d\n", thread.threadId(), thread.osThreadId());
            thread.info(os, level, _allocPool.dataSegment());
        }
    }
}

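// Called when a thread exits: flags its pool as quit and decrements the
// live thread count.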
template <typename MemBlockPtrT, typename ThreadStatT>
bool ThreadListT<MemBlockPtrT, ThreadStatT>::quitThisThread()
{
    ThreadPool & tp = getCurrent();
    tp.quit();
    _threadCount.fetch_sub(1);
    return true;
}

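// Called when a new thread starts: claims the first available pool slot,
// binds it to the calling thread and hands out a unique, monotonically
// increasing thread id.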
template <typename MemBlockPtrT, typename ThreadStatT>
bool ThreadListT<MemBlockPtrT, ThreadStatT>::initThisThread()
{
    _threadCount.fetch_add(1);
    uint32_t lidAccum = _threadCountAccum.fetch_add(1);
    long localId(-1);
    for (size_t i = 0; (localId < 0) && (i < getMaxNumThreads()); i++) {
        ThreadPool & tp = _threadVector[i];
        if (tp.grabAvailable()) {
            localId = i;
        }
    }
    ASSERT_STACKTRACE(localId >= 0);
    ASSERT_STACKTRACE(size_t(localId) < getMaxNumThreads());
    _myPool = &_threadVector[localId];
    ASSERT_STACKTRACE(getThreadId() == size_t(localId));
    ASSERT_STACKTRACE(lidAccum < 0xffffffffu);
    getCurrent().init(lidAccum+1);

    return true;
}

}