// Copyright Vespa.ai. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#pragma once

#include "optimized.h"
#include "memory_allocator.h"
#include <memory>

namespace vespalib::alloc {

/**
 * Represents a single memory allocation.
 * Instances can be created, moved and swapped, but never copied.
 * The allocation strategy (the backing MemoryAllocator) is chosen at
 * construction time; create() hands out additional allocations that use
 * the same strategy.
**/
class Alloc
{
public:
    /** Empty allocation with no backing allocator. */
    Alloc() noexcept : _alloc(), _allocator(nullptr) { }
    Alloc(const Alloc &) = delete;
    Alloc & operator = (const Alloc &) = delete;
    Alloc(Alloc && rhs) noexcept
        : _alloc(rhs._alloc),
          _allocator(rhs._allocator)
    {
        rhs.clear(); // rhs no longer owns the memory
    }
    Alloc & operator=(Alloc && rhs) noexcept {
        if (this != &rhs) {
            reset(); // release whatever we currently own
            _alloc = rhs._alloc;
            _allocator = rhs._allocator;
            rhs.clear();
        }
        return *this;
    }
    ~Alloc() noexcept { reset(); }

    size_t size() const noexcept { return _alloc.size(); }
    void * get() noexcept { return _alloc.get(); }
    const void * get() const noexcept { return _alloc.get(); }
    void * operator -> () noexcept { return get(); }
    const void * operator -> () const noexcept { return get(); }
    /*
     * Tries to grow the allocation in place. When it returns true the
     * buffer may be accessed up to the new size, and the old contents
     * are preserved up to the previous size. This is thread safe and at
     * no point during the operation is the data in the buffer invalid.
     * @param newSize The desired new size
     * @return true if successful.
     */
    bool resize_inplace(size_t newSize);
    /** Exchanges contents (memory and allocator) with rhs. */
    void swap(Alloc & rhs) noexcept {
        std::swap(_alloc, rhs._alloc);
        std::swap(_allocator, rhs._allocator);
    }
    /** Returns the owned memory (if any) to the allocator. */
    void reset() noexcept {
        if (_alloc.get() == nullptr) return;
        _allocator->free(_alloc);
        _alloc.reset();
    }
    /** New allocation of sz bytes using the same allocation strategy. */
    Alloc create(size_t sz) const noexcept {
        if (sz == 0) {
            return Alloc(_allocator);
        }
        return Alloc(_allocator, sz);
    }

    static Alloc allocAlignedHeap(size_t sz, size_t alignment);
    static Alloc allocHeap(size_t sz=0);
    static Alloc allocMMap(size_t sz=0);
    /**
     * Optional alignment is assumed to be <= system page size, since mmap
     * is always used when size is above limit.
     */
    static Alloc alloc(size_t sz) noexcept;
    static Alloc alloc_aligned(size_t sz, size_t alignment) noexcept;
    static Alloc alloc(size_t sz, size_t mmapLimit, size_t alignment=0) noexcept;
    static Alloc alloc() noexcept;
    static Alloc alloc_with_allocator(const MemoryAllocator* allocator) noexcept;
private:
    /** Allocation of sz bytes drawn from the given allocator. */
    Alloc(const MemoryAllocator * allocator, size_t sz) noexcept
        : _alloc(allocator->alloc(sz)),
          _allocator(allocator)
    { }
    /** Empty allocation bound to the given allocator. */
    Alloc(const MemoryAllocator * allocator) noexcept
        : _alloc(),
          _allocator(allocator)
    { }
    /** Forgets (without freeing) the owned memory and allocator. */
    void clear() noexcept {
        _alloc.reset();
        _allocator = nullptr;
    }
    PtrAndSize              _alloc;
    const MemoryAllocator * _allocator;
};

}

namespace vespalib {

/// Rounds up to the closest number that is a power of 2,
/// e.g. 5 -> 8, 8 -> 8, 9 -> 16 (assuming Optimized::msbIdx returns the
/// index of the most significant set bit — defined in optimized.h).
/// NOTE(review): minimum == 0 wraps to SIZE_MAX here, and minimum == 1
/// feeds 0 to msbIdx — confirm callers never pass values below 2, or
/// that Optimized::msbIdx(0) is well-defined for this use.
inline size_t
roundUp2inN(size_t minimum) {
    return 2ul << Optimized::msbIdx(minimum - 1);
}

/// Rounds minElems up to the closest element count for which
/// minElems * elemSize is a power of 2 (in bytes).
inline size_t
roundUp2inN(size_t minElems, size_t elemSize) {
    size_t roundedBytes = roundUp2inN(minElems * elemSize);
    return roundedBytes / elemSize;
}

template <typename T>
size_t
roundUp2inN(size_t elems) {
    return roundUp2inN(elems, sizeof(T));
}

}