author     Henning Baldersheim <balder@yahoo-inc.com>    2017-01-04 00:17:54 +0100
committer  GitHub <noreply@github.com>                   2017-01-04 00:17:54 +0100
commit     1ece7c2b9b7ae8c95029e8c553ad277bcd0668fd (patch)
tree       2f755a028df5044c552f3c6ebfe6d65049731178
parent     4a5c54e6e3780f7a02d83ec325938317ef3837b1 (diff)
Revert "Balder/extend in place rebased"
-rw-r--r--  vespalib/src/tests/alloc/alloc_test.cpp      177
-rw-r--r--  vespalib/src/vespa/vespalib/util/alloc.cpp   128
-rw-r--r--  vespalib/src/vespa/vespalib/util/alloc.h      23
3 files changed, 36 insertions, 292 deletions
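
Context for the diff below: the change being reverted had added an in-place resize API (a virtual MemoryAllocator::resize_inplace() plus Alloc::resize_inplace()) and page-size rounding of mmap'd allocations; this commit removes both again. The following is a minimal usage sketch of the removed caller-facing API, reconstructed from the removed tests and the removed doc comment in alloc.h. The grow() helper is hypothetical, and the namespace in the using-declaration is an assumption, not something stated in this diff.

// Sketch only: how a caller would have used the removed Alloc::resize_inplace().
// grow() is a made-up helper; the namespace of Alloc is assumed.
#include <vespa/vespalib/util/alloc.h>
#include <cassert>
#include <cstddef>

using vespalib::alloc::Alloc;   // assumption about the namespace at this revision

// Try to grow 'buf' to at least newSize bytes without moving it.
// Per the removed doc comment: on success the old contents stay valid,
// the pointer is unchanged, and size() reports the real (rounded) size.
bool grow(Alloc & buf, size_t newSize) {
    void * oldPtr = buf.get();
    if (buf.resize_inplace(newSize)) {
        assert(buf.get() == oldPtr);    // resize happened in place
        assert(buf.size() >= newSize);
        return true;
    }
    return false;   // caller must fall back to a fresh allocation plus copy
}
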
diff --git a/vespalib/src/tests/alloc/alloc_test.cpp b/vespalib/src/tests/alloc/alloc_test.cpp
index f614ca79f04..e217abfbd16 100644
--- a/vespalib/src/tests/alloc/alloc_test.cpp
+++ b/vespalib/src/tests/alloc/alloc_test.cpp
@@ -16,11 +16,11 @@ testSwap(T & a, T & b)
 {
     void * tmpA(a.get());
     void * tmpB(b.get());
-    EXPECT_EQUAL(4096u, a.size());
-    EXPECT_EQUAL(8192, b.size());
+    EXPECT_EQUAL(100u, a.size());
+    EXPECT_EQUAL(200u, b.size());
     std::swap(a, b);
-    EXPECT_EQUAL(4096u, b.size());
-    EXPECT_EQUAL(8192, a.size());
+    EXPECT_EQUAL(100u, b.size());
+    EXPECT_EQUAL(200u, a.size());
     EXPECT_EQUAL(tmpA, b.get());
     EXPECT_EQUAL(tmpB, a.get());
 }
@@ -39,24 +39,24 @@ TEST("test basics") {
     }
     {
         Alloc h = Alloc::allocMMap(100);
-        EXPECT_EQUAL(4096u, h.size());
+        EXPECT_EQUAL(100u, h.size());
         EXPECT_TRUE(h.get() != nullptr);
     }
     {
-        Alloc a = Alloc::allocHeap(4096), b = Alloc::allocHeap(8192);
+        Alloc a = Alloc::allocHeap(100), b = Alloc::allocHeap(200);
         testSwap(a, b);
     }
     {
-        Alloc a = Alloc::allocMMap(4096), b = Alloc::allocMMap(8192);
+        Alloc a = Alloc::allocMMap(100), b = Alloc::allocMMap(200);
         testSwap(a, b);
     }
     {
-        Alloc a = Alloc::allocAlignedHeap(4096, 1024), b = Alloc::allocAlignedHeap(8192, 1024);
+        Alloc a = Alloc::allocAlignedHeap(100, 1024), b = Alloc::allocAlignedHeap(200, 1024);
         testSwap(a, b);
     }
     {
-        Alloc a = Alloc::allocHeap(4096);
-        Alloc b = Alloc::allocMMap(8192);
+        Alloc a = Alloc::allocHeap(100);
+        Alloc b = Alloc::allocMMap(200);
         testSwap(a, b);
     }
     {
@@ -102,161 +102,4 @@ TEST("rounding of large mmaped buffer") {
     EXPECT_EQUAL(MemoryAllocator::HUGEPAGE_SIZE*12, buf.size());
 }
 
-TEST("heap alloc can not be extended") {
-    Alloc buf = Alloc::allocHeap(100);
-    void * oldPtr = buf.get();
-    EXPECT_EQUAL(100, buf.size());
-    EXPECT_FALSE(buf.resize_inplace(101));
-    EXPECT_EQUAL(oldPtr, buf.get());
-    EXPECT_EQUAL(100, buf.size());
-}
-
-TEST("auto alloced heap alloc can not be extended") {
-    Alloc buf = Alloc::alloc(100);
-    void * oldPtr = buf.get();
-    EXPECT_EQUAL(100, buf.size());
-    EXPECT_FALSE(buf.resize_inplace(101));
-    EXPECT_EQUAL(oldPtr, buf.get());
-    EXPECT_EQUAL(100, buf.size());
-}
-
-TEST("auto alloced heap alloc can not be extended, even if resize will be mmapped") {
-    Alloc buf = Alloc::alloc(100);
-    void * oldPtr = buf.get();
-    EXPECT_EQUAL(100, buf.size());
-    EXPECT_FALSE(buf.resize_inplace(MemoryAllocator::HUGEPAGE_SIZE*3));
-    EXPECT_EQUAL(oldPtr, buf.get());
-    EXPECT_EQUAL(100, buf.size());
-}
-
-TEST("auto alloced mmap alloc can be extended if room") {
-    static constexpr size_t SZ = MemoryAllocator::HUGEPAGE_SIZE*2;
-    Alloc reserved = Alloc::alloc(SZ);
-    Alloc buf = Alloc::alloc(SZ);
-
-    // Normally mmapping starts at the top and grows down in address space.
-    // Then there is no room to extend the last mapping.
-    // So in order to verify this we first mmap a reserved area that we unmap
-    // before we test extension.
-    EXPECT_GREATER(reserved.get(), buf.get());
-    EXPECT_EQUAL(reserved.get(), static_cast<const char *>(buf.get()) + buf.size());
-    {
-        Alloc().swap(reserved);
-    }
-
-    void * oldPtr = buf.get();
-    EXPECT_EQUAL(SZ, buf.size());
-    EXPECT_TRUE(buf.resize_inplace(SZ+1));
-    EXPECT_EQUAL(oldPtr, buf.get());
-    EXPECT_EQUAL((SZ/2)*3, buf.size());
-}
-
-TEST("auto alloced mmap alloc can not be extended if no room") {
-    static constexpr size_t SZ = MemoryAllocator::HUGEPAGE_SIZE*2;
-    Alloc reserved = Alloc::alloc(SZ);
-    Alloc buf = Alloc::alloc(SZ);
-
-    // Normally mmapping starts at the top and grows down in address space.
-    // Then there is no room to extend the last mapping.
-    // So in order to verify this we first mmap a reserved area that we unmap
-    // before we test extension.
-    EXPECT_GREATER(reserved.get(), buf.get());
-    EXPECT_EQUAL(reserved.get(), static_cast<const char *>(buf.get()) + buf.size());
-
-    void * oldPtr = buf.get();
-    EXPECT_EQUAL(SZ, buf.size());
-    EXPECT_FALSE(buf.resize_inplace(SZ+1));
-    EXPECT_EQUAL(oldPtr, buf.get());
-    EXPECT_EQUAL(SZ, buf.size());
-}
-
-TEST("mmap alloc can be extended if room") {
-    Alloc reserved = Alloc::allocMMap(100);
-    Alloc buf = Alloc::allocMMap(100);
-
-    // Normally mmapping starts at the top and grows down in address space.
-    // Then there is no room to extend the last mapping.
-    // So in order to verify this we first mmap a reserved area that we unmap
-    // before we test extension.
-    EXPECT_GREATER(reserved.get(), buf.get());
-    EXPECT_EQUAL(reserved.get(), static_cast<const char *>(buf.get()) + buf.size());
-    {
-        Alloc().swap(reserved);
-    }
-
-    void * oldPtr = buf.get();
-    EXPECT_EQUAL(4096, buf.size());
-    EXPECT_TRUE(buf.resize_inplace(4097));
-    EXPECT_EQUAL(oldPtr, buf.get());
-    EXPECT_EQUAL(8192, buf.size());
-}
-
-TEST("mmap alloc can not be extended if no room") {
-    Alloc reserved = Alloc::allocMMap(100);
-    Alloc buf = Alloc::allocMMap(100);
-
-    // Normally mmapping starts at the top and grows down in address space.
-    // Then there is no room to extend the last mapping.
-    // So in order to verify this we first mmap a reserved area that we unmap
-    // before we test extension.
-    EXPECT_GREATER(reserved.get(), buf.get());
-    EXPECT_EQUAL(reserved.get(), static_cast<const char *>(buf.get()) + buf.size());
-
-    void * oldPtr = buf.get();
-    EXPECT_EQUAL(4096, buf.size());
-    EXPECT_FALSE(buf.resize_inplace(4097));
-    EXPECT_EQUAL(oldPtr, buf.get());
-    EXPECT_EQUAL(4096, buf.size());
-}
-
-TEST("heap alloc can not be shrinked") {
-    Alloc buf = Alloc::allocHeap(101);
-    void * oldPtr = buf.get();
-    EXPECT_EQUAL(101, buf.size());
-    EXPECT_FALSE(buf.resize_inplace(100));
-    EXPECT_EQUAL(oldPtr, buf.get());
-    EXPECT_EQUAL(101, buf.size());
-}
-
-TEST("mmap alloc can be shrinked") {
-    Alloc buf = Alloc::allocMMap(4097);
-    void * oldPtr = buf.get();
-    EXPECT_EQUAL(8192, buf.size());
-    EXPECT_TRUE(buf.resize_inplace(4095));
-    EXPECT_EQUAL(oldPtr, buf.get());
-    EXPECT_EQUAL(4096, buf.size());
-}
-
-TEST("auto alloced heap alloc can not be shrinked") {
-    Alloc buf = Alloc::alloc(101);
-    void * oldPtr = buf.get();
-    EXPECT_EQUAL(101, buf.size());
-    EXPECT_FALSE(buf.resize_inplace(100));
-    EXPECT_EQUAL(oldPtr, buf.get());
-    EXPECT_EQUAL(101, buf.size());
-}
-
-TEST("auto alloced mmap alloc can be shrinked") {
-    static constexpr size_t SZ = MemoryAllocator::HUGEPAGE_SIZE;
-    Alloc buf = Alloc::alloc(SZ + 1);
-    void * oldPtr = buf.get();
-    EXPECT_EQUAL(SZ + MemoryAllocator::HUGEPAGE_SIZE, buf.size());
-    EXPECT_TRUE(buf.resize_inplace(SZ-1));
-    EXPECT_EQUAL(oldPtr, buf.get());
-    EXPECT_EQUAL(SZ, buf.size());
-}
-
-TEST("auto alloced mmap alloc can not be shrinked below HUGEPAGE_SIZE/2 + 1 ") {
-    static constexpr size_t SZ = MemoryAllocator::HUGEPAGE_SIZE;
-    Alloc buf = Alloc::alloc(SZ + 1);
-    void * oldPtr = buf.get();
-    EXPECT_EQUAL(SZ + MemoryAllocator::HUGEPAGE_SIZE, buf.size());
-    EXPECT_TRUE(buf.resize_inplace(SZ/2 + 1));
-    EXPECT_EQUAL(oldPtr, buf.get());
-    EXPECT_EQUAL(SZ, buf.size());
-    EXPECT_FALSE(buf.resize_inplace(SZ/2));
-    EXPECT_EQUAL(oldPtr, buf.get());
-    EXPECT_EQUAL(SZ, buf.size());
-}
-
 TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/vespalib/src/vespa/vespalib/util/alloc.cpp b/vespalib/src/vespa/vespalib/util/alloc.cpp
index 16fa49dc37f..8d899eae551 100644
--- a/vespalib/src/vespa/vespalib/util/alloc.cpp
+++ b/vespalib/src/vespa/vespalib/util/alloc.cpp
@@ -1,16 +1,20 @@
 // Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
 #include "alloc.h"
+#include <stdlib.h>
+#include <errno.h>
 #include <sys/mman.h>
+#include <linux/mman.h>
+#include <stdexcept>
 #include <vespa/vespalib/util/stringfmt.h>
 #include <vespa/vespalib/util/exceptions.h>
 #include <vespa/vespalib/util/backtrace.h>
 #include <vespa/vespalib/util/sync.h>
+#include <vespa/log/log.h>
 #include <map>
 #include <atomic>
 #include <unordered_map>
 #include <vespa/fastos/file.h>
-#include <vespa/log/log.h>
 
 LOG_SETUP(".vespalib.alloc");
 
 namespace vespalib {
@@ -20,17 +24,11 @@ namespace {
 volatile bool _G_hasHugePageFailureJustHappened(false);
 bool _G_SilenceCoreOnOOM(false);
 int _G_HugeFlags = 0;
-size_t _G_pageSize = getpagesize();
 size_t _G_MMapLogLimit = std::numeric_limits<size_t>::max();
 size_t _G_MMapNoCoreLimit = std::numeric_limits<size_t>::max();
 Lock _G_lock;
 std::atomic<size_t> _G_mmapCount(0);
 
-size_t
-roundUp2PageSize(size_t sz) {
-    return (sz + (_G_pageSize - 1)) & ~(_G_pageSize - 1);
-}
-
 struct MMapInfo {
     MMapInfo() :
         _id(0ul),
@@ -120,7 +118,6 @@ class HeapAllocator : public MemoryAllocator {
 public:
     PtrAndSize alloc(size_t sz) const override;
     void free(PtrAndSize alloc) const override;
-    size_t resize_inplace(PtrAndSize, size_t) const override { return 0; }
     static PtrAndSize salloc(size_t sz);
     static void sfree(PtrAndSize alloc);
     static MemoryAllocator & getDefault();
@@ -141,14 +138,9 @@ class MMapAllocator : public MemoryAllocator {
 public:
     PtrAndSize alloc(size_t sz) const override;
     void free(PtrAndSize alloc) const override;
-    size_t resize_inplace(PtrAndSize current, size_t newSize) const override;
-    static size_t sresize_inplace(PtrAndSize current, size_t newSize);
-    static PtrAndSize salloc(size_t sz, void * wantedAddress);
+    static PtrAndSize salloc(size_t sz);
     static void sfree(PtrAndSize alloc);
     static MemoryAllocator & getDefault();
-private:
-    static size_t extend_inplace(PtrAndSize current, size_t newSize);
-    static size_t shrink_inplace(PtrAndSize current, size_t newSize);
 };
 
 class AutoAllocator : public MemoryAllocator {
@@ -156,7 +148,6 @@ public:
     AutoAllocator(size_t mmapLimit, size_t alignment) : _mmapLimit(mmapLimit), _alignment(alignment) { }
     PtrAndSize alloc(size_t sz) const override;
     void free(PtrAndSize alloc) const override;
-    size_t resize_inplace(PtrAndSize current, size_t newSize) const override;
     static MemoryAllocator & getDefault();
     static MemoryAllocator & getAllocator(size_t mmapLimit, size_t alignment);
 private:
@@ -165,14 +156,7 @@ private:
             ? MMapAllocator::roundUpToHugePages(sz)
             : sz;
     }
-    bool isMMapped(size_t sz) const { return (sz >= _mmapLimit); }
-    bool useMMap(size_t sz) const {
-        if (_mmapLimit >= HUGEPAGE_SIZE) {
-            return (sz + (HUGEPAGE_SIZE >> 1) - 1) >= _mmapLimit;
-        } else {
-            return (sz >= _mmapLimit);
-        }
-    }
+    bool useMMap(size_t sz) const { return (sz >= _mmapLimit); }
     size_t _mmapLimit;
     size_t _alignment;
 };
@@ -186,22 +170,17 @@ struct MMapLimitAndAlignmentHash {
 using AutoAllocatorsMap = std::unordered_map<MMapLimitAndAlignment, AutoAllocator::UP, MMapLimitAndAlignmentHash>;
 
-void createAlignedAutoAllocators(AutoAllocatorsMap & map, size_t mmapLimit) {
-    for (size_t alignment : {0,0x200, 0x400, 0x1000}) {
-        MMapLimitAndAlignment key(mmapLimit, alignment);
-        auto result = map.emplace(key, AutoAllocator::UP(new AutoAllocator(mmapLimit, alignment)));
-        (void) result;
-        assert( result.second );
-
-    }
-}
-
 AutoAllocatorsMap
 createAutoAllocators() {
     AutoAllocatorsMap map;
-    map.reserve(3*5);
-    for (size_t pages : {1,2,4,8,16}) {
-        size_t mmapLimit = pages * MemoryAllocator::HUGEPAGE_SIZE;
-        createAlignedAutoAllocators(map, mmapLimit);
+    map.reserve(15);
+    for (size_t alignment : {0,0x200, 0x400, 0x1000}) {
+        for (size_t pages : {1,2,4,8,16}) {
+            size_t mmapLimit = pages * MemoryAllocator::HUGEPAGE_SIZE;
+            MMapLimitAndAlignment key(mmapLimit, alignment);
+            auto result = map.emplace(key, AutoAllocator::UP(new AutoAllocator(mmapLimit, alignment)));
+            (void) result;
+            assert( result.second );
+        }
     }
     return map;
 }
@@ -277,21 +256,15 @@ AlignedHeapAllocator::alloc(size_t sz) const {
     return PtrAndSize(ptr, sz);
 }
 
-size_t
-MMapAllocator::resize_inplace(PtrAndSize current, size_t newSize) const {
-    return sresize_inplace(current, newSize);
-}
-
 MemoryAllocator::PtrAndSize
 MMapAllocator::alloc(size_t sz) const {
-    return salloc(sz, nullptr);
+    return salloc(sz);
 }
 
 MemoryAllocator::PtrAndSize
-MMapAllocator::salloc(size_t sz, void * wantedAddress)
+MMapAllocator::salloc(size_t sz)
 {
     void * buf(nullptr);
-    sz = roundUp2PageSize(sz);
     if (sz > 0) {
         const int flags(MAP_ANON | MAP_PRIVATE);
         const int prot(PROT_READ | PROT_WRITE);
@@ -301,7 +274,7 @@ MMapAllocator::salloc(size_t sz, void * wantedAddress)
             stackTrace = getStackTrace(1);
             LOG(info, "mmap %ld of size %ld from %s", mmapId, sz, stackTrace.c_str());
         }
-        buf = mmap(wantedAddress, sz, prot, flags | _G_HugeFlags, -1, 0);
+        buf = mmap(nullptr, sz, prot, flags | _G_HugeFlags, -1, 0);
         if (buf == MAP_FAILED) {
             if ( ! _G_hasHugePageFailureJustHappened ) {
                 _G_hasHugePageFailureJustHappened = true;
@@ -309,7 +282,7 @@
                              " Will resort to ordinary mmap until it works again.",
                              sz, FastOS_FileInterface::getLastErrorString().c_str());
             }
-            buf = mmap(wantedAddress, sz, prot, flags, -1, 0);
+            buf = mmap(nullptr, sz, prot, flags, -1, 0);
             if (buf == MAP_FAILED) {
                 stackTrace = getStackTrace(1);
                 string msg = make_string("Failed mmaping anonymous of size %ld errno(%d) from %s", sz, errno, stackTrace.c_str());
@@ -340,36 +313,6 @@
     return PtrAndSize(buf, sz);
 }
 
-size_t
-MMapAllocator::sresize_inplace(PtrAndSize current, size_t newSize) {
-    newSize = roundUp2PageSize(newSize);
-    if (newSize > current.second) {
-        return extend_inplace(current, newSize);
-    } else if (newSize < current.second) {
-        return shrink_inplace(current, newSize);
-    } else {
-        return current.second;
-    }
-}
-
-size_t
-MMapAllocator::extend_inplace(PtrAndSize current, size_t newSize) {
-    PtrAndSize got = MMapAllocator::salloc(newSize - current.second, static_cast<char *>(current.first)+current.second);
-    if ((static_cast<const char *>(current.first) + current.second) == static_cast<const char *>(got.first)) {
-        return current.second + got.second;
-    } else {
-        MMapAllocator::sfree(got);
-        return 0;
-    }
-}
-
-size_t
-MMapAllocator::shrink_inplace(PtrAndSize current, size_t newSize) {
-    PtrAndSize toUnmap(static_cast<char *>(current.first)+newSize, current.second - newSize);
-    sfree(toUnmap);
-    return newSize;
-}
-
 void MMapAllocator::free(PtrAndSize alloc) const {
     sfree(alloc);
 }
@@ -377,10 +320,8 @@ void MMapAllocator::free(PtrAndSize alloc) const {
 void MMapAllocator::sfree(PtrAndSize alloc)
 {
     if (alloc.first != nullptr) {
-        int retval = madvise(alloc.first, alloc.second, MADV_DONTNEED);
-        assert(retval == 0);
-        retval = munmap(alloc.first, alloc.second);
-        assert(retval == 0);
+        madvise(alloc.first, alloc.second, MADV_DONTNEED);
+        munmap(alloc.first, alloc.second);
         if (alloc.second >= _G_MMapLogLimit) {
             LockGuard guard(_G_lock);
             MMapInfo info = _G_HugeMappings[alloc.first];
@@ -392,21 +333,11 @@ void MMapAllocator::sfree(PtrAndSize alloc)
     }
 }
 
-size_t
-AutoAllocator::resize_inplace(PtrAndSize current, size_t newSize) const {
-    if (isMMapped(current.second) && useMMap(newSize)) {
-        newSize = roundUpToHugePages(newSize);
-        return MMapAllocator::sresize_inplace(current, newSize);
-    } else {
-        return 0;
-    }
-}
-
 MMapAllocator::PtrAndSize
 AutoAllocator::alloc(size_t sz) const {
     if (useMMap(sz)) {
         sz = roundUpToHugePages(sz);
-        return MMapAllocator::salloc(sz, nullptr);
+        return MMapAllocator::salloc(sz);
     } else {
         if (_alignment == 0) {
             return HeapAllocator::salloc(sz);
@@ -418,7 +349,7 @@ AutoAllocator::alloc(size_t sz) const {
 
 void
 AutoAllocator::free(PtrAndSize alloc) const {
-    if (isMMapped(alloc.second)) {
+    if (useMMap(alloc.second)) {
         return MMapAllocator::sfree(alloc);
     } else {
         return HeapAllocator::sfree(alloc);
@@ -431,17 +362,6 @@ Alloc::allocHeap(size_t sz)
     return Alloc(&HeapAllocator::getDefault(), sz);
 }
 
-bool
-Alloc::resize_inplace(size_t newSize)
-{
-    size_t extendedSize = _allocator->resize_inplace(_alloc, newSize);
-    if (extendedSize >= newSize) {
-        _alloc.second = extendedSize;
-        return true;
-    }
-    return false;
-}
-
 Alloc
 Alloc::allocAlignedHeap(size_t sz, size_t alignment)
 {
diff --git a/vespalib/src/vespa/vespalib/util/alloc.h b/vespalib/src/vespa/vespalib/util/alloc.h
index 120e63b95d3..9f0937424a9 100644
--- a/vespalib/src/vespa/vespalib/util/alloc.h
+++ b/vespalib/src/vespa/vespalib/util/alloc.h
@@ -21,16 +21,6 @@ public:
     virtual ~MemoryAllocator() { }
     virtual PtrAndSize alloc(size_t sz) const = 0;
    virtual void free(PtrAndSize alloc) const = 0;
-    /*
-     * If possible the allocations will be resized. If it was possible it will return the real size,
-     * if not it shall return 0.
-     * Afterwards you have a buffer that can be accessed up to the new size.
-     * The old buffer is unmodified up to the new size.
-     * This is thread safe and at no point will data in the buffer be invalid.
-     * @param newSize The desired new size
-     * @return true if successful.
-     */
-    virtual size_t resize_inplace(PtrAndSize current, size_t newSize) const = 0;
     static size_t roundUpToHugePages(size_t sz) {
         return (sz+(HUGEPAGE_SIZE-1)) & ~(HUGEPAGE_SIZE-1);
     }
@@ -45,22 +35,13 @@ public:
 class Alloc
 {
 private:
-    using PtrAndSize = MemoryAllocator::PtrAndSize;
+    using PtrAndSize = MemoryAllocator::PtrAndSize;;
 public:
     size_t size() const { return _alloc.second; }
     void * get() { return _alloc.first; }
     const void * get() const { return _alloc.first; }
     void * operator -> () { return _alloc.first; }
     const void * operator -> () const { return _alloc.first; }
-    /*
-     * If possible the allocations will be resized. If it was possible it will return true
-     * And you have an area that can be accessed up to the new size.
-     * The old buffer is unmodified up to the new size.
-     * This is thread safe and at no point will data in the buffer be invalid.
-     * @param newSize The desired new size
-     * @return true if successful.
-     */
-    bool resize_inplace(size_t newSize);
     Alloc(const Alloc &) = delete;
     Alloc & operator = (const Alloc &) = delete;
     Alloc(Alloc && rhs) :
@@ -102,7 +83,7 @@ public:
      * Optional alignment is assumed to be <= system page size, since mmap
      * is always used when size is above limit.
      */
-    static Alloc alloc(size_t sz=0, size_t mmapLimit = MemoryAllocator::HUGEPAGE_SIZE, size_t alignment=0);
+    static Alloc alloc(size_t sz=0, size_t mmapLimit=MemoryAllocator::HUGEPAGE_SIZE, size_t alignment=0);
 private:
     Alloc(const MemoryAllocator * allocator, size_t sz) : _alloc(allocator->alloc(sz)), _allocator(allocator) { }
     void clear() {
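
The mechanism removed above (MMapAllocator::extend_inplace()/shrink_inplace()) grows a mapping by mmap()ing the page range directly behind the existing one and checking whether the kernel actually placed the new pages at the requested address, and shrinks by munmap()ing the tail. A small standalone sketch of that technique, using plain POSIX mmap rather than vespalib code (extend_mapping() is an illustrative name, not an API from this repository):

// Grow an anonymous mapping in place, if the address space right behind it is free.
// Mirrors the idea of the reverted extend_inplace(): ask for the adjacent range,
// and give it back if the kernel placed it elsewhere.
#include <sys/mman.h>
#include <cstddef>
#include <cstdio>

static size_t extend_mapping(void * base, size_t oldSize, size_t extra) {
    void * wanted = static_cast<char *>(base) + oldSize;
    void * got = mmap(wanted, extra, PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0);
    if (got == MAP_FAILED) {
        return 0;                       // nothing gained; original mapping untouched
    }
    if (got != wanted) {                // kernel put it somewhere else: undo
        munmap(got, extra);
        return 0;
    }
    return oldSize + extra;             // contiguous, so the mapping effectively grew
}

int main() {
    const size_t page = 4096;           // assume 4 KiB pages for the example
    void * buf = mmap(nullptr, page, PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0);
    if (buf == MAP_FAILED) { return 1; }
    size_t newSize = extend_mapping(buf, page, page);
    std::printf("extended in place: %s\n", (newSize != 0) ? "yes" : "no");
    munmap(buf, (newSize != 0) ? newSize : page);   // shrinking/freeing is just munmap of the range
    return 0;
}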