From c0ecc5d29f69fcd6824ad971a3ce4773330a6f1e Mon Sep 17 00:00:00 2001
From: Henning Baldersheim
Date: Tue, 3 Jan 2017 11:00:16 +0100
Subject: Implement both extension and shrinking of buffers.

---
 vespalib/src/tests/alloc/alloc_test.cpp    | 54 ++++++++++++++++++++++-------
 vespalib/src/vespa/vespalib/util/alloc.cpp | 55 ++++++++++++++++++++++--------
 vespalib/src/vespa/vespalib/util/alloc.h   | 17 +++++++++
 3 files changed, 98 insertions(+), 28 deletions(-)

(limited to 'vespalib/src')

diff --git a/vespalib/src/tests/alloc/alloc_test.cpp b/vespalib/src/tests/alloc/alloc_test.cpp
index 6950287edb1..03d600c6559 100644
--- a/vespalib/src/tests/alloc/alloc_test.cpp
+++ b/vespalib/src/tests/alloc/alloc_test.cpp
@@ -103,23 +103,51 @@ TEST("rounding of large mmaped buffer") {
 }
 
 TEST("heap alloc can not be extended") {
-    Alloc buf = Alloc::allocHeap(100);
-    void * oldPtr = buf.get();
-    EXPECT_EQUAL(100, buf.size());
-    EXPECT_FALSE(buf.resize_inplace(101));
-    EXPECT_EQUAL(oldPtr, buf.get());
-    EXPECT_EQUAL(100, buf.size());
+    Alloc buf = Alloc::allocHeap(100);
+    void * oldPtr = buf.get();
+    EXPECT_EQUAL(100, buf.size());
+    EXPECT_FALSE(buf.resize_inplace(101));
+    EXPECT_EQUAL(oldPtr, buf.get());
+    EXPECT_EQUAL(100, buf.size());
 }
 
-TEST("mmap alloc can be extended") {
-    Alloc buf = Alloc::allocMMap(100);
-    void * oldPtr = buf.get();
-    EXPECT_EQUAL(4096, buf.size());
-    EXPECT_TRUE(buf.resize_inplace(4097));
-    EXPECT_EQUAL(oldPtr, buf.get());
-    EXPECT_EQUAL(8192, buf.size());
+TEST("mmap alloc can be extended if room") {
+    Alloc reserved = Alloc::allocMMap(100);
+    Alloc buf = Alloc::allocMMap(100);
+
+    // Normally mmapping starts at the top and grows down in address space,
+    // so there is no room to extend the last mapping.
+    // In order to verify extension we therefore first mmap a reserved area
+    // directly above the buffer and unmap it before we test the extension.
+    EXPECT_GREATER(reserved.get(), buf.get());
+    EXPECT_EQUAL(reserved.get(), static_cast<const char *>(buf.get()) + buf.size());
+    {
+        Alloc().swap(reserved);
+    }
+
+    void * oldPtr = buf.get();
+    EXPECT_EQUAL(4096, buf.size());
+    EXPECT_TRUE(buf.resize_inplace(4097));
+    EXPECT_EQUAL(oldPtr, buf.get());
+    EXPECT_EQUAL(8192, buf.size());
 }
+TEST("heap alloc can not be shrunk") {
+    Alloc buf = Alloc::allocHeap(101);
+    void * oldPtr = buf.get();
+    EXPECT_EQUAL(101, buf.size());
+    EXPECT_FALSE(buf.resize_inplace(100));
+    EXPECT_EQUAL(oldPtr, buf.get());
+    EXPECT_EQUAL(101, buf.size());
+}
+TEST("mmap alloc can be shrunk") {
+    Alloc buf = Alloc::allocMMap(4097);
+    void * oldPtr = buf.get();
+    EXPECT_EQUAL(8192, buf.size());
+    EXPECT_TRUE(buf.resize_inplace(4095));
+    EXPECT_EQUAL(oldPtr, buf.get());
+    EXPECT_EQUAL(4096, buf.size());
+}
 
 TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/vespalib/src/vespa/vespalib/util/alloc.cpp b/vespalib/src/vespa/vespalib/util/alloc.cpp
index 33769ac009d..513ad985206 100644
--- a/vespalib/src/vespa/vespalib/util/alloc.cpp
+++ b/vespalib/src/vespa/vespalib/util/alloc.cpp
@@ -1,20 +1,16 @@
 // Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "alloc.h" -#include -#include #include -#include -#include #include #include #include #include -#include #include #include #include #include +#include LOG_SETUP(".vespalib.alloc"); namespace vespalib { @@ -30,6 +26,11 @@ size_t _G_MMapNoCoreLimit = std::numeric_limits::max(); Lock _G_lock; std::atomic _G_mmapCount(0); +size_t +roundUp2PageSize(size_t sz) { + return (sz + (_G_pageSize - 1)) & ~(_G_pageSize - 1); +} + struct MMapInfo { MMapInfo() : _id(0ul), @@ -141,10 +142,13 @@ public: PtrAndSize alloc(size_t sz) const override; void free(PtrAndSize alloc) const override; size_t resize_inplace(PtrAndSize current, size_t newSize) const override; - static size_t sextend_inplace(PtrAndSize current, size_t newSize); + static size_t sresize_inplace(PtrAndSize current, size_t newSize); static PtrAndSize salloc(size_t sz, void * wantedAddress); static void sfree(PtrAndSize alloc); static MemoryAllocator & getDefault(); +private: + static size_t extend_inplace(PtrAndSize current, size_t newSize); + static size_t shrink_inplace(PtrAndSize current, size_t newSize); }; class AutoAllocator : public MemoryAllocator { @@ -263,7 +267,7 @@ AlignedHeapAllocator::alloc(size_t sz) const { size_t MMapAllocator::resize_inplace(PtrAndSize current, size_t newSize) const { - return sextend_inplace(current, newSize); + return sresize_inplace(current, newSize); } MemoryAllocator::PtrAndSize @@ -275,7 +279,7 @@ MemoryAllocator::PtrAndSize MMapAllocator::salloc(size_t sz, void * wantedAddress) { void * buf(nullptr); - sz = (sz + (_G_pageSize - 1)) & ~(_G_pageSize - 1); + sz = roundUp2PageSize(sz); if (sz > 0) { const int flags(MAP_ANON | MAP_PRIVATE); const int prot(PROT_READ | PROT_WRITE); @@ -325,16 +329,35 @@ MMapAllocator::salloc(size_t sz, void * wantedAddress) } size_t -MMapAllocator::sextend_inplace(PtrAndSize current, size_t newSize) { +MMapAllocator::sresize_inplace(PtrAndSize current, size_t newSize) { + newSize = roundUp2PageSize(newSize); + if (newSize > current.second) { + return extend_inplace(current, newSize); + } else if (newSize < current.second) { + return shrink_inplace(current, newSize); + } else { + return current.second; + } +} + +size_t +MMapAllocator::extend_inplace(PtrAndSize current, size_t newSize) { PtrAndSize got = MMapAllocator::salloc(newSize - current.second, static_cast(current.first)+current.second); - if (current.first == got.first) { - return got.second; + if ((static_cast(current.first) + current.second) == static_cast(got.first)) { + return current.second + got.second; } else { MMapAllocator::sfree(got); return 0; } } +size_t +MMapAllocator::shrink_inplace(PtrAndSize current, size_t newSize) { + PtrAndSize toUnmap(static_cast(current.first)+newSize, current.second - newSize); + sfree(toUnmap); + return newSize; +} + void MMapAllocator::free(PtrAndSize alloc) const { sfree(alloc); } @@ -342,8 +365,10 @@ void MMapAllocator::free(PtrAndSize alloc) const { void MMapAllocator::sfree(PtrAndSize alloc) { if (alloc.first != nullptr) { - madvise(alloc.first, alloc.second, MADV_DONTNEED); - munmap(alloc.first, alloc.second); + int retval = madvise(alloc.first, alloc.second, MADV_DONTNEED); + assert(retval == 0); + retval = munmap(alloc.first, alloc.second); + assert(retval == 0); if (alloc.second >= _G_MMapLogLimit) { LockGuard guard(_G_lock); MMapInfo info = _G_HugeMappings[alloc.first]; @@ -359,7 +384,7 @@ size_t AutoAllocator::resize_inplace(PtrAndSize current, size_t newSize) const { if (useMMap(current.second) && useMMap(newSize)) { newSize = roundUpToHugePages(newSize); - 
+        return MMapAllocator::sresize_inplace(current, newSize);
     } else {
         return 0;
     }
@@ -398,7 +423,7 @@ bool
 Alloc::resize_inplace(size_t newSize)
 {
     size_t extendedSize = _allocator->resize_inplace(_alloc, newSize);
-    if (extendedSize > newSize) {
+    if (extendedSize >= newSize) {
         _alloc.second = extendedSize;
         return true;
     }
diff --git a/vespalib/src/vespa/vespalib/util/alloc.h b/vespalib/src/vespa/vespalib/util/alloc.h
index 08f641ca634..a8b304b022c 100644
--- a/vespalib/src/vespa/vespalib/util/alloc.h
+++ b/vespalib/src/vespa/vespalib/util/alloc.h
@@ -21,6 +21,15 @@ public:
     virtual ~MemoryAllocator() { }
     virtual PtrAndSize alloc(size_t sz) const = 0;
     virtual void free(PtrAndSize alloc) const = 0;
+    /*
+     * Attempts to resize the allocation in place. If that is possible it
+     * returns the real size the buffer was resized to; if not it returns 0.
+     * On success the buffer can be accessed up to the returned size, and the
+     * old content is unmodified up to the smaller of the old and new sizes.
+     * This is thread safe, and at no point will data in the buffer be invalid.
+     * @param newSize The desired new size.
+     * @return the real new size on success, 0 if the buffer could not be resized.
+     */
     virtual size_t resize_inplace(PtrAndSize current, size_t newSize) const = 0;
     static size_t roundUpToHugePages(size_t sz) {
         return (sz+(HUGEPAGE_SIZE-1)) & ~(HUGEPAGE_SIZE-1);
@@ -43,6 +52,14 @@ public:
     const void * get() const { return _alloc.first; }
     void * operator -> () { return _alloc.first; }
     const void * operator -> () const { return _alloc.first; }
+    /*
+     * Attempts to resize the allocation in place. If that is possible it
+     * returns true, and the buffer can then be accessed up to the new size.
+     * The old content is unmodified up to the smaller of the old and new sizes.
+     * This is thread safe, and at no point will data in the buffer be invalid.
+     * @param newSize The desired new size.
+     * @return true if the buffer was resized in place.
+     */
     bool resize_inplace(size_t newSize);
     Alloc(const Alloc &) = delete;
     Alloc & operator = (const Alloc &) = delete;
--
cgit v1.2.3
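
For illustration only, a minimal sketch (not part of the patch) of how a caller could combine the new Alloc::resize_inplace() with the allocation API above: try to grow the mapping in place, and fall back to allocate-and-copy only when that fails. The grow() helper is hypothetical, and the vespalib::alloc namespace and header path are assumptions; allocMMap(), get(), size() and swap() are the members exercised by the tests in this patch.

    #include <vespa/vespalib/util/alloc.h>   // assumed header path
    #include <cstring>
    #include <cstddef>

    using vespalib::alloc::Alloc;            // assumed namespace

    // Hypothetical helper: make 'buf' hold at least newSize usable bytes,
    // preserving the existing content.
    void grow(Alloc & buf, size_t newSize) {
        if (buf.resize_inplace(newSize)) {
            return;                                    // extended in place, pointer unchanged
        }
        Alloc bigger = Alloc::allocMMap(newSize);      // fall back to a fresh, larger mapping
        memcpy(bigger.get(), buf.get(), buf.size());   // old content is valid up to the old size
        bigger.swap(buf);                              // old buffer is released when 'bigger' dies
    }

Because sresize_inplace() rounds the requested size up to whole pages and shrink_inplace() simply unmaps the tail, the same resize_inplace() call can also return memory to the OS without touching the data that remains, as the "mmap alloc can be shrunk" test demonstrates.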