author    Henning Baldersheim <balder@yahoo-inc.com>    2017-01-03 11:00:16 +0100
committer Henning Baldersheim <balder@yahoo-inc.com>    2017-01-03 11:01:57 +0100
commit    c0ecc5d29f69fcd6824ad971a3ce4773330a6f1e (patch)
tree      af032c2598096a0072e90bc655c3a3c4eb532446 /vespalib/src
parent    23d144ff2e2e828aa7a45932b7b5363f8fbc1651 (diff)
Implement both extension and shrinking of buffers.
Diffstat (limited to 'vespalib/src')
-rw-r--r--  vespalib/src/tests/alloc/alloc_test.cpp     54
-rw-r--r--  vespalib/src/vespa/vespalib/util/alloc.cpp  55
-rw-r--r--  vespalib/src/vespa/vespalib/util/alloc.h    17
3 files changed, 98 insertions(+), 28 deletions(-)
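
The commit message above is terse, so a usage sketch may help: the new resize_inplace lets a caller grow or shrink an mmap-backed Alloc without moving it. This is a minimal sketch only, assuming the Alloc API exactly as it appears in this diff (allocMMap, get, size, resize_inplace in namespace vespalib); it is not part of the commit.

// Hypothetical usage of the resize_inplace API introduced by this commit.
#include <vespa/vespalib/util/alloc.h>
#include <cstdio>

using vespalib::Alloc;  // assumption: Alloc lives in namespace vespalib

int main() {
    Alloc buf = Alloc::allocMMap(4097);   // mmap sizes are page-rounded: 8192
    void *before = buf.get();
    // Growing only succeeds if the pages directly after the mapping are free.
    if (buf.resize_inplace(3 * 4096)) {
        std::printf("grew in place to %zu bytes, pointer unchanged: %d\n",
                    buf.size(), before == buf.get());
    }
    // Shrinking an mmap allocation releases the tail pages.
    if (buf.resize_inplace(4095)) {
        std::printf("shrunk in place to %zu bytes\n", buf.size());
    }
    return 0;
}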
diff --git a/vespalib/src/tests/alloc/alloc_test.cpp b/vespalib/src/tests/alloc/alloc_test.cpp
index 6950287edb1..03d600c6559 100644
--- a/vespalib/src/tests/alloc/alloc_test.cpp
+++ b/vespalib/src/tests/alloc/alloc_test.cpp
@@ -103,23 +103,51 @@ TEST("rounding of large mmaped buffer") {
}
TEST("heap alloc can not be extended") {
- Alloc buf = Alloc::allocHeap(100);
- void * oldPtr = buf.get();
- EXPECT_EQUAL(100, buf.size());
- EXPECT_FALSE(buf.resize_inplace(101));
- EXPECT_EQUAL(oldPtr, buf.get());
- EXPECT_EQUAL(100, buf.size());
+ Alloc buf = Alloc::allocHeap(100);
+ void * oldPtr = buf.get();
+ EXPECT_EQUAL(100, buf.size());
+ EXPECT_FALSE(buf.resize_inplace(101));
+ EXPECT_EQUAL(oldPtr, buf.get());
+ EXPECT_EQUAL(100, buf.size());
}
-TEST("mmap alloc can be extended") {
- Alloc buf = Alloc::allocMMap(100);
- void * oldPtr = buf.get();
- EXPECT_EQUAL(4096, buf.size());
- EXPECT_TRUE(buf.resize_inplace(4097));
- EXPECT_EQUAL(oldPtr, buf.get());
- EXPECT_EQUAL(8192, buf.size());
+TEST("mmap alloc can be extended if room") {
+ Alloc reserved = Alloc::allocMMap(100);
+ Alloc buf = Alloc::allocMMap(100);
+
+ // Normally mmapping starts at the top and grows down in address space.
+ // Then there is no room to extend the last mapping.
+ // So in order to verify this we first mmap a reserved area that we unmap
+ // before we test extension.
+ EXPECT_GREATER(reserved.get(), buf.get());
+ EXPECT_EQUAL(reserved.get(), static_cast<const char *>(buf.get()) + buf.size());
+ {
+ Alloc().swap(reserved);
+ }
+
+ void * oldPtr = buf.get();
+ EXPECT_EQUAL(4096, buf.size());
+ EXPECT_TRUE(buf.resize_inplace(4097));
+ EXPECT_EQUAL(oldPtr, buf.get());
+ EXPECT_EQUAL(8192, buf.size());
}
+TEST("heap alloc can not be shrinked") {
+ Alloc buf = Alloc::allocHeap(101);
+ void * oldPtr = buf.get();
+ EXPECT_EQUAL(101, buf.size());
+ EXPECT_FALSE(buf.resize_inplace(100));
+ EXPECT_EQUAL(oldPtr, buf.get());
+ EXPECT_EQUAL(101, buf.size());
+}
+TEST("mmap alloc can be shrinked") {
+ Alloc buf = Alloc::allocMMap(4097);
+ void * oldPtr = buf.get();
+ EXPECT_EQUAL(8192, buf.size());
+ EXPECT_TRUE(buf.resize_inplace(4095));
+ EXPECT_EQUAL(oldPtr, buf.get());
+ EXPECT_EQUAL(4096, buf.size());
+}
TEST_MAIN() { TEST_RUN_ALL(); }
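
The comment in the test above hints at the core trick: extending works by asking mmap for new pages at the address immediately after the current mapping, and keeping them only if the kernel honored the hint. A standalone sketch of that technique, using plain POSIX calls rather than the commit's salloc (which also handles huge pages and logging):

#include <sys/mman.h>
#include <cstddef>

// Try to grow an anonymous mapping in place. Returns the new total size,
// or 0 if the adjacent address range was already occupied. Sizes are in
// bytes and assumed page-aligned.
static size_t try_extend(void *base, size_t oldSize, size_t extra) {
    void *wanted = static_cast<char *>(base) + oldSize;
    void *got = mmap(wanted, extra, PROT_READ | PROT_WRITE,
                     MAP_ANON | MAP_PRIVATE, -1, 0);
    if (got == MAP_FAILED) {
        return 0;
    }
    if (got != wanted) {  // the hint is not binding; the kernel chose elsewhere
        munmap(got, extra);
        return 0;
    }
    return oldSize + extra;
}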
diff --git a/vespalib/src/vespa/vespalib/util/alloc.cpp b/vespalib/src/vespa/vespalib/util/alloc.cpp
index 33769ac009d..513ad985206 100644
--- a/vespalib/src/vespa/vespalib/util/alloc.cpp
+++ b/vespalib/src/vespa/vespalib/util/alloc.cpp
@@ -1,20 +1,16 @@
// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "alloc.h"
-#include <stdlib.h>
-#include <errno.h>
#include <sys/mman.h>
-#include <linux/mman.h>
-#include <stdexcept>
#include <vespa/vespalib/util/stringfmt.h>
#include <vespa/vespalib/util/exceptions.h>
#include <vespa/vespalib/util/backtrace.h>
#include <vespa/vespalib/util/sync.h>
-#include <vespa/log/log.h>
#include <map>
#include <atomic>
#include <unordered_map>
#include <vespa/fastos/file.h>
+#include <vespa/log/log.h>
LOG_SETUP(".vespalib.alloc");
namespace vespalib {
@@ -30,6 +26,11 @@ size_t _G_MMapNoCoreLimit = std::numeric_limits<size_t>::max();
Lock _G_lock;
std::atomic<size_t> _G_mmapCount(0);
+size_t
+roundUp2PageSize(size_t sz) {
+ return (sz + (_G_pageSize - 1)) & ~(_G_pageSize - 1);
+}
+
struct MMapInfo {
MMapInfo() :
_id(0ul),
@@ -141,10 +142,13 @@ public:
PtrAndSize alloc(size_t sz) const override;
void free(PtrAndSize alloc) const override;
size_t resize_inplace(PtrAndSize current, size_t newSize) const override;
- static size_t sextend_inplace(PtrAndSize current, size_t newSize);
+ static size_t sresize_inplace(PtrAndSize current, size_t newSize);
static PtrAndSize salloc(size_t sz, void * wantedAddress);
static void sfree(PtrAndSize alloc);
static MemoryAllocator & getDefault();
+private:
+ static size_t extend_inplace(PtrAndSize current, size_t newSize);
+ static size_t shrink_inplace(PtrAndSize current, size_t newSize);
};
class AutoAllocator : public MemoryAllocator {
@@ -263,7 +267,7 @@ AlignedHeapAllocator::alloc(size_t sz) const {
size_t
MMapAllocator::resize_inplace(PtrAndSize current, size_t newSize) const {
- return sextend_inplace(current, newSize);
+ return sresize_inplace(current, newSize);
}
MemoryAllocator::PtrAndSize
@@ -275,7 +279,7 @@ MemoryAllocator::PtrAndSize
MMapAllocator::salloc(size_t sz, void * wantedAddress)
{
void * buf(nullptr);
- sz = (sz + (_G_pageSize - 1)) & ~(_G_pageSize - 1);
+ sz = roundUp2PageSize(sz);
if (sz > 0) {
const int flags(MAP_ANON | MAP_PRIVATE);
const int prot(PROT_READ | PROT_WRITE);
@@ -325,16 +329,35 @@ MMapAllocator::salloc(size_t sz, void * wantedAddress)
}
size_t
-MMapAllocator::sextend_inplace(PtrAndSize current, size_t newSize) {
+MMapAllocator::sresize_inplace(PtrAndSize current, size_t newSize) {
+ newSize = roundUp2PageSize(newSize);
+ if (newSize > current.second) {
+ return extend_inplace(current, newSize);
+ } else if (newSize < current.second) {
+ return shrink_inplace(current, newSize);
+ } else {
+ return current.second;
+ }
+}
+
+size_t
+MMapAllocator::extend_inplace(PtrAndSize current, size_t newSize) {
PtrAndSize got = MMapAllocator::salloc(newSize - current.second, static_cast<char *>(current.first)+current.second);
- if (current.first == got.first) {
- return got.second;
+ if ((static_cast<const char *>(current.first) + current.second) == static_cast<const char *>(got.first)) {
+ return current.second + got.second;
} else {
MMapAllocator::sfree(got);
return 0;
}
}
+size_t
+MMapAllocator::shrink_inplace(PtrAndSize current, size_t newSize) {
+ PtrAndSize toUnmap(static_cast<char *>(current.first)+newSize, current.second - newSize);
+ sfree(toUnmap);
+ return newSize;
+}
+
void MMapAllocator::free(PtrAndSize alloc) const {
sfree(alloc);
}
@@ -342,8 +365,10 @@ void MMapAllocator::free(PtrAndSize alloc) const {
void MMapAllocator::sfree(PtrAndSize alloc)
{
if (alloc.first != nullptr) {
- madvise(alloc.first, alloc.second, MADV_DONTNEED);
- munmap(alloc.first, alloc.second);
+ int retval = madvise(alloc.first, alloc.second, MADV_DONTNEED);
+ assert(retval == 0);
+ retval = munmap(alloc.first, alloc.second);
+ assert(retval == 0);
if (alloc.second >= _G_MMapLogLimit) {
LockGuard guard(_G_lock);
MMapInfo info = _G_HugeMappings[alloc.first];
@@ -359,7 +384,7 @@ size_t
AutoAllocator::resize_inplace(PtrAndSize current, size_t newSize) const {
if (useMMap(current.second) && useMMap(newSize)) {
newSize = roundUpToHugePages(newSize);
- return MMapAllocator::sextend_inplace(current, newSize);
+ return MMapAllocator::sresize_inplace(current, newSize);
} else {
return 0;
}
@@ -398,7 +423,7 @@ bool
Alloc::resize_inplace(size_t newSize)
{
size_t extendedSize = _allocator->resize_inplace(_alloc, newSize);
- if (extendedSize > newSize) {
+ if (extendedSize >= newSize) {
_alloc.second = extendedSize;
return true;
}
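
Shrinking needs no such negotiation: munmap can release any page-aligned tail of an existing mapping, which is all shrink_inplace above does. A reduced sketch of the idea, assuming both sizes are already multiples of the page size:

#include <sys/mman.h>
#include <cstddef>

// Unmap the tail of an anonymous mapping, keeping the first newSize bytes.
// Both sizes must be page-aligned. Returns the retained size.
static size_t shrink_tail(void *base, size_t oldSize, size_t newSize) {
    munmap(static_cast<char *>(base) + newSize, oldSize - newSize);
    return newSize;
}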
diff --git a/vespalib/src/vespa/vespalib/util/alloc.h b/vespalib/src/vespa/vespalib/util/alloc.h
index 08f641ca634..a8b304b022c 100644
--- a/vespalib/src/vespa/vespalib/util/alloc.h
+++ b/vespalib/src/vespa/vespalib/util/alloc.h
@@ -21,6 +21,15 @@ public:
virtual ~MemoryAllocator() { }
virtual PtrAndSize alloc(size_t sz) const = 0;
virtual void free(PtrAndSize alloc) const = 0;
+ /*
+ * Resizes the allocation in place if possible. On success it returns the real
+ * (rounded) size of the buffer; on failure it returns 0.
+ * Afterwards the buffer can be accessed up to the new size, and its contents
+ * are unmodified up to the smaller of the old and new sizes.
+ * This is thread safe and at no point will data in the buffer be invalid.
+ * @param newSize the desired new size
+ * @return the real size of the buffer on success, 0 otherwise.
+ */
virtual size_t resize_inplace(PtrAndSize current, size_t newSize) const = 0;
static size_t roundUpToHugePages(size_t sz) {
return (sz+(HUGEPAGE_SIZE-1)) & ~(HUGEPAGE_SIZE-1);
@@ -43,6 +52,14 @@ public:
const void * get() const { return _alloc.first; }
void * operator -> () { return _alloc.first; }
const void * operator -> () const { return _alloc.first; }
+ /*
+ * Resizes the allocation in place if possible and returns true on success.
+ * Afterwards the buffer can be accessed up to the new size, and its contents
+ * are unmodified up to the smaller of the old and new sizes.
+ * This is thread safe and at no point will data in the buffer be invalid.
+ * @param newSize the desired new size
+ * @return true if successful.
+ */
bool resize_inplace(size_t newSize);
Alloc(const Alloc &) = delete;
Alloc & operator = (const Alloc &) = delete;
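
Both roundUp2PageSize in alloc.cpp and roundUpToHugePages above rely on the same round-up-to-a-power-of-two bit trick. A small self-contained illustration, assuming a 4 KiB page size; the values match the expectations in alloc_test.cpp:

#include <cstddef>

constexpr std::size_t PAGE = 4096;  // assumption: page size, a power of two

constexpr std::size_t roundUp(std::size_t sz) {
    // Adding PAGE-1 carries any partial page over; masking clears the remainder.
    return (sz + (PAGE - 1)) & ~(PAGE - 1);
}

static_assert(roundUp(0) == 0, "zero stays zero");
static_assert(roundUp(1) == 4096, "one byte needs a whole page");
static_assert(roundUp(4096) == 4096, "exact multiples are unchanged");
static_assert(roundUp(4097) == 8192, "one byte over spills into a new page");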