path: root/staging_vespalib
author     Henning Baldersheim <balder@yahoo-inc.com>  2016-12-14 22:58:54 +0100
committer  Henning Baldersheim <balder@yahoo-inc.com>  2016-12-15 13:12:36 +0100
commit     2a85dc3fd5af5c33601cf04ead06c7545fa46d75 (patch)
tree       f46f355235fd7684a9f8a6bb562797fd985d1180 /staging_vespalib
parent     d9b45214d28207564329991afe70afc358fe6d12 (diff)
Split in hash_xxx, array, lru, cache ++ in hpp files. To reduce clinon build
Diffstat (limited to 'staging_vespalib')
-rw-r--r--  staging_vespalib/src/vespa/vespalib/data/fileheader.h          3
-rw-r--r--  staging_vespalib/src/vespa/vespalib/objects/identifiable.cpp   4
-rw-r--r--  staging_vespalib/src/vespa/vespalib/stllike/cache.h           139
-rw-r--r--  staging_vespalib/src/vespa/vespalib/stllike/cache.hpp         161
-rw-r--r--  staging_vespalib/src/vespa/vespalib/stllike/lrucache_map.h    246
-rw-r--r--  staging_vespalib/src/vespa/vespalib/stllike/lrucache_map.hpp  266
6 files changed, 445 insertions(+), 374 deletions(-)
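
The pattern applied throughout this commit is to keep only declarations in the .h header and move the template member definitions into a matching .hpp that is included solely by translation units which actually instantiate the template. A minimal sketch of that split, using hypothetical names (widget_map, example) that are not part of this commit:

// widget_map.h -- declarations only, cheap to include from other headers
#pragma once
#include <cstddef>

namespace example {

template <typename K, typename V>
class widget_map {
public:
    explicit widget_map(size_t capacity);
    void insert(const K & key, const V & value);
private:
    size_t _capacity;
};

}

// widget_map.hpp -- member definitions, included only where instantiation happens
#pragma once
#include "widget_map.h"

namespace example {

template <typename K, typename V>
widget_map<K, V>::widget_map(size_t capacity)
    : _capacity(capacity)
{ }

template <typename K, typename V>
void
widget_map<K, V>::insert(const K & key, const V & value) {
    (void) key;
    (void) value;  // body elided in this sketch
}

}

Code that only holds a widget_map by pointer or reference includes widget_map.h; the few .cpp files that construct it or call its members include widget_map.hpp, so most translation units never have to parse or re-instantiate the definitions.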
diff --git a/staging_vespalib/src/vespa/vespalib/data/fileheader.h b/staging_vespalib/src/vespa/vespalib/data/fileheader.h
index ca9f33714db..4ad6172bd2b 100644
--- a/staging_vespalib/src/vespa/vespalib/data/fileheader.h
+++ b/staging_vespalib/src/vespa/vespalib/data/fileheader.h
@@ -3,9 +3,10 @@
#include <map>
#include "databuffer.h"
-#include <vespa/fastos/file.h>
#include <vespa/vespalib/util/exception.h>
+class FastOS_FileInterface;
+
namespace vespalib {
class asciistream;
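
The hunk above compiles because fileheader.h only refers to FastOS_FileInterface through pointers and references, so a forward declaration suffices and the full fastos header is pulled in only by the .cpp files that actually call through the interface. A hedged sketch of the same idea, with hypothetical names (Report, Printer, printer.h) not taken from this commit:

// report.h -- a forward declaration is enough for parameters passed by reference
#pragma once

class Printer;  // full definition not needed in the header

class Report {
public:
    void render(Printer & out) const;  // declared here, defined in report.cpp
};

// report.cpp -- the one translation unit that needs the complete type
#include "report.h"
#include "printer.h"  // hypothetical header that defines Printer

void Report::render(Printer & out) const {
    out.write("report body");  // assumes Printer exposes a write(const char *) member
}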
diff --git a/staging_vespalib/src/vespa/vespalib/objects/identifiable.cpp b/staging_vespalib/src/vespa/vespalib/objects/identifiable.cpp
index f142fedade5..48f40ccff3a 100644
--- a/staging_vespalib/src/vespa/vespalib/objects/identifiable.cpp
+++ b/staging_vespalib/src/vespa/vespalib/objects/identifiable.cpp
@@ -1,5 +1,5 @@
// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include <vespa/fastos/fastos.h>
+
#include "identifiable.hpp"
#include <cassert>
#include <vespa/vespalib/util/stringfmt.h>
@@ -11,7 +11,7 @@
#include "objectpredicate.h"
#include "objectoperation.h"
#include <vespa/vespalib/util/classname.h>
-#include <vespa/vespalib/stllike/hash_set.h>
+#include <vespa/vespalib/stllike/hash_set.hpp>
namespace vespalib {
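
With the header/.hpp split, a .cpp that instantiates one of these containers has to include the .hpp, as identifiable.cpp does above for hash_set; including only the .h would still parse, but the member definitions would be unavailable for instantiation, which typically surfaces as unresolved symbols at link time. A small sketch under that assumption, continuing the hypothetical widget_map example from above:

// registry.cpp -- instantiates the template, so it needs the definitions
#include "widget_map.hpp"  // not just widget_map.h
#include <string>

namespace example {

void buildRegistry() {
    widget_map<std::string, int> ids(128);  // instantiation happens here
    ids.insert("first", 1);
}

}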
diff --git a/staging_vespalib/src/vespa/vespalib/stllike/cache.h b/staging_vespalib/src/vespa/vespalib/stllike/cache.h
index 2304d6389b2..27d6740e7a6 100644
--- a/staging_vespalib/src/vespa/vespalib/stllike/cache.h
+++ b/staging_vespalib/src/vespa/vespalib/stllike/cache.h
@@ -61,21 +61,15 @@ public:
* @maxBytes is the maximum limit of bytes the store can hold, before eviction starts.
*/
cache(BackingStore & b, size_t maxBytes);
-
+ ~cache();
/**
* Can be used for controlling max number of elements.
*/
- cache & maxElements(size_t elems) {
- Lru::maxElements(elems);
- return *this;
- }
+ cache & maxElements(size_t elems);
/**
* Can be used for reserving space for elements.
*/
- cache & reserveElements(size_t elems) {
- Lru::reserve(elems);
- return *this;
- }
+ cache & reserveElements(size_t elems);
size_t capacity() const { return Lru::capacity(); }
size_t capacityBytes() const { return _maxBytes; }
@@ -91,10 +85,7 @@ public:
/**
* This simply erases the object from the cache.
*/
- void invalidate(const K & key) {
- vespalib::LockGuard guard(_hashLock);
- invalidate(guard, key);
- }
+ void invalidate(const K & key);
/**
* Return the object with the given key. If it does not exist, the backing store will be consulted.
@@ -114,10 +105,7 @@ public:
* Tell if an object with given key exists in the cache.
* Does not alter the LRU list.
*/
- bool hasKey(const K & key) const {
- vespalib::LockGuard guard(_hashLock);
- return hasKey(guard, key);
- }
+ bool hasKey(const K & key) const;
size_t getHit() const { return _hit; }
size_t getMiss() const { return _miss; }
@@ -133,7 +121,7 @@ protected:
vespalib::LockGuard getGuard();
void invalidate(const vespalib::LockGuard & guard, const K & key);
bool hasKey(const vespalib::LockGuard & guard, const K & key) const;
- bool hasLock() const { return TryLock(_hashLock).hasLock(); }
+ bool hasLock() const;
private:
/**
* Called when an object is inserted, to see if the LRU should be removed.
@@ -167,119 +155,4 @@ private:
vespalib::Lock _addLocks[113];
};
-template< typename P >
-cache<P>::cache(BackingStore & b, size_t maxBytes) :
- Lru(Lru::UNLIMITED),
- _maxBytes(maxBytes),
- _sizeBytes(0),
- _hit(0),
- _miss(0),
- _noneExisting(0),
- _race(0),
- _insert(0),
- _write(0),
- _erase(0),
- _invalidate(0),
- _lookup(0),
- _store(b)
-{ }
-
-template< typename P >
-bool
-cache<P>::removeOldest(const value_type & v) {
- bool remove(Lru::removeOldest(v) || (sizeBytes() >= capacityBytes()));
- if (remove) {
- _sizeBytes -= calcSize(v.first, v.second._value);
- }
- return remove;
}
-
-template< typename P >
-vespalib::LockGuard
-cache<P>::getGuard() {
- return vespalib::LockGuard(_hashLock);
-}
-
-template< typename P >
-typename P::Value
-cache<P>::read(const K & key)
-{
- {
- vespalib::LockGuard guard(_hashLock);
- if (Lru::hasKey(key)) {
- _hit++;
- return (*this)[key];
- } else {
- _miss++;
- }
- }
-
- vespalib::LockGuard storeGuard(getLock(key));
- {
- vespalib::LockGuard guard(_hashLock);
- if (Lru::hasKey(key)) {
- // Somebody else just fetched it ahead of me.
- _race++;
- return (*this)[key];
- }
- }
- V value;
- if (_store.read(key, value)) {
- vespalib::LockGuard guard(_hashLock);
- Lru::insert(key, value);
- _sizeBytes += calcSize(key, value);
- _insert++;
- } else {
- vespalib::Atomic::postInc(&_noneExisting);
- }
- return value;
-}
-
-template< typename P >
-void
-cache<P>::write(const K & key, const V & value)
-{
- vespalib::LockGuard storeGuard(getLock(key));
- {
- vespalib::LockGuard guard(_hashLock);
- (*this)[key] = value;
- _sizeBytes += calcSize(key, value);
- _write++;
- }
- _store.write(key, value);
-}
-
-template< typename P >
-void
-cache<P>::erase(const K & key)
-{
- vespalib::LockGuard storeGuard(getLock(key));
- invalidate(key);
- _store.erase(key);
-}
-
-template< typename P >
-void
-cache<P>::invalidate(const vespalib::LockGuard & guard, const K & key)
-{
- assert(guard.locks(_hashLock));
- (void) guard;
- if (Lru::hasKey(key)) {
- _sizeBytes -= calcSize(key, (*this)[key]);
- _invalidate++;
- Lru::erase(key);
- }
-}
-
-template< typename P >
-bool
-cache<P>::hasKey(const vespalib::LockGuard & guard, const K & key) const
-{
- (void) guard;
- assert(guard.locks(_hashLock));
- _lookup++;
- return Lru::hasKey(key);
-}
-
-}
-
diff --git a/staging_vespalib/src/vespa/vespalib/stllike/cache.hpp b/staging_vespalib/src/vespa/vespalib/stllike/cache.hpp
new file mode 100644
index 00000000000..1b608578e8d
--- /dev/null
+++ b/staging_vespalib/src/vespa/vespalib/stllike/cache.hpp
@@ -0,0 +1,161 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#pragma once
+
+#include "cache.h"
+#include "lrucache_map.hpp"
+
+namespace vespalib {
+
+template< typename P >
+cache<P> &
+cache<P>::maxElements(size_t elems) {
+ Lru::maxElements(elems);
+ return *this;
+}
+
+template< typename P >
+cache<P> &
+cache<P>::reserveElements(size_t elems) {
+ Lru::reserve(elems);
+ return *this;
+}
+
+template< typename P >
+void
+cache<P>::invalidate(const K & key) {
+ vespalib::LockGuard guard(_hashLock);
+ invalidate(guard, key);
+}
+
+template< typename P >
+bool
+cache<P>::hasKey(const K & key) const {
+ vespalib::LockGuard guard(_hashLock);
+ return hasKey(guard, key);
+}
+
+template< typename P >
+bool
+cache<P>::hasLock() const {
+ return TryLock(_hashLock).hasLock();
+}
+
+template< typename P >
+cache<P>::~cache() { }
+
+template< typename P >
+cache<P>::cache(BackingStore & b, size_t maxBytes) :
+ Lru(Lru::UNLIMITED),
+ _maxBytes(maxBytes),
+ _sizeBytes(0),
+ _hit(0),
+ _miss(0),
+ _noneExisting(0),
+ _race(0),
+ _insert(0),
+ _write(0),
+ _erase(0),
+ _invalidate(0),
+ _lookup(0),
+ _store(b)
+{ }
+
+template< typename P >
+bool
+cache<P>::removeOldest(const value_type & v) {
+ bool remove(Lru::removeOldest(v) || (sizeBytes() >= capacityBytes()));
+ if (remove) {
+ _sizeBytes -= calcSize(v.first, v.second._value);
+ }
+ return remove;
+}
+
+template< typename P >
+vespalib::LockGuard
+cache<P>::getGuard() {
+ return vespalib::LockGuard(_hashLock);
+}
+
+template< typename P >
+typename P::Value
+cache<P>::read(const K & key)
+{
+ {
+ vespalib::LockGuard guard(_hashLock);
+ if (Lru::hasKey(key)) {
+ _hit++;
+ return (*this)[key];
+ } else {
+ _miss++;
+ }
+ }
+
+ vespalib::LockGuard storeGuard(getLock(key));
+ {
+ vespalib::LockGuard guard(_hashLock);
+ if (Lru::hasKey(key)) {
+ // Somebody else just fetched it ahead of me.
+ _race++;
+ return (*this)[key];
+ }
+ }
+ V value;
+ if (_store.read(key, value)) {
+ vespalib::LockGuard guard(_hashLock);
+ Lru::insert(key, value);
+ _sizeBytes += calcSize(key, value);
+ _insert++;
+ } else {
+ vespalib::Atomic::postInc(&_noneExisting);
+ }
+ return value;
+}
+
+template< typename P >
+void
+cache<P>::write(const K & key, const V & value)
+{
+ vespalib::LockGuard storeGuard(getLock(key));
+ {
+ vespalib::LockGuard guard(_hashLock);
+ (*this)[key] = value;
+ _sizeBytes += calcSize(key, value);
+ _write++;
+ }
+ _store.write(key, value);
+}
+
+template< typename P >
+void
+cache<P>::erase(const K & key)
+{
+ vespalib::LockGuard storeGuard(getLock(key));
+ invalidate(key);
+ _store.erase(key);
+}
+
+template< typename P >
+void
+cache<P>::invalidate(const vespalib::LockGuard & guard, const K & key)
+{
+ assert(guard.locks(_hashLock));
+ (void) guard;
+ if (Lru::hasKey(key)) {
+ _sizeBytes -= calcSize(key, (*this)[key]);
+ _invalidate++;
+ Lru::erase(key);
+ }
+}
+
+template< typename P >
+bool
+cache<P>::hasKey(const vespalib::LockGuard & guard, const K & key) const
+{
+ (void) guard;
+ assert(guard.locks(_hashLock));
+ _lookup++;
+ return Lru::hasKey(key);
+}
+
+}
+
diff --git a/staging_vespalib/src/vespa/vespalib/stllike/lrucache_map.h b/staging_vespalib/src/vespa/vespalib/stllike/lrucache_map.h
index b48096f184c..747dfcacc56 100644
--- a/staging_vespalib/src/vespa/vespalib/stllike/lrucache_map.h
+++ b/staging_vespalib/src/vespa/vespalib/stllike/lrucache_map.h
@@ -2,6 +2,7 @@
#pragma once
#include <vespa/vespalib/stllike/hashtable.h>
+#include <vespa/vespalib/stllike/hash_fun.h>
namespace vespalib {
@@ -83,6 +84,7 @@ public:
* @param maxElements in cache unless you override @ref removeOldest.
*/
lrucache_map(size_t maxElems);
+ virtual ~lrucache_map();
lrucache_map & maxElements(size_t elems) {
_maxElements = elems;
@@ -119,13 +121,7 @@ public:
* Object is inserted in cache with given key.
* Object is then put at head of LRU list.
*/
- insert_result insert(const K & key, const V & value) {
- insert_result res = insert(value_type(key, LV(value)));
- if (res.second) {
- onInsert(key);
- }
- return res;
- }
+ insert_result insert(const K & key, const V & value);
/**
* Return the object with the given key. If it does not exist an empty one will be created.
@@ -155,16 +151,9 @@ public:
* The obvious extension is when you are storing pointers and want to cap
* on the real size of the object pointed to.
*/
- virtual bool removeOldest(const value_type & v) {
- (void) v;
- return (size() > capacity());
- }
- virtual void onRemove(const K & key) {
- (void) key;
- }
- virtual void onInsert(const K & key) {
- (void) key;
- }
+ virtual bool removeOldest(const value_type & v);
+ virtual void onRemove(const K & key);
+ virtual void onInsert(const K & key);
/**
* Method for testing that internal consitency is good.
@@ -198,19 +187,8 @@ private:
{
_lru._moveRecordingEnabled = true;
}
- uint32_t movedTo(uint32_t from) {
- for (size_t i(0); i < _lru._moved.size(); i++) {
- const MoveRecord & mr(_lru._moved[i]);
- if (mr.first == from) {
- from = mr.second;
- }
- }
- return from;
- }
- ~RecordMoves() {
- _lru._moveRecordingEnabled = false;
- _lru._moved.clear();
- }
+ ~RecordMoves();
+ uint32_t movedTo(uint32_t from);
private:
lrucache_map & _lru;
};
@@ -222,212 +200,4 @@ private:
MoveRecords _moved;
};
-template< typename P >
-lrucache_map<P>::lrucache_map(size_t maxElems) :
- HashTable(0),
- _maxElements(maxElems),
- _head(LinkedValueBase::npos),
- _tail(LinkedValueBase::npos),
- _moveRecordingEnabled(false),
- _moved()
-{
-}
-
-template< typename P >
-void
-lrucache_map<P>::swap(lrucache_map & rhs) {
- std::swap(_maxElements, rhs._maxElements);
- std::swap(_head, rhs._head);
- std::swap(_tail, rhs._tail);
- HashTable::swap(rhs);
-}
-
-template< typename P >
-void
-lrucache_map<P>::move(next_t from, next_t to) {
- (void) from;
- if (_moveRecordingEnabled) {
- _moved.push_back(std::make_pair(from, to));
- }
- value_type & moved = HashTable::getByInternalIndex(to);
- if (moved.second._prev != LinkedValueBase::npos) {
- HashTable::getByInternalIndex(moved.second._prev).second._next = to;
- } else {
- _head = to;
- }
- if (moved.second._next != LinkedValueBase::npos) {
- HashTable::getByInternalIndex(moved.second._next).second._prev = to;
- } else {
- _tail = to;
- }
}
-
-template< typename P >
-void
-lrucache_map<P>::erase(const K & key) {
- internal_iterator it = HashTable::find(key);
- if (it != HashTable::end()) {
- onRemove(key);
- LV & v = it->second;
- if (v._prev != LinkedValueBase::npos) {
- HashTable::getByInternalIndex(v._prev).second._next = v._next;
- } else {
- _head = v._next;
- }
- if (v._next != LinkedValueBase::npos) {
- HashTable::getByInternalIndex(v._next).second._prev = v._prev;
- } else {
- _tail = v._prev;
- }
- HashTable::erase(*this, it);
- }
-}
-
-template< typename P >
-typename lrucache_map<P>::iterator
-lrucache_map<P>::erase(const iterator & it)
-{
- iterator next(it);
- if (it != end()) {
- RecordMoves moves(*this);
- next++;
- const K & key(HashTable::getByInternalIndex(it._current).first);
- erase(key);
- next = iterator(this, moves.movedTo(next._current));
- }
- return next;
-}
-
-template< typename P >
-bool
-lrucache_map<P>::verifyInternals()
-{
- bool retval(true);
- assert(_head != LinkedValueBase::npos);
- assert(_tail != LinkedValueBase::npos);
- assert(HashTable::getByInternalIndex(_head).second._prev == LinkedValueBase::npos);
- assert(HashTable::getByInternalIndex(_tail).second._next == LinkedValueBase::npos);
- {
- size_t i(0);
- size_t prev(LinkedValueBase::npos);
- size_t c(_head);
- for(size_t m(size()); (c != LinkedValueBase::npos) && (i < m); c = HashTable::getByInternalIndex(c).second._next, i++) {
- assert((HashTable::getByInternalIndex(c).second._prev == prev));
- prev = c;
- }
- assert(i == size());
- assert(c == LinkedValueBase::npos);
- }
- {
- size_t i(0);
- size_t next(LinkedValueBase::npos);
- size_t c(_tail);
- for(size_t m(size()); (c != LinkedValueBase::npos) && (i < m); c = HashTable::getByInternalIndex(c).second._prev, i++) {
- assert((HashTable::getByInternalIndex(c).second._next == next));
- next = c;
- }
- assert(i == size());
- assert(c == LinkedValueBase::npos);
- }
- return retval;
-}
-
-template< typename P >
-void
-lrucache_map<P>::move(NodeStore && oldStore)
-{
- next_t curr(_tail);
- _tail = LinkedValueBase::npos;
- _head = LinkedValueBase::npos;
-
- while (curr != LinkedValueBase::npos) {
- value_type & v = oldStore[curr].getValue();
- curr = v.second._prev;
- v.second._prev = LinkedValueBase::npos;
- v.second._next = LinkedValueBase::npos;
- insert(std::move(v));
- }
-}
-
-template< typename P >
-void
-lrucache_map<P>::removeOld() {
- if (_tail != LinkedValueBase::npos) {
- for (value_type * last(& HashTable::getByInternalIndex(_tail));
- (_tail != _head) && removeOldest(*last);
- last = & HashTable::getByInternalIndex(_tail))
- {
- _tail = last->second._prev;
- HashTable::getByInternalIndex(_tail).second._next = LinkedValueBase::npos;
- HashTable::erase(*this, HashTable::find(last->first));
- }
- }
-}
-
-template< typename P >
-void
-lrucache_map<P>::ref(const internal_iterator & it) {
- uint32_t me(it.getInternalIndex());
- if (me != _head) {
- LV & v = it->second;
- LV & oldPrev = HashTable::getByInternalIndex(v._prev).second;
- oldPrev._next = v._next;
- if (me != _tail) {
- LV & oldNext = HashTable::getByInternalIndex(v._next).second;
- oldNext._prev = v._prev;
- } else {
- // I am tail and I am not the only one.
- _tail = v._prev;
- }
- LV & oldHead = HashTable::getByInternalIndex(_head).second;
- oldHead._prev = me;
- v._next = _head;
- v._prev = LinkedValueBase::npos;
- _head = me;
- }
-}
-
-template< typename P >
-typename lrucache_map<P>::insert_result
-lrucache_map<P>::insert(value_type && value) {
- insert_result res = HashTable::insertInternal(std::forward<value_type>(value));
- uint32_t next(_head);
- if ( ! res.second) {
- ref(res.first);
- } else {
- _head = res.first.getInternalIndex();
- HashTable::getByInternalIndex(_head).second._next = next;
- if (next != LinkedValueBase::npos) {
- HashTable::getByInternalIndex(next).second._prev = _head;
- }
- if (_tail == LinkedValueBase::npos) {
- _tail = _head;
- }
- removeOld();
- if (_head != res.first.getInternalIndex()) {
- res.first.setInternalIndex(_head);
- }
- }
- return res;
-}
-
-template< typename P >
-typename P::Value &
-lrucache_map<P>::operator [] (const K & key)
-{
- return insert(key, V()).first->second._value;
-}
-
-template< typename P >
-typename lrucache_map<P>::internal_iterator
-lrucache_map<P>::findAndRef(const K & key)
-{
- internal_iterator found = HashTable::find(key);
- if (found != HashTable::end()) {
- ref(found);
- }
- return found;
-}
-
-}
-
diff --git a/staging_vespalib/src/vespa/vespalib/stllike/lrucache_map.hpp b/staging_vespalib/src/vespa/vespalib/stllike/lrucache_map.hpp
new file mode 100644
index 00000000000..3301abca3ba
--- /dev/null
+++ b/staging_vespalib/src/vespa/vespalib/stllike/lrucache_map.hpp
@@ -0,0 +1,266 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#pragma once
+
+#include "lrucache_map.h"
+#include <vespa/vespalib/stllike/hashtable.hpp>
+
+namespace vespalib {
+
+template< typename P >
+typename lrucache_map<P>::insert_result
+lrucache_map<P>::insert(const K & key, const V & value) {
+ insert_result res = insert(value_type(key, LV(value)));
+ if (res.second) {
+ onInsert(key);
+ }
+ return res;
+}
+
+template< typename P >
+bool
+lrucache_map<P>::removeOldest(const value_type & v) {
+ (void) v;
+ return (size() > capacity());
+}
+
+template< typename P >
+void
+lrucache_map<P>::onRemove(const K & key) {
+ (void) key;
+}
+
+template< typename P >
+void
+lrucache_map<P>::onInsert(const K & key) {
+ (void) key;
+}
+
+template< typename P >
+uint32_t
+lrucache_map<P>::RecordMoves::movedTo(uint32_t from) {
+ for (size_t i(0); i < _lru._moved.size(); i++) {
+ const MoveRecord & mr(_lru._moved[i]);
+ if (mr.first == from) {
+ from = mr.second;
+ }
+ }
+ return from;
+}
+
+template< typename P >
+lrucache_map<P>::RecordMoves::~RecordMoves() {
+ _lru._moveRecordingEnabled = false;
+ _lru._moved.clear();
+}
+
+template< typename P >
+lrucache_map<P>::lrucache_map(size_t maxElems) :
+ HashTable(0),
+ _maxElements(maxElems),
+ _head(LinkedValueBase::npos),
+ _tail(LinkedValueBase::npos),
+ _moveRecordingEnabled(false),
+ _moved()
+{ }
+
+template< typename P >
+lrucache_map<P>::~lrucache_map() { }
+
+template< typename P >
+void
+lrucache_map<P>::swap(lrucache_map & rhs) {
+ std::swap(_maxElements, rhs._maxElements);
+ std::swap(_head, rhs._head);
+ std::swap(_tail, rhs._tail);
+ HashTable::swap(rhs);
+}
+
+template< typename P >
+void
+lrucache_map<P>::move(next_t from, next_t to) {
+ (void) from;
+ if (_moveRecordingEnabled) {
+ _moved.push_back(std::make_pair(from, to));
+ }
+ value_type & moved = HashTable::getByInternalIndex(to);
+ if (moved.second._prev != LinkedValueBase::npos) {
+ HashTable::getByInternalIndex(moved.second._prev).second._next = to;
+ } else {
+ _head = to;
+ }
+ if (moved.second._next != LinkedValueBase::npos) {
+ HashTable::getByInternalIndex(moved.second._next).second._prev = to;
+ } else {
+ _tail = to;
+ }
+}
+
+template< typename P >
+void
+lrucache_map<P>::erase(const K & key) {
+ internal_iterator it = HashTable::find(key);
+ if (it != HashTable::end()) {
+ onRemove(key);
+ LV & v = it->second;
+ if (v._prev != LinkedValueBase::npos) {
+ HashTable::getByInternalIndex(v._prev).second._next = v._next;
+ } else {
+ _head = v._next;
+ }
+ if (v._next != LinkedValueBase::npos) {
+ HashTable::getByInternalIndex(v._next).second._prev = v._prev;
+ } else {
+ _tail = v._prev;
+ }
+ HashTable::erase(*this, it);
+ }
+}
+
+template< typename P >
+typename lrucache_map<P>::iterator
+lrucache_map<P>::erase(const iterator & it)
+{
+ iterator next(it);
+ if (it != end()) {
+ RecordMoves moves(*this);
+ next++;
+ const K & key(HashTable::getByInternalIndex(it._current).first);
+ erase(key);
+ next = iterator(this, moves.movedTo(next._current));
+ }
+ return next;
+}
+
+template< typename P >
+bool
+lrucache_map<P>::verifyInternals()
+{
+ bool retval(true);
+ assert(_head != LinkedValueBase::npos);
+ assert(_tail != LinkedValueBase::npos);
+ assert(HashTable::getByInternalIndex(_head).second._prev == LinkedValueBase::npos);
+ assert(HashTable::getByInternalIndex(_tail).second._next == LinkedValueBase::npos);
+ {
+ size_t i(0);
+ size_t prev(LinkedValueBase::npos);
+ size_t c(_head);
+ for(size_t m(size()); (c != LinkedValueBase::npos) && (i < m); c = HashTable::getByInternalIndex(c).second._next, i++) {
+ assert((HashTable::getByInternalIndex(c).second._prev == prev));
+ prev = c;
+ }
+ assert(i == size());
+ assert(c == LinkedValueBase::npos);
+ }
+ {
+ size_t i(0);
+ size_t next(LinkedValueBase::npos);
+ size_t c(_tail);
+ for(size_t m(size()); (c != LinkedValueBase::npos) && (i < m); c = HashTable::getByInternalIndex(c).second._prev, i++) {
+ assert((HashTable::getByInternalIndex(c).second._next == next));
+ next = c;
+ }
+ assert(i == size());
+ assert(c == LinkedValueBase::npos);
+ }
+ return retval;
+}
+
+template< typename P >
+void
+lrucache_map<P>::move(NodeStore && oldStore)
+{
+ next_t curr(_tail);
+ _tail = LinkedValueBase::npos;
+ _head = LinkedValueBase::npos;
+
+ while (curr != LinkedValueBase::npos) {
+ value_type & v = oldStore[curr].getValue();
+ curr = v.second._prev;
+ v.second._prev = LinkedValueBase::npos;
+ v.second._next = LinkedValueBase::npos;
+ insert(std::move(v));
+ }
+}
+
+template< typename P >
+void
+lrucache_map<P>::removeOld() {
+ if (_tail != LinkedValueBase::npos) {
+ for (value_type * last(& HashTable::getByInternalIndex(_tail));
+ (_tail != _head) && removeOldest(*last);
+ last = & HashTable::getByInternalIndex(_tail))
+ {
+ _tail = last->second._prev;
+ HashTable::getByInternalIndex(_tail).second._next = LinkedValueBase::npos;
+ HashTable::erase(*this, HashTable::find(last->first));
+ }
+ }
+}
+
+template< typename P >
+void
+lrucache_map<P>::ref(const internal_iterator & it) {
+ uint32_t me(it.getInternalIndex());
+ if (me != _head) {
+ LV & v = it->second;
+ LV & oldPrev = HashTable::getByInternalIndex(v._prev).second;
+ oldPrev._next = v._next;
+ if (me != _tail) {
+ LV & oldNext = HashTable::getByInternalIndex(v._next).second;
+ oldNext._prev = v._prev;
+ } else {
+ // I am tail and I am not the only one.
+ _tail = v._prev;
+ }
+ LV & oldHead = HashTable::getByInternalIndex(_head).second;
+ oldHead._prev = me;
+ v._next = _head;
+ v._prev = LinkedValueBase::npos;
+ _head = me;
+ }
+}
+
+template< typename P >
+typename lrucache_map<P>::insert_result
+lrucache_map<P>::insert(value_type && value) {
+ insert_result res = HashTable::insertInternal(std::forward<value_type>(value));
+ uint32_t next(_head);
+ if ( ! res.second) {
+ ref(res.first);
+ } else {
+ _head = res.first.getInternalIndex();
+ HashTable::getByInternalIndex(_head).second._next = next;
+ if (next != LinkedValueBase::npos) {
+ HashTable::getByInternalIndex(next).second._prev = _head;
+ }
+ if (_tail == LinkedValueBase::npos) {
+ _tail = _head;
+ }
+ removeOld();
+ if (_head != res.first.getInternalIndex()) {
+ res.first.setInternalIndex(_head);
+ }
+ }
+ return res;
+}
+
+template< typename P >
+typename P::Value &
+lrucache_map<P>::operator [] (const K & key)
+{
+ return insert(key, V()).first->second._value;
+}
+
+template< typename P >
+typename lrucache_map<P>::internal_iterator
+lrucache_map<P>::findAndRef(const K & key)
+{
+ internal_iterator found = HashTable::find(key);
+ if (found != HashTable::end()) {
+ ref(found);
+ }
+ return found;
+}
+
+}
+