author    Tor Egge <Tor.Egge@online.no>    2022-10-03 16:14:37 +0200
committer Tor Egge <Tor.Egge@online.no>    2022-10-03 16:14:37 +0200
commit    d2eefff31692a1e5d18461fdde987512be7e58dc (patch)
tree      5e385e5c42ac48cc6a13ae19f43551a10f42b44d
parent    73c11ef2eb40d941593d1616255f44cd66dfefd0 (diff)
Remove AlignedEntryRef.
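
Alignment of buffer offsets is no longer encoded in the entry ref type.
Instead, FeatureStore and WordStore pad their raw allocations to a multiple
of buffer_array_size (4) with the new calc_pad helper, and entry offsets are
scaled by the array size via getEntryArray.

As a rough illustration (a standalone sketch, not part of this commit), the
padding arithmetic behaves like this for an assumed buffer_array_size of 4:

    // Sketch of the calc_pad helper introduced in FeatureStore/WordStore;
    // buffer_array_size is assumed to be 4, as in this commit.
    #include <cassert>
    #include <cstdint>

    constexpr uint32_t buffer_array_size = 4u;           // must be a power of 2
    constexpr uint32_t pad_constant = buffer_array_size - 1u;

    // Distance from val up to the next multiple of buffer_array_size.
    constexpr uint32_t calc_pad(uint32_t val) {
        return -val & pad_constant;
    }

    int main() {
        assert(calc_pad(0) == 0);   // already aligned
        assert(calc_pad(1) == 3);   // 1 + 3 == 4
        assert(calc_pad(4) == 0);
        assert(calc_pad(5) == 3);   // 5 + 3 == 8
        return 0;
    }

This mirrors what AlignedEntryRefT<22, 2>::pad() used to compute, but the
padding now lives in the stores themselves rather than in the ref type.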
-rw-r--r--  searchlib/src/tests/memoryindex/datastore/feature_store_test.cpp      6
-rw-r--r--  searchlib/src/tests/memoryindex/datastore/word_store_test.cpp        12
-rw-r--r--  searchlib/src/vespa/searchlib/memoryindex/feature_store.cpp          34
-rw-r--r--  searchlib/src/vespa/searchlib/memoryindex/feature_store.h             9
-rw-r--r--  searchlib/src/vespa/searchlib/memoryindex/word_store.cpp              7
-rw-r--r--  searchlib/src/vespa/searchlib/memoryindex/word_store.h                7
-rw-r--r--  searchlib/src/vespa/searchlib/test/fakedata/fakememtreeocc.cpp       11
-rw-r--r--  vespalib/src/tests/datastore/datastore/datastore_test.cpp            28
-rw-r--r--  vespalib/src/vespa/vespalib/datastore/datastore.hpp                  12
-rw-r--r--  vespalib/src/vespa/vespalib/datastore/entryref.h                     28
-rw-r--r--  vespalib/src/vespa/vespalib/datastore/free_list_raw_allocator.hpp     6
-rw-r--r--  vespalib/src/vespa/vespalib/datastore/raw_allocator.hpp              22
-rw-r--r--  vespalib/src/vespa/vespalib/datastore/unique_store.hpp                4
-rw-r--r--  vespalib/src/vespa/vespalib/datastore/unique_store_enumerator.h       8
-rw-r--r--  vespalib/src/vespa/vespalib/datastore/unique_store_enumerator.hpp     4
-rw-r--r--  vespalib/src/vespa/vespalib/datastore/unique_store_remapper.h         4
16 files changed, 68 insertions(+), 134 deletions(-)
diff --git a/searchlib/src/tests/memoryindex/datastore/feature_store_test.cpp b/searchlib/src/tests/memoryindex/datastore/feature_store_test.cpp
index 34f9f7d27a9..564824031a6 100644
--- a/searchlib/src/tests/memoryindex/datastore/feature_store_test.cpp
+++ b/searchlib/src/tests/memoryindex/datastore/feature_store_test.cpp
@@ -90,8 +90,7 @@ TEST_F(FeatureStoreTest, features_can_be_added_and_retrieved)
r = fs.addFeatures(0, f);
r1 = r.first;
EXPECT_TRUE(r.second > 0);
- EXPECT_EQ(FeatureStore::RefType::align(1u),
- FeatureStore::RefType(r1).offset());
+ EXPECT_EQ(1u, FeatureStore::RefType(r1).offset());
EXPECT_EQ(0u, FeatureStore::RefType(r1).bufferId());
LOG(info,
"bits(%" PRIu64 "), ref.offset(%zu), ref.bufferId(%u)",
@@ -131,8 +130,7 @@ TEST_F(FeatureStoreTest, next_words_are_working)
r = fs.addFeatures(0, f);
r1 = r.first;
EXPECT_TRUE(r.second > 0);
- EXPECT_EQ(FeatureStore::RefType::align(1u),
- FeatureStore::RefType(r1).offset());
+ EXPECT_EQ(1u, FeatureStore::RefType(r1).offset());
EXPECT_EQ(0u, FeatureStore::RefType(r1).bufferId());
LOG(info,
"bits(%" PRIu64 "), ref.offset(%zu), ref.bufferId(%u)",
diff --git a/searchlib/src/tests/memoryindex/datastore/word_store_test.cpp b/searchlib/src/tests/memoryindex/datastore/word_store_test.cpp
index 698780a1dc2..1ca87467fc6 100644
--- a/searchlib/src/tests/memoryindex/datastore/word_store_test.cpp
+++ b/searchlib/src/tests/memoryindex/datastore/word_store_test.cpp
@@ -18,14 +18,14 @@ TEST(WordStoreTest, words_can_be_added_and_retrieved)
EntryRef r1 = ws.addWord(w1);
EntryRef r2 = ws.addWord(w2);
EntryRef r3 = ws.addWord(w3);
- uint32_t invp = WordStore::RefType::align(1); // Reserved as invalid
+ uint32_t invp = WordStore::buffer_array_size; // Reserved as invalid
uint32_t w1s = w1.size() + 1;
- uint32_t w1p = WordStore::RefType::pad(w1s);
+ uint32_t w1p = WordStore::calc_pad(w1s);
uint32_t w2s = w2.size() + 1;
- uint32_t w2p = WordStore::RefType::pad(w2s);
- EXPECT_EQ(invp, WordStore::RefType(r1).offset());
- EXPECT_EQ(invp + w1s + w1p, WordStore::RefType(r2).offset());
- EXPECT_EQ(invp + w1s + w1p + w2s + w2p, WordStore::RefType(r3).offset());
+ uint32_t w2p = WordStore::calc_pad(w2s);
+ EXPECT_EQ(invp, WordStore::RefType(r1).offset() * WordStore::buffer_array_size);
+ EXPECT_EQ(invp + w1s + w1p, WordStore::RefType(r2).offset() * WordStore::buffer_array_size);
+ EXPECT_EQ(invp + w1s + w1p + w2s + w2p, WordStore::RefType(r3).offset() * WordStore::buffer_array_size);
EXPECT_EQ(0u, WordStore::RefType(r1).bufferId());
EXPECT_EQ(0u, WordStore::RefType(r2).bufferId());
EXPECT_EQ(0u, WordStore::RefType(r3).bufferId());
diff --git a/searchlib/src/vespa/searchlib/memoryindex/feature_store.cpp b/searchlib/src/vespa/searchlib/memoryindex/feature_store.cpp
index b37300375a8..b5810d06047 100644
--- a/searchlib/src/vespa/searchlib/memoryindex/feature_store.cpp
+++ b/searchlib/src/vespa/searchlib/memoryindex/feature_store.cpp
@@ -9,6 +9,7 @@ namespace search::memoryindex {
constexpr size_t MIN_BUFFER_ARRAYS = 1024u;
using index::SchemaUtil;
+using vespalib::datastore::EntryRef;
uint64_t
FeatureStore::writeFeatures(uint32_t packedIndex, const DocIdAndFeatures &features)
@@ -26,10 +27,10 @@ FeatureStore::writeFeatures(uint32_t packedIndex, const DocIdAndFeatures &featur
return oldOffset;
}
-vespalib::datastore::EntryRef
+EntryRef
FeatureStore::addFeatures(const uint8_t *src, uint64_t byteLen)
{
- uint32_t pad = RefType::pad(byteLen);
+ uint32_t pad = calc_pad(byteLen);
auto result = _store.rawAllocator<uint8_t>(_typeId).alloc(byteLen + pad, DECODE_SAFETY);
uint8_t *dst = result.data;
memcpy(dst, src, byteLen);
@@ -42,7 +43,7 @@ FeatureStore::addFeatures(const uint8_t *src, uint64_t byteLen)
return result.ref;
}
-std::pair<vespalib::datastore::EntryRef, uint64_t>
+std::pair<EntryRef, uint64_t>
FeatureStore::addFeatures(uint64_t beginOffset, uint64_t endOffset)
{
uint64_t bitLen = (endOffset - beginOffset);
@@ -52,18 +53,18 @@ FeatureStore::addFeatures(uint64_t beginOffset, uint64_t endOffset)
assert(wordLen > 0);
assert(byteLen > 0);
const uint8_t *src = reinterpret_cast<const uint8_t *>(_f._valI - wordLen);
- RefType ref = addFeatures(src, byteLen);
+ EntryRef ref = addFeatures(src, byteLen);
return std::make_pair(ref, bitLen);
}
-vespalib::datastore::EntryRef
-FeatureStore::moveFeatures(vespalib::datastore::EntryRef ref, uint64_t bitLen)
+EntryRef
+FeatureStore::moveFeatures(EntryRef ref, uint64_t bitLen)
{
const uint8_t *src = getBits(ref);
uint64_t byteLen = (bitLen + 7) / 8;
- RefType newRef = addFeatures(src, byteLen);
+ EntryRef newRef = addFeatures(src, byteLen);
// Mark old features as dead
- _store.incDead(ref, byteLen + RefType::pad(byteLen));
+ _store.incDead(ref, byteLen + calc_pad(byteLen));
return newRef;
}
@@ -74,8 +75,7 @@ FeatureStore::FeatureStore(const Schema &schema)
_d(nullptr),
_fieldsParams(),
_schema(schema),
- _type(RefType::align(1u), MIN_BUFFER_ARRAYS,
- RefType::offsetSize() / RefType::align(1u)),
+ _type(buffer_array_size, MIN_BUFFER_ARRAYS, RefType::offsetSize()),
_typeId(0)
{
_f.setWriteContext(&_fctx);
@@ -96,7 +96,7 @@ FeatureStore::~FeatureStore()
_store.dropBuffers();
}
-std::pair<vespalib::datastore::EntryRef, uint64_t>
+std::pair<EntryRef, uint64_t>
FeatureStore::addFeatures(uint32_t packedIndex, const DocIdAndFeatures &features)
{
uint64_t oldOffset = writeFeatures(packedIndex, features);
@@ -109,14 +109,14 @@ void
FeatureStore::add_features_guard_bytes()
{
uint32_t len = DECODE_SAFETY;
- uint32_t pad = RefType::pad(len);
- auto result = _store.rawAllocator<int8_t>(_typeId).alloc(len + pad);
+ uint32_t pad = calc_pad(len);
+ auto result = _store.rawAllocator<uint8_t>(_typeId).alloc(len + pad);
memset(result.data, 0, len + pad);
_store.incDead(result.ref, len + pad);
}
void
-FeatureStore::getFeatures(uint32_t packedIndex, vespalib::datastore::EntryRef ref, DocIdAndFeatures &features)
+FeatureStore::getFeatures(uint32_t packedIndex, EntryRef ref, DocIdAndFeatures &features)
{
setupForField(packedIndex, _d);
setupForReadFeatures(ref, _d);
@@ -124,7 +124,7 @@ FeatureStore::getFeatures(uint32_t packedIndex, vespalib::datastore::EntryRef re
}
size_t
-FeatureStore::bitSize(uint32_t packedIndex, vespalib::datastore::EntryRef ref)
+FeatureStore::bitSize(uint32_t packedIndex, EntryRef ref)
{
setupForField(packedIndex, _d);
setupForUnpackFeatures(ref, _d);
@@ -136,8 +136,8 @@ FeatureStore::bitSize(uint32_t packedIndex, vespalib::datastore::EntryRef ref)
return bitLen;
}
-vespalib::datastore::EntryRef
-FeatureStore::moveFeatures(uint32_t packedIndex, vespalib::datastore::EntryRef ref)
+EntryRef
+FeatureStore::moveFeatures(uint32_t packedIndex, EntryRef ref)
{
uint64_t bitLen = bitSize(packedIndex, ref);
return moveFeatures(ref, bitLen);
diff --git a/searchlib/src/vespa/searchlib/memoryindex/feature_store.h b/searchlib/src/vespa/searchlib/memoryindex/feature_store.h
index a96ae9a8f2d..b1d975d0926 100644
--- a/searchlib/src/vespa/searchlib/memoryindex/feature_store.h
+++ b/searchlib/src/vespa/searchlib/memoryindex/feature_store.h
@@ -14,11 +14,14 @@ namespace search::memoryindex {
*/
class FeatureStore {
public:
- using DataStoreType = vespalib::datastore::DataStoreT<vespalib::datastore::AlignedEntryRefT<22, 2>>;
+ using DataStoreType = vespalib::datastore::DataStoreT<vespalib::datastore::EntryRefT<22>>;
using RefType = DataStoreType::RefType;
using EncodeContext = bitcompression::EG2PosOccEncodeContext<true>;
using DecodeContextCooked = bitcompression::EG2PosOccDecodeContextCooked<true>;
using generation_t = vespalib::GenerationHandler::generation_t;
+ static constexpr uint32_t buffer_array_size = 4u; // Must be a power of 2
+ static constexpr uint32_t pad_constant = buffer_array_size - 1u;
+ static uint32_t calc_pad(uint32_t val) { return (-val & pad_constant); }
private:
using Schema = index::Schema;
@@ -154,7 +157,7 @@ public:
uint32_t bufferId = RefType(ref).bufferId();
const vespalib::datastore::BufferState &state = _store.getBufferState(bufferId);
decoder.setEnd(
- ((_store.getEntry<uint8_t>(RefType(0, bufferId)) + state.size() -
+ ((_store.getEntryArray<uint8_t>(RefType(0, bufferId), buffer_array_size) + state.size() -
bits) + 7) / 8,
false);
}
@@ -188,7 +191,7 @@ public:
*/
const uint8_t *getBits(vespalib::datastore::EntryRef ref) const {
RefType iRef(ref);
- return _store.getEntry<uint8_t>(iRef);
+ return _store.getEntryArray<uint8_t>(iRef, buffer_array_size);
}
/**
diff --git a/searchlib/src/vespa/searchlib/memoryindex/word_store.cpp b/searchlib/src/vespa/searchlib/memoryindex/word_store.cpp
index e5ec4ab7808..441587eb718 100644
--- a/searchlib/src/vespa/searchlib/memoryindex/word_store.cpp
+++ b/searchlib/src/vespa/searchlib/memoryindex/word_store.cpp
@@ -10,16 +10,13 @@ constexpr size_t MIN_BUFFER_ARRAYS = 1024;
WordStore::WordStore()
: _store(),
_numWords(0),
- _type(RefType::align(1),
- MIN_BUFFER_ARRAYS,
- RefType::offsetSize() / RefType::align(1)),
+ _type(buffer_array_size, MIN_BUFFER_ARRAYS, RefType::offsetSize()),
_typeId(0)
{
_store.addType(&_type);
_store.init_primary_buffers();
}
-
WordStore::~WordStore()
{
_store.dropBuffers();
@@ -29,7 +26,7 @@ vespalib::datastore::EntryRef
WordStore::addWord(const vespalib::stringref word)
{
size_t wordSize = word.size() + 1;
- size_t bufferSize = RefType::align(wordSize);
+ size_t bufferSize = wordSize + calc_pad(wordSize);
auto result = _store.rawAllocator<char>(_typeId).alloc(bufferSize);
char *be = result.data;
for (size_t i = 0; i < word.size(); ++i) {
diff --git a/searchlib/src/vespa/searchlib/memoryindex/word_store.h b/searchlib/src/vespa/searchlib/memoryindex/word_store.h
index b27ae65d776..913f6bc3ea5 100644
--- a/searchlib/src/vespa/searchlib/memoryindex/word_store.h
+++ b/searchlib/src/vespa/searchlib/memoryindex/word_store.h
@@ -9,8 +9,11 @@ namespace search::memoryindex {
class WordStore {
public:
- using DataStoreType = vespalib::datastore::DataStoreT<vespalib::datastore::AlignedEntryRefT<22, 2>>;
+ using DataStoreType = vespalib::datastore::DataStoreT<vespalib::datastore::EntryRefT<22>>;
using RefType = DataStoreType::RefType;
+ static constexpr uint32_t buffer_array_size = 4u; // Must be a power of 2
+ static constexpr uint32_t pad_constant = buffer_array_size - 1u;
+ static uint32_t calc_pad(uint32_t val) { return (-val & pad_constant); }
private:
DataStoreType _store;
@@ -24,7 +27,7 @@ public:
vespalib::datastore::EntryRef addWord(const vespalib::stringref word);
const char *getWord(vespalib::datastore::EntryRef ref) const {
RefType internalRef(ref);
- return _store.getEntry<char>(internalRef);
+ return _store.getEntryArray<char>(internalRef, buffer_array_size);
}
vespalib::MemoryUsage getMemoryUsage() const {
diff --git a/searchlib/src/vespa/searchlib/test/fakedata/fakememtreeocc.cpp b/searchlib/src/vespa/searchlib/test/fakedata/fakememtreeocc.cpp
index 433f543ab92..11b6a1e3020 100644
--- a/searchlib/src/vespa/searchlib/test/fakedata/fakememtreeocc.cpp
+++ b/searchlib/src/vespa/searchlib/test/fakedata/fakememtreeocc.cpp
@@ -199,14 +199,14 @@ FakeMemTreeOccMgr::sync()
void
FakeMemTreeOccMgr::add(uint32_t wordIdx, index::DocIdAndFeatures &features)
{
- typedef FeatureStore::RefType RefType;
-
const FakeWord *fw = _fakeWords[wordIdx];
std::pair<EntryRef, uint64_t> r =
_featureStore.addFeatures(fw->getPackedIndex(), features);
+ size_t feature_size = (r.second + 7) / 8;
+ feature_size += FeatureStore::calc_pad(feature_size);
- _featureSizes[wordIdx] += RefType::align((r.second + 7) / 8) * 8;
+ _featureSizes[wordIdx] += feature_size * 8;
_unflushed.push_back(PendingOp(wordIdx, features.doc_id(), r.first));
@@ -240,7 +240,6 @@ FakeMemTreeOccMgr::sortUnflushed()
void
FakeMemTreeOccMgr::flush()
{
- typedef FeatureStore::RefType RefType;
typedef std::vector<PendingOp>::iterator I;
if (_unflushed.empty())
@@ -264,7 +263,9 @@ FakeMemTreeOccMgr::flush()
if (i->getRemove()) {
if (itr.valid() && itr.getKey() == docId) {
uint64_t bits = _featureStore.bitSize(fw->getPackedIndex(), EntryRef(itr.getData().get_features_relaxed()));
- _featureSizes[wordIdx] -= RefType::align((bits + 7) / 8) * 8;
+ size_t feature_size = (bits + 7) / 8;
+ feature_size += FeatureStore::calc_pad(feature_size);
+ _featureSizes[wordIdx] -= feature_size * 8;
tree.remove(itr);
}
} else {
diff --git a/vespalib/src/tests/datastore/datastore/datastore_test.cpp b/vespalib/src/tests/datastore/datastore/datastore_test.cpp
index 964978e5510..b77599c4e34 100644
--- a/vespalib/src/tests/datastore/datastore/datastore_test.cpp
+++ b/vespalib/src/tests/datastore/datastore/datastore_test.cpp
@@ -215,34 +215,6 @@ TEST(DataStoreTest, require_that_entry_ref_is_working)
}
}
-TEST(DataStoreTest, require_that_aligned_entry_ref_is_working)
-{
- using MyRefType = AlignedEntryRefT<22, 2>; // 4 byte alignement
- EXPECT_EQ(16_Mi, MyRefType::offsetSize());
- EXPECT_EQ(1_Ki, MyRefType::numBuffers());
- EXPECT_EQ(0u, MyRefType::align(0));
- EXPECT_EQ(4u, MyRefType::align(1));
- EXPECT_EQ(4u, MyRefType::align(2));
- EXPECT_EQ(4u, MyRefType::align(3));
- EXPECT_EQ(4u, MyRefType::align(4));
- EXPECT_EQ(8u, MyRefType::align(5));
- {
- MyRefType r(0, 0);
- EXPECT_EQ(0u, r.offset());
- EXPECT_EQ(0u, r.bufferId());
- }
- {
- MyRefType r(237, 13);
- EXPECT_EQ(MyRefType::align(237), r.offset());
- EXPECT_EQ(13u, r.bufferId());
- }
- {
- MyRefType r(MyRefType::offsetSize() - 4, 1023);
- EXPECT_EQ(MyRefType::align(MyRefType::offsetSize() - 4), r.offset());
- EXPECT_EQ(1023u, r.bufferId());
- }
-}
-
TEST(DataStoreTest, require_that_entries_can_be_added_and_retrieved)
{
using IntStore = DataStore<int>;
diff --git a/vespalib/src/vespa/vespalib/datastore/datastore.hpp b/vespalib/src/vespa/vespalib/datastore/datastore.hpp
index 666da55975c..4d09ffe4bc6 100644
--- a/vespalib/src/vespa/vespalib/datastore/datastore.hpp
+++ b/vespalib/src/vespa/vespalib/datastore/datastore.hpp
@@ -13,8 +13,7 @@ namespace vespalib::datastore {
template <typename RefT>
DataStoreT<RefT>::DataStoreT()
- : DataStoreBase(RefType::numBuffers(),
- RefType::unscaled_offset_size())
+ : DataStoreBase(RefType::numBuffers(), RefType::offsetSize())
{
}
@@ -42,7 +41,7 @@ DataStoreT<RefT>::free_elem_internal(EntryRef ref, size_t numElems, bool was_hel
state.decHoldElems(numElems);
}
state.cleanHold(getBuffer(intRef.bufferId()),
- intRef.unscaled_offset() * state.getArraySize(), numElems);
+ intRef.offset() * state.getArraySize(), numElems);
}
template <typename RefT>
@@ -50,15 +49,14 @@ void
DataStoreT<RefT>::holdElem(EntryRef ref, size_t numElems, size_t extraBytes)
{
RefType intRef(ref);
- size_t alignedLen = RefType::align(numElems);
BufferState &state = getBufferState(intRef.bufferId());
assert(state.isActive());
if (state.hasDisabledElemHoldList()) {
- state.incDeadElems(alignedLen);
+ state.incDeadElems(numElems);
return;
}
- _elemHold1List.push_back(ElemHold1ListElem(ref, alignedLen));
- state.incHoldElems(alignedLen);
+ _elemHold1List.push_back(ElemHold1ListElem(ref, numElems));
+ state.incHoldElems(numElems);
state.incExtraHoldBytes(extraBytes);
}
diff --git a/vespalib/src/vespa/vespalib/datastore/entryref.h b/vespalib/src/vespa/vespalib/datastore/entryref.h
index 7667cc3d2c1..a0016f4fdcb 100644
--- a/vespalib/src/vespa/vespalib/datastore/entryref.h
+++ b/vespalib/src/vespa/vespalib/datastore/entryref.h
@@ -40,34 +40,6 @@ public:
uint32_t bufferId() const noexcept { return _ref >> OffsetBits; }
static size_t offsetSize() noexcept { return 1ul << OffsetBits; }
static uint32_t numBuffers() noexcept { return 1 << BufferBits; }
- static size_t align(size_t val) noexcept { return val; }
- static size_t pad(size_t val) noexcept { (void) val; return 0ul; }
- static constexpr bool isAlignedType = false;
- // TODO: Remove following temporary methods when removing
- // AlignedEntryRefT
- size_t unscaled_offset() const noexcept { return offset(); }
- static size_t unscaled_offset_size() noexcept { return offsetSize(); }
-};
-
-/**
- * Class for entry reference that is similar to EntryRefT,
- * except that we use (2^OffsetAlign) byte alignment on the offset.
- **/
-template <uint32_t OffsetBits, uint32_t OffsetAlign>
-class AlignedEntryRefT : public EntryRefT<OffsetBits> {
-private:
- typedef EntryRefT<OffsetBits> ParentType;
- static const uint32_t PadConstant = ((1 << OffsetAlign) - 1);
-public:
- AlignedEntryRefT() noexcept : ParentType() {}
- AlignedEntryRefT(size_t offset_, uint32_t bufferId_) noexcept :
- ParentType(align(offset_) >> OffsetAlign, bufferId_) {}
- AlignedEntryRefT(const EntryRef & ref_) noexcept : ParentType(ref_) {}
- size_t offset() const { return ParentType::offset() << OffsetAlign; }
- static size_t offsetSize() { return ParentType::offsetSize() << OffsetAlign; }
- static size_t align(size_t val) { return val + pad(val); }
- static size_t pad(size_t val) { return (-val & PadConstant); }
- static constexpr bool isAlignedType = true;
};
vespalib::asciistream& operator<<(vespalib::asciistream& os, const EntryRef& ref);
diff --git a/vespalib/src/vespa/vespalib/datastore/free_list_raw_allocator.hpp b/vespalib/src/vespa/vespalib/datastore/free_list_raw_allocator.hpp
index 55d57b1bcb9..c4689cd9e4a 100644
--- a/vespalib/src/vespa/vespalib/datastore/free_list_raw_allocator.hpp
+++ b/vespalib/src/vespa/vespalib/datastore/free_list_raw_allocator.hpp
@@ -24,10 +24,8 @@ FreeListRawAllocator<EntryT, RefT>::alloc(size_t numElems)
assert(state.isActive());
assert(state.getArraySize() == numElems);
RefT ref = state.popFreeList();
- // If entry ref is not aligned we must scale the offset according to array size as it was divided when the entry ref was created.
- EntryT *entry = !RefT::isAlignedType ?
- _store.template getEntryArray<EntryT>(ref, state.getArraySize()) :
- _store.template getEntry<EntryT>(ref);
+ // We must scale the offset according to array size as it was divided when the entry ref was created.
+ EntryT *entry = _store.template getEntryArray<EntryT>(ref, state.getArraySize());
return HandleType(ref, entry);
}
diff --git a/vespalib/src/vespa/vespalib/datastore/raw_allocator.hpp b/vespalib/src/vespa/vespalib/datastore/raw_allocator.hpp
index a17c3a28ced..0d67bf71c20 100644
--- a/vespalib/src/vespa/vespalib/datastore/raw_allocator.hpp
+++ b/vespalib/src/vespa/vespalib/datastore/raw_allocator.hpp
@@ -23,21 +23,13 @@ RawAllocator<EntryT, RefT>::alloc(size_t numElems, size_t extraElems)
BufferState &state = _store.getBufferState(buffer_id);
assert(state.isActive());
size_t oldBufferSize = state.size();
- if (RefT::isAlignedType) {
- // AlignedEntryRef constructor scales down offset by alignment
- RefT ref(oldBufferSize, buffer_id);
- EntryT *buffer = _store.getEntry<EntryT>(ref);
- state.pushed_back(numElems);
- return HandleType(ref, buffer);
- } else {
- // Must perform scaling ourselves, according to array size
- size_t arraySize = state.getArraySize();
- assert((numElems % arraySize) == 0u);
- RefT ref((oldBufferSize / arraySize), buffer_id);
- EntryT *buffer = _store.getEntryArray<EntryT>(ref, arraySize);
- state.pushed_back(numElems);
- return HandleType(ref, buffer);
- }
+ // Must perform scaling ourselves, according to array size
+ size_t arraySize = state.getArraySize();
+ assert((numElems % arraySize) == 0u);
+ RefT ref((oldBufferSize / arraySize), buffer_id);
+ EntryT *buffer = _store.getEntryArray<EntryT>(ref, arraySize);
+ state.pushed_back(numElems);
+ return HandleType(ref, buffer);
}
}
diff --git a/vespalib/src/vespa/vespalib/datastore/unique_store.hpp b/vespalib/src/vespa/vespalib/datastore/unique_store.hpp
index a252763fb5b..cbb8369e1f2 100644
--- a/vespalib/src/vespa/vespalib/datastore/unique_store.hpp
+++ b/vespalib/src/vespa/vespalib/datastore/unique_store.hpp
@@ -113,8 +113,8 @@ private:
RefT iRef(oldRef);
uint32_t buffer_id = iRef.bufferId();
auto &inner_mapping = _mapping[buffer_id];
- assert(iRef.unscaled_offset() < inner_mapping.size());
- EntryRef &mappedRef = inner_mapping[iRef.unscaled_offset()];
+ assert(iRef.offset() < inner_mapping.size());
+ EntryRef &mappedRef = inner_mapping[iRef.offset()];
assert(!mappedRef.valid());
EntryRef newRef = _store.move(oldRef);
mappedRef = newRef;
diff --git a/vespalib/src/vespa/vespalib/datastore/unique_store_enumerator.h b/vespalib/src/vespa/vespalib/datastore/unique_store_enumerator.h
index e6627eb80e6..c4baff2206b 100644
--- a/vespalib/src/vespa/vespalib/datastore/unique_store_enumerator.h
+++ b/vespalib/src/vespa/vespalib/datastore/unique_store_enumerator.h
@@ -47,8 +47,8 @@ public:
uint32_t mapEntryRefToEnumValue(EntryRef ref) const {
if (ref.valid()) {
RefType iRef(ref);
- assert(iRef.unscaled_offset() < _enumValues[iRef.bufferId()].size());
- uint32_t enumValue = _enumValues[iRef.bufferId()][iRef.unscaled_offset()];
+ assert(iRef.offset() < _enumValues[iRef.bufferId()].size());
+ uint32_t enumValue = _enumValues[iRef.bufferId()][iRef.offset()];
assert(enumValue != 0);
return enumValue;
} else {
@@ -59,8 +59,8 @@ public:
uint32_t map_entry_ref_to_enum_value_or_zero(EntryRef ref) const {
if (ref.valid()) {
RefType iRef(ref);
- if (iRef.unscaled_offset() < _enumValues[iRef.bufferId()].size()) {
- return _enumValues[iRef.bufferId()][iRef.unscaled_offset()];
+ if (iRef.offset() < _enumValues[iRef.bufferId()].size()) {
+ return _enumValues[iRef.bufferId()][iRef.offset()];
} else {
return 0u;
}
diff --git a/vespalib/src/vespa/vespalib/datastore/unique_store_enumerator.hpp b/vespalib/src/vespa/vespalib/datastore/unique_store_enumerator.hpp
index 0cc6b4eded2..52437fc765c 100644
--- a/vespalib/src/vespa/vespalib/datastore/unique_store_enumerator.hpp
+++ b/vespalib/src/vespa/vespalib/datastore/unique_store_enumerator.hpp
@@ -31,8 +31,8 @@ UniqueStoreEnumerator<RefT>::enumerateValue(EntryRef ref)
{
RefType iRef(ref);
assert(iRef.valid());
- assert(iRef.unscaled_offset() < _enumValues[iRef.bufferId()].size());
- uint32_t &enumVal = _enumValues[iRef.bufferId()][iRef.unscaled_offset()];
+ assert(iRef.offset() < _enumValues[iRef.bufferId()].size());
+ uint32_t &enumVal = _enumValues[iRef.bufferId()][iRef.offset()];
assert(enumVal == 0u);
enumVal = _next_enum_val;
++_next_enum_val;
diff --git a/vespalib/src/vespa/vespalib/datastore/unique_store_remapper.h b/vespalib/src/vespa/vespalib/datastore/unique_store_remapper.h
index 80486f55ad8..4babd6204c7 100644
--- a/vespalib/src/vespa/vespalib/datastore/unique_store_remapper.h
+++ b/vespalib/src/vespa/vespalib/datastore/unique_store_remapper.h
@@ -32,8 +32,8 @@ public:
EntryRef remap(EntryRef ref) const {
RefType internal_ref(ref);
auto &inner_mapping = _mapping[internal_ref.bufferId()];
- assert(internal_ref.unscaled_offset() < inner_mapping.size());
- EntryRef mapped_ref = inner_mapping[internal_ref.unscaled_offset()];
+ assert(internal_ref.offset() < inner_mapping.size());
+ EntryRef mapped_ref = inner_mapping[internal_ref.offset()];
assert(mapped_ref.valid());
return mapped_ref;
}