path: root/searchlib/src/vespa/searchlib/docstore/storebybucket.cpp
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.

#include "storebybucket.h"
#include <vespa/vespalib/data/databuffer.h>
#include <vespa/vespalib/stllike/hash_map.hpp>
#include <vespa/vespalib/util/cpu_usage.h>
#include <vespa/vespalib/util/lambdatask.h>
#include <algorithm>

namespace search::docstore {

using document::BucketId;
using vespalib::CpuUsage;
using vespalib::makeLambdaTask;

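// Buffers documents per bucket. Filled chunks are handed to the executor for
// compression and kept in the backing memory store until drain() is called.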
StoreByBucket::StoreByBucket(MemoryDataStore & backingMemory, Executor & executor, CompressionConfig compression) noexcept
    : _chunkSerial(0),
      _current(),
      _where(),
      _backingMemory(backingMemory),
      _executor(executor),
      _lock(std::make_unique<std::mutex>()),
      _cond(std::make_unique<std::condition_variable>()),
      _numChunksPosted(0),
      _chunks(),
      _compression(compression)
{
    createChunk().swap(_current);
}

StoreByBucket::~StoreByBucket() = default;

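// Appends one entry to the current chunk and remembers, per bucket, where it was stored.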
void
StoreByBucket::add(BucketId bucketId, uint32_t chunkId, uint32_t lid, const void *buffer, size_t sz)
{
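    // If the current chunk has no room left, swap in a fresh chunk and
    // schedule compression of the filled one on the executor.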
    if ( ! _current->hasRoom(sz)) {
        Chunk::UP tmpChunk = createChunk();
        _current.swap(tmpChunk);
        incChunksPosted();
        auto task = makeLambdaTask([this, chunk=std::move(tmpChunk)]() mutable {
            closeChunk(std::move(chunk));
        });
        _executor.execute(CpuUsage::wrap(std::move(task), CpuUsage::Category::COMPACT));
    }
    Index idx(bucketId, _current->getId(), chunkId, lid);
    _current->append(lid, buffer, sz);
    _where[bucketId.toKey()].push_back(idx);
}

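// Creates a chunk with the next serial id and a 0x10000 (64 KiB) size configuration.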
Chunk::UP
StoreByBucket::createChunk()
{
    return std::make_unique<Chunk>(_chunkSerial++, Chunk::Config(0x10000));
}

size_t
StoreByBucket::getChunkCount() const {
    std::lock_guard guard(*_lock);
    return _chunks.size();
}

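// Compresses the chunk, copies the compressed data into the backing memory
// store, and registers it under its chunk id. Notifies waiters once every
// posted chunk has been processed.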
void
StoreByBucket::closeChunk(Chunk::UP chunk)
{
    vespalib::DataBuffer buffer;
    chunk->pack(1, buffer, _compression);
    buffer.shrink(buffer.getDataLen());
    ConstBufferRef bufferRef(_backingMemory.push_back(buffer.getData(), buffer.getDataLen()).data(), buffer.getDataLen());
    std::lock_guard guard(*_lock);
    _chunks[chunk->getId()] = bufferRef;
    if (_numChunksPosted == _chunks.size()) {
        _cond->notify_one();
    }
}

void
StoreByBucket::incChunksPosted() {
    std::lock_guard guard(*_lock);
    _numChunksPosted++;
}

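// Blocks until every chunk handed to the executor has been compressed and registered.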
void
StoreByBucket::waitAllProcessed() {
    std::unique_lock guard(*_lock);
    while (_numChunksPosted != _chunks.size()) {
        _cond->wait(guard);
    }
}

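// Flushes the current chunk, waits for all outstanding compression tasks,
// and then writes every stored entry to the drainer, bucket by bucket.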
void
StoreByBucket::drain(IWrite & drainer)
{
    incChunksPosted();
    auto task = makeLambdaTask([this, chunk=std::move(_current)]() mutable {
        closeChunk(std::move(chunk));
    });
    _executor.execute(CpuUsage::wrap(std::move(task), CpuUsage::Category::COMPACT));
    waitAllProcessed();
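    // Reconstruct all chunks from their compressed form, indexed by chunk id.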
    std::vector<Chunk::UP> chunks;
    chunks.resize(_chunks.size());
    for (const auto & it : _chunks) {
        ConstBufferRef buf(it.second);
        chunks[it.first] = std::make_unique<Chunk>(it.first, buf.data(), buf.size());
    }
    _chunks.clear();
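    // For each bucket, emit its entries in sorted index order.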
    for (auto & it : _where) {
        std::sort(it.second.begin(), it.second.end());
        for (Index idx : it.second) {
            vespalib::ConstBufferRef data(chunks[idx._id]->getLid(idx._lid));
            drainer.write(idx._bucketId, idx._chunkId, idx._lid, data.c_str(), data.size());
        }
    }
}

}

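// Explicit instantiation of the hash_map holding compressed chunk buffers keyed by chunk id.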
VESPALIB_HASH_MAP_INSTANTIATE(uint64_t, vespalib::ConstBufferRef);