path: root/storage/src/tests/persistence/persistencethread_splittest.cpp
// Copyright Vespa.ai. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.

#include <vespa/storage/persistence/persistencehandler.h>
#include <vespa/storageapi/message/bucketsplitting.h>
#include <vespa/persistence/spi/test.h>
#include <vespa/persistence/spi/persistenceprovider.h>
#include <tests/persistence/persistencetestutils.h>
#include <vespa/document/test/make_document_bucket.h>
#include <vespa/vdslib/state/clusterstate.h>
#include <cassert>
#include <memory>
#include <set>
#include <sstream>
#include <string>

using storage::spi::test::makeSpiBucket;
using document::test::makeDocumentBucket;
using namespace ::testing;

namespace storage {

struct PersistenceThreadSplitTest : public PersistenceTestUtils {
    enum SplitCase {
        TOO_MANY_DOCS_SPLIT_ONCE, // Only one split needed to divide
        TOO_MANY_DOCS_SPLIT_MULTIPLE_BITS, // Multiple bits needed to divide
        TOO_MANY_DOCS_ACTUALLY_NOT, // Other copy is too big, but not this one.
                                    // Multiple bits needed, but don't do it.
        TOO_LARGE_DOCS_SPLIT_ONCE,
        TOO_LARGE_DOCS_SPLIT_MULTIPLE_BITS,
        TOO_LARGE_DOCS_SINGLE_DOC, // Cannot split a single doc even if too large
        TOO_LARGE_DOCS_ACTUALLY_NOT, // Other copy is too large, not this one
        // Need to split to X bits to get in line with other copy or distributor.
        SPLIT_TOO_LITTLE_SINGLE_SPLIT, // Split all docs to one target
        SPLIT_TOO_LITTLE_JUST_RIGHT, // Just manage to split in two at that level
        SPLIT_TOO_LITTLE_SPLIT_TOWARDS_ENOUGH, // Splits to fewer bits than maxBits allows
        SPLIT_INCONSISTENT_1_DOC,
        SPLIT_INCONSISTENT_ALL_DOCS_SAME_GID,
    };

    void doTest(SplitCase);
};

TEST_F(PersistenceThreadSplitTest, split_single_bit_for_too_many_docs) {
    doTest(TOO_MANY_DOCS_SPLIT_ONCE);
}

TEST_F(PersistenceThreadSplitTest, bucket_split_requires_multiple_bit_increase_for_too_many_docs) {
    doTest(TOO_MANY_DOCS_SPLIT_MULTIPLE_BITS);
}

TEST_F(PersistenceThreadSplitTest, false_positive_too_many_docs) {
    doTest(TOO_MANY_DOCS_ACTUALLY_NOT);
}

TEST_F(PersistenceThreadSplitTest, split_single_bit_for_too_large_docs) {
    doTest(TOO_LARGE_DOCS_SPLIT_ONCE);
}

TEST_F(PersistenceThreadSplitTest, bucket_split_requires_multiple_bit_increase_for_too_large_docs) {
    doTest(TOO_LARGE_DOCS_SPLIT_MULTIPLE_BITS);
}

TEST_F(PersistenceThreadSplitTest, cannot_split_bucket_with_single_too_large_document) {
    doTest(TOO_LARGE_DOCS_SINGLE_DOC);
}

TEST_F(PersistenceThreadSplitTest, false_positive_too_large_docs) {
    doTest(TOO_LARGE_DOCS_ACTUALLY_NOT);
}

TEST_F(PersistenceThreadSplitTest, request_can_specify_minimum_split_bit_count) {
    doTest(SPLIT_TOO_LITTLE_SINGLE_SPLIT);
}

// TODO verify that name actually matches what test does...
TEST_F(PersistenceThreadSplitTest, can_split_into_2_targets_at_max_split_level) {
    doTest(SPLIT_TOO_LITTLE_JUST_RIGHT);
}

// TODO verify that name actually matches what test does...
TEST_F(PersistenceThreadSplitTest, actual_split_level_can_be_lower_than_max_level) {
    doTest(SPLIT_TOO_LITTLE_SPLIT_TOWARDS_ENOUGH);
}

TEST_F(PersistenceThreadSplitTest, inconsistent_split_has_one_bit_fallback_when_1_doc) {
    doTest(SPLIT_INCONSISTENT_1_DOC);
}

TEST_F(PersistenceThreadSplitTest, inconsistent_split_has_one_bit_fallback_when_all_docs_have_same_gid) {
    doTest(SPLIT_INCONSISTENT_ALL_DOCS_SAME_GID);
}

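// Runs one split scenario end-to-end: seeds a bucket at currentSplitLevel
// with docCount documents of docSize bytes each, issues a SplitBucketCommand
// with the case's limits, and verifies both the return code and the exact
// set of resulting target buckets.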
void
PersistenceThreadSplitTest::doTest(SplitCase splitCase)
{
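    // Baseline parameters encode TOO_MANY_DOCS_SPLIT_ONCE: 8 docs exceed the
    // count limit of 4, forcing a single split from 1 to 2 used bits. Each
    // case below overrides only what differs from this baseline.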
    uint32_t maxCount = 4;
    uint32_t maxSize = 1000 * 1000;
    uint32_t maxBits = 58;
    uint32_t minBits = 1;
    uint32_t docCount = 8;
    uint32_t docSize = 100 * 1000;
    uint32_t currentSplitLevel = 1;
    uint32_t splitLevelToDivide = 2;
    uint32_t resultSplitLevel = 2;
    size_t resultBuckets = 2;
    bool simulateGidCollision = false;
    api::ReturnCode error(api::ReturnCode::OK);
    switch (splitCase) {
        case TOO_MANY_DOCS_SPLIT_ONCE:
            break; // Default. Do nothing
        case TOO_MANY_DOCS_SPLIT_MULTIPLE_BITS:
            splitLevelToDivide = 3;
            resultSplitLevel = 3;
            break;
        case TOO_MANY_DOCS_ACTUALLY_NOT:
            splitLevelToDivide = 3;
            docCount = 2;
            resultBuckets = 1;
            break;
        case TOO_LARGE_DOCS_SPLIT_ONCE:
            maxCount = 100;
            docSize = 400 * 1000;
            break;
        case TOO_LARGE_DOCS_SPLIT_MULTIPLE_BITS:
            maxCount = 100;
            docSize = 400 * 1000;
            splitLevelToDivide = 3;
            resultSplitLevel = 3;
            break;
        case TOO_LARGE_DOCS_SINGLE_DOC:
            // A bucket can be inconsistently split: the other copy may be big
            // enough to split, while this copy holds only one too-large doc.
            docCount = 1;
            docSize = 3000 * 1000;
            splitLevelToDivide = 3;
            resultBuckets = 1;
            break;
        case TOO_LARGE_DOCS_ACTUALLY_NOT:
            maxCount = 100;
            splitLevelToDivide = 3;
            resultSplitLevel = 2;
            resultBuckets = 1;
            break;
        case SPLIT_TOO_LITTLE_SINGLE_SPLIT:
            maxBits = 5;
            maxSize = 0;
            maxCount = 0;
            splitLevelToDivide = 16;
            resultSplitLevel = 5;
            resultBuckets = 1;
            break;
        case SPLIT_TOO_LITTLE_JUST_RIGHT:
            maxBits = 5;
            maxSize = 0;
            maxCount = 0;
            splitLevelToDivide = 5;
            resultSplitLevel = 5;
            break;
        case SPLIT_TOO_LITTLE_SPLIT_TOWARDS_ENOUGH:
            maxBits = 8;
            maxSize = 0;
            maxCount = 0;
            splitLevelToDivide = 5;
            resultSplitLevel = 5;
            break;
        case SPLIT_INCONSISTENT_1_DOC:
            docCount = 1;
            maxSize = 0;
            maxCount = 0;
            currentSplitLevel = 16;
            resultSplitLevel = 17;
            resultBuckets = 1;
            break;
        case SPLIT_INCONSISTENT_ALL_DOCS_SAME_GID:
            docCount = 2;
            maxSize = 0;
            maxCount = 0;
            currentSplitLevel = 16;
            resultSplitLevel = 17;
            resultBuckets = 1;
            simulateGidCollision = true;
            break;
        default:
            assert(false);
    }

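    // All documents share location 0; splitMask is the single location bit
    // that separates the two halves of the bucket at splitLevelToDivide.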
    uint64_t location = 0;
    uint64_t splitMask = 1ULL << (splitLevelToDivide - 1);
    spi::Bucket bucket(makeSpiBucket(document::BucketId(currentSplitLevel, 1)));
    spi::PersistenceProvider& spi(getPersistenceProvider());
    spi.deleteBucket(bucket);
    spi.createBucket(bucket);
    document::TestDocMan testDocMan;
    for (uint32_t i=0; i<docCount; ++i) {
        uint64_t docloc;
        uint32_t seed;
        if (!simulateGidCollision) {
            // Alternate docs between the two halves of the bucket so that a
            // split at splitLevelToDivide yields two non-empty targets.
            docloc = location | (i % 2 == 0 ? 0 : splitMask);
            seed = i;
        } else {
            // Reusing the same location and seed yields documents with
            // identical GIDs, which cannot be separated by splitting.
            docloc = location;
            seed = 0;
        }
        document::Document::SP doc(testDocMan.createRandomDocumentAtLocation(docloc, seed, docSize, docSize));
        spi.put(bucket, spi::Timestamp(1000 + i), std::move(doc));
    }

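    // Install a minimal cluster state with one distributor and one storage
    // node before invoking the split handler.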
    getNode().getStateUpdater().setClusterState(
            std::make_shared<lib::ClusterState>("distributor:1 storage:1"));
    document::Bucket docBucket = makeDocumentBucket(document::BucketId(currentSplitLevel, 1));
    auto cmd = std::make_shared<api::SplitBucketCommand>(docBucket);
    cmd->setMaxSplitBits(maxBits);
    cmd->setMinSplitBits(minBits);
    // Note: the test's max* thresholds are handed to the command's min* setters.
    cmd->setMinByteSize(maxSize);
    cmd->setMinDocCount(maxCount);
    cmd->setSourceIndex(0);
    MessageTracker::UP result = _persistenceHandler->splitjoinHandler().handleSplitBucket(*cmd, createTracker(cmd, docBucket));
    api::ReturnCode code(result->getResult());
    EXPECT_EQ(error, code);
    if (!code.success()) {
        return;
    }
    auto& reply = dynamic_cast<api::SplitBucketReply&>(result->getReply());
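    // Build the expected result set: resultBuckets target buckets at
    // resultSplitLevel, differing only in the splitMask bit.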
    std::set<std::string> expected;
    for (uint32_t i=0; i<resultBuckets; ++i) {
        document::BucketId b(resultSplitLevel, location | (i == 0 ? 0 : splitMask));
        std::ostringstream ost;
        ost << b << " - " << b.getUsedBits();
        expected.insert(ost.str());
    }
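    // Collect the bucket ids actually reported in the split reply.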
    std::set<std::string> actual;
    for (uint32_t i=0; i<reply.getSplitInfo().size(); ++i) {
        std::ostringstream ost;
        document::BucketId b(reply.getSplitInfo()[i].first);
        ost << b << " - " << b.getUsedBits();
        actual.insert(ost.str());
    }
    EXPECT_EQ(expected, actual);
}

} // storage