path: root/storage/src/vespa/storage/distributor/operations/idealstate/joinoperation.cpp
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.

#include "joinoperation.h"
#include <vespa/storageapi/message/bucketsplitting.h>
#include <algorithm>
#include <climits>

#include <vespa/log/bufferedlogger.h>
LOG_SETUP(".distributor.operation.idealstate.join");

using namespace storage::distributor;

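// Groups the source buckets by the content nodes that have copies of them
// and queues one join command per node. If no node has any copies, the
// operation finishes immediately as failed.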
void
JoinOperation::onStart(DistributorMessageSender& sender)
{
    _ok = false;

    if (_bucketsToJoin.size() == 1) {
        LOG(debug, "Starting join operation for %s -> %s",
            _bucketsToJoin[0].toString().c_str(), getBucketId().toString().c_str());
    } else {
        LOG(debug, "Starting join operation for (%s,%s) -> %s",
            _bucketsToJoin[0].toString().c_str(),
            _bucketsToJoin[1].toString().c_str(),
            getBucketId().toString().c_str());
    }

    std::sort(_bucketsToJoin.begin(), _bucketsToJoin.end());

    auto nodeToBuckets = resolveSourceBucketsPerTargetNode();
    fillMissingSourceBucketsForInconsistentJoins(nodeToBuckets);

    _ok = enqueueJoinMessagePerTargetNode(nodeToBuckets);

    if (!_ok) {
        LOGBP(debug, "Unable to join bucket %s, since no copies are available (some in maintenance?)", getBucketId().toString().c_str());
        done();
    } else {
        _tracker.flushQueue(sender);
    }
}

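// Looks up each source bucket in the bucket database and maps every content
// node holding a copy to the list of source buckets present on that node.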
JoinOperation::NodeToBuckets
JoinOperation::resolveSourceBucketsPerTargetNode() const
{
    NodeToBuckets nodeToBuckets;
    const auto& db(_manager->getDistributorComponent().getBucketDatabase());
    for (const auto& bucket : _bucketsToJoin) {
        BucketDatabase::Entry entry(db.get(bucket));

        for (uint32_t j = 0; j < entry->getNodeCount(); j++) {
            nodeToBuckets[entry->getNodeRef(j).getNode()].push_back(bucket);
        }
    }
    return nodeToBuckets;
}

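// A regular join has two sibling source buckets. For inconsistent joins a
// node may only hold a single source bucket; it is then duplicated so the
// join command always carries two source buckets.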
void
JoinOperation::fillMissingSourceBucketsForInconsistentJoins(
        NodeToBuckets& nodeToBuckets) const
{
    for (auto& node : nodeToBuckets) {
        if (node.second.size() == 1) {
            document::BucketId source = node.second.front();
            node.second.push_back(source);
        }
    }
}

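// Queues one JoinBucketsCommand per target node, listing that node's source
// buckets. Returns false if there are no target nodes to send to.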
bool
JoinOperation::enqueueJoinMessagePerTargetNode(
        const NodeToBuckets& nodeToBuckets)
{
    if (nodeToBuckets.empty()) {
        return false;
    }
    for (const auto& node : nodeToBuckets) {
        auto msg = std::make_shared<api::JoinBucketsCommand>(getBucketId());
        msg->getSourceBuckets() = node.second;
        msg->setTimeout(INT_MAX);
        setCommandMeta(*msg);
        _tracker.queueCommand(msg, node.first);
    }
    return true;
}

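// Processes the reply from a single node. On success the source buckets are
// removed from the bucket database for that node and the joined bucket is
// inserted/updated with the returned bucket info. Failures are logged, and a
// BUCKET_NOT_FOUND with a copy still registered triggers a bucket info recheck.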
void
JoinOperation::onReceive(DistributorMessageSender&, const api::StorageReply::SP& msg)
{
    api::JoinBucketsReply& rep = static_cast<api::JoinBucketsReply&>(*msg);
    uint16_t node = _tracker.handleReply(rep);
    if (node == 0xffff) {
        LOG(debug, "Ignored reply since node was max uint16_t for unknown "
                   "reasons");
        return;
    }

    if (rep.getResult().success()) {
        const std::vector<document::BucketId>& sourceBuckets(
                rep.getSourceBuckets());
        for (const auto& sourceBucket : sourceBuckets) {
            _manager->getDistributorComponent().removeNodeFromDB(sourceBucket, node);
        }

        // Add new buckets.
        if (!rep.getBucketInfo().valid()) {
            LOG(debug, "Invalid bucketinfo for bucket %s returned in join",
                getBucketId().toString().c_str());
        } else {
            _manager->getDistributorComponent().updateBucketDatabase(
                    getBucketId(),
                    BucketCopy(_manager->getDistributorComponent().getUniqueTimestamp(),
                               node,
                               rep.getBucketInfo()),
                    DatabaseUpdate::CREATE_IF_NONEXISTING);

            LOG(spam, "Adding joined bucket %s", getBucketId().toString().c_str());
        }
    } else if (rep.getResult().getResult() == api::ReturnCode::BUCKET_NOT_FOUND
            && _manager->getDistributorComponent().getBucketDatabase().get(getBucketId())->getNode(node) != 0)
    {
        _manager->getDistributorComponent().recheckBucketInfo(node, getBucketId());
        LOGBP(warning, "Join failed to find %s: %s",
              getBucketId().toString().c_str(),
              rep.getResult().toString().c_str());
    } else if (rep.getResult().isBusy()) {
        LOG(debug, "Join failed for %s, node was busy. Will retry later",
            getBucketId().toString().c_str());
    } else if (rep.getResult().isCriticalForMaintenance()) {
        LOGBP(warning, "Join failed for %s: %s with error '%s'",
              getBucketId().toString().c_str(), msg->toString().c_str(),
              msg->getResult().toString().c_str());
    } else {
        LOG(debug, "Join failed for %s with non-critical failure: %s",
            getBucketId().toString().c_str(),
            rep.getResult().toString().c_str());
    }
    _ok = rep.getResult().success();

    LOG(debug, "Bucket %s join finished", getBucketId().toString().c_str());
    if (_tracker.finished()) {
        done();
    }
}

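// Blocks the operation while there are pending messages for the target
// bucket or any of the source buckets.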
bool
JoinOperation::isBlocked(const PendingMessageTracker& tracker) const
{
    return (checkBlock(getBucketId(), tracker) ||
            checkBlock(_bucketsToJoin[0], tracker) ||
            (_bucketsToJoin.size() > 1 && checkBlock(_bucketsToJoin[1], tracker)));
}