1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
|
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
/**
* @class storage::MergeHandler
*
* @brief Handles a merge of a single bucket.
*
* A merge is a complex operation in many stages covering multiple nodes. It
* needs to track some state of ongoing merges, and it also needs quite a bit
* of logic.
*
* This class tracks the state and implements the logic, such that
* the rest of the provider layer does not need to concern itself with merges.
*/
#pragma once
#include "types.h"
#include "merge_bucket_info_syncer.h"
#include <vespa/persistence/spi/bucket.h>
#include <vespa/persistence/spi/docentry.h>
#include <vespa/storageapi/message/bucket.h>
#include <vespa/storage/common/cluster_context.h>
#include <vespa/storage/common/messagesender.h>
namespace storage {
namespace spi {
struct PersistenceProvider;
class Context;
}
class PersistenceUtil;
class ApplyBucketDiffEntryResult;
class ApplyBucketDiffState;
class MergeStatus;
/**
 * Stateless handler object implementing the merge protocol for a single
 * bucket: building/exchanging bucket diffs and applying diff entries via
 * the persistence SPI. All public entry points are const; per-merge state
 * lives in MergeStatus / ApplyBucketDiffState, not in this class.
 */
class MergeHandler : public Types,
public MergeBucketInfoSyncer {
public:
// Bitmask flags. NOTE(review): judging by applyDiffEntry's doc below these
// describe the state of a diff entry (in use / removed) — confirm against
// usage in the implementation file.
enum StateFlag {
IN_USE = 0x01,
DELETED = 0x02,
DELETED_IN_PLACE = 0x04
};
/**
 * @param env            Shared persistence-layer utilities (held by reference).
 * @param spi            Persistence provider used for all bucket/document I/O.
 * @param cluster_context Cluster-wide context (held by reference).
 * @param clock          Clock used by the handler (held by reference).
 * @param maxChunkSize   Upper bound (bytes) for data chunks; default 4190208
 *                       (~4 MiB minus protocol overhead — presumably; confirm).
 * @param commonMergeChainOptimalizationMinimumSize
 *                       Threshold for the common-merge-chain optimization
 *                       (note: "Optimalization" spelling is kept for
 *                       compatibility with the implementation file).
 */
MergeHandler(PersistenceUtil& env, spi::PersistenceProvider& spi,
const ClusterContext& cluster_context, const framework::Clock & clock,
uint32_t maxChunkSize = 4190208,
uint32_t commonMergeChainOptimalizationMinimumSize = 64);
/**
 * Fill `output` with this node's metadata entries for the bucket, up to
 * and including maxTimestamp, tagged with myNodeIndex.
 * @return NOTE(review): meaning of the bool is not visible here — likely
 *         success/failure; confirm in the implementation file.
 */
bool buildBucketInfoList(
const spi::Bucket& bucket,
Timestamp maxTimestamp,
uint8_t myNodeIndex,
std::vector<api::GetBucketDiffCommand::Entry>& output,
spi::Context& context) const;
/**
 * Read locally stored document data for the entries in `diff` that this
 * node (nodeIndex) is expected to provide, filling them in-place.
 */
void fetchLocalData(const spi::Bucket& bucket,
std::vector<api::ApplyBucketDiffCommand::Entry>& diff,
uint8_t nodeIndex,
spi::Context& context) const;
/**
 * Apply the entries in `diff` to the local copy of the bucket through the
 * SPI. Completion is reported asynchronously via `async_results`.
 */
void applyDiffLocally(const spi::Bucket& bucket,
std::vector<api::ApplyBucketDiffCommand::Entry>& diff,
uint8_t nodeIndex,
spi::Context& context,
ApplyBucketDiffState& async_results) const;
/** MergeBucketInfoSyncer implementation: re-sync stored bucket info. */
void sync_bucket_info(const spi::Bucket& bucket) const override;
// Message handlers for each step of the merge protocol. The MessageTrackerUP
// variants take ownership of the tracker and return it (possibly with a
// reply attached); the *Reply handlers forward follow-up messages via the
// provided MessageSender.
MessageTrackerUP handleMergeBucket(api::MergeBucketCommand&, MessageTrackerUP) const;
MessageTrackerUP handleGetBucketDiff(api::GetBucketDiffCommand&, MessageTrackerUP) const;
void handleGetBucketDiffReply(api::GetBucketDiffReply&, MessageSender&) const;
MessageTrackerUP handleApplyBucketDiff(api::ApplyBucketDiffCommand&, MessageTrackerUP) const;
void handleApplyBucketDiffReply(api::ApplyBucketDiffReply&, MessageSender&) const;
private:
// All references below are non-owning; the owners must outlive this handler.
const framework::Clock &_clock;
const ClusterContext &_cluster_context;
PersistenceUtil &_env;
spi::PersistenceProvider &_spi;
const uint32_t _maxChunkSize;
const uint32_t _commonMergeChainOptimalizationMinimumSize;
/** Returns a reply if merge is complete */
api::StorageReply::SP processBucketMerge(const spi::Bucket& bucket,
MergeStatus& status,
MessageSender& sender,
spi::Context& context) const;
/**
 * Invoke either put, remove or unrevertable remove on the SPI
 * depending on the flags in the diff entry.
 */
ApplyBucketDiffEntryResult applyDiffEntry(const spi::Bucket&,
const api::ApplyBucketDiffCommand::Entry&,
spi::Context& context,
const document::DocumentTypeRepo& repo) const;
/**
 * Fill entries-vector with metadata for bucket up to maxTimestamp,
 * sorted ascendingly on entry timestamp.
 * Throws std::runtime_error upon iteration failure.
 */
void populateMetaData(const spi::Bucket&,
Timestamp maxTimestamp,
std::vector<spi::DocEntry::UP>& entries,
spi::Context& context) const;
/** Deserialize the document payload of a diff entry using `repo`'s types. */
Document::UP deserializeDiffDocument(
const api::ApplyBucketDiffCommand::Entry& e,
const document::DocumentTypeRepo& repo) const;
};
} // storage
|