-rw-r--r--  metrics/src/vespa/metrics/metricmanager.cpp          4
-rw-r--r--  metrics/src/vespa/metrics/metricmanager.h            1
-rw-r--r--  storage/src/tests/bucketdb/lockablemaptest.cpp       6
-rw-r--r--  storage/src/vespa/storage/bucketdb/lockablemap.h     4
-rw-r--r--  storage/src/vespa/storage/bucketdb/lockablemap.hpp   3
5 files changed, 9 insertions(+), 9 deletions(-)
diff --git a/metrics/src/vespa/metrics/metricmanager.cpp b/metrics/src/vespa/metrics/metricmanager.cpp
index fd398f6d7d7..7ac912416de 100644
--- a/metrics/src/vespa/metrics/metricmanager.cpp
+++ b/metrics/src/vespa/metrics/metricmanager.cpp
@@ -74,7 +74,6 @@ MetricManager::MetricManager(std::unique_ptr<Timer> timer)
false)),
_timer(std::move(timer)),
_lastProcessedTime(0),
- _forceEventLogging(false),
_snapshotUnsetMetrics(false),
_consumerConfigChanged(false),
_metricManagerMetrics("metricmanager", {}, "Metrics for the metric manager upkeep tasks"),
@@ -719,7 +718,6 @@ MetricManager::forceEventLogging()
LOG(debug, "Forcing event logging to happen.");
// Ensure background thread is not in a current cycle during change.
vespalib::MonitorGuard sync(_waiter);
- _forceEventLogging = true;
sync.signal();
}
@@ -788,7 +786,7 @@ MetricManager::tick(const MetricLockGuard & guard, time_t currentTime)
// Set next work time to the time we want to take next snapshot.
time_t nextWorkTime = _snapshots[0]->getToTime() + _snapshots[0]->getPeriod();
time_t nextUpdateHookTime;
- if (nextWorkTime <= currentTime || _forceEventLogging) {
+ if (nextWorkTime <= currentTime) {
// If taking a new snapshot or logging, force calls to all
// update hooks.
LOG(debug, "%s. Calling update hooks.", nextWorkTime <= currentTime
diff --git a/metrics/src/vespa/metrics/metricmanager.h b/metrics/src/vespa/metrics/metricmanager.h
index b728510b6e4..6d1f8f4dcf7 100644
--- a/metrics/src/vespa/metrics/metricmanager.h
+++ b/metrics/src/vespa/metrics/metricmanager.h
@@ -111,7 +111,6 @@ private:
MetricSnapshot::SP _totalMetrics;
std::unique_ptr<Timer> _timer;
std::atomic<time_t> _lastProcessedTime;
- bool _forceEventLogging;
// Should be added to config, but wont now due to problems with
// upgrading
bool _snapshotUnsetMetrics;
diff --git a/storage/src/tests/bucketdb/lockablemaptest.cpp b/storage/src/tests/bucketdb/lockablemaptest.cpp
index a55e258129c..3dd4dfccf77 100644
--- a/storage/src/tests/bucketdb/lockablemaptest.cpp
+++ b/storage/src/tests/bucketdb/lockablemaptest.cpp
@@ -241,12 +241,12 @@ TEST(LockableMapTest, chunked_iteration_is_transparent_across_chunk_sizes) {
map.insert(14, A(42, 0, 0), "foo", preExisted);
NonConstProcessor ncproc; // Increments 2nd value in all entries.
// chunkedAll with chunk size of 1
- map.chunkedAll(ncproc, "foo", 1);
+ map.chunkedAll(ncproc, "foo", 1us, 1);
EXPECT_EQ(A(4, 7, 0), *map.get(11, "foo"));
EXPECT_EQ(A(42, 1, 0), *map.get(14, "foo"));
EXPECT_EQ(A(1, 3, 3), *map.get(16, "foo"));
// chunkedAll with chunk size larger than db size
- map.chunkedAll(ncproc, "foo", 100);
+ map.chunkedAll(ncproc, "foo", 1us, 100);
EXPECT_EQ(A(4, 8, 0), *map.get(11, "foo"));
EXPECT_EQ(A(42, 2, 0), *map.get(14, "foo"));
EXPECT_EQ(A(1, 4, 3), *map.get(16, "foo"));
@@ -263,7 +263,7 @@ TEST(LockableMapTest, can_abort_during_chunked_iteration) {
decisions.push_back(Map::CONTINUE);
decisions.push_back(Map::ABORT);
EntryProcessor proc(decisions);
- map.chunkedAll(proc, "foo", 100);
+ map.chunkedAll(proc, "foo", 1us, 100);
std::string expected("11 - A(4, 6, 0)\n"
"14 - A(42, 0, 0)\n");
EXPECT_EQ(expected, proc.toString());
diff --git a/storage/src/vespa/storage/bucketdb/lockablemap.h b/storage/src/vespa/storage/bucketdb/lockablemap.h
index 2b2c2cbe7a8..f6357e8851e 100644
--- a/storage/src/vespa/storage/bucketdb/lockablemap.h
+++ b/storage/src/vespa/storage/bucketdb/lockablemap.h
@@ -19,6 +19,7 @@
#include <vespa/vespalib/stllike/hash_map.h>
#include <vespa/vespalib/stllike/hash_set.h>
#include <vespa/document/bucket/bucketid.h>
+#include <vespa/vespalib/util/time.h>
#include <mutex>
#include <condition_variable>
#include <cassert>
@@ -162,7 +163,7 @@ public:
const key_type& first = key_type(),
const key_type& last = key_type() - 1 );
- static constexpr uint32_t DEFAULT_CHUNK_SIZE = 10000;
+ static constexpr uint32_t DEFAULT_CHUNK_SIZE = 1000;
/**
* Iterate over the entire database contents, holding the global database
@@ -173,6 +174,7 @@ public:
template <typename Functor>
void chunkedAll(Functor& functor,
const char* clientId,
+ vespalib::duration yieldTime = 10us,
uint32_t chunkSize = DEFAULT_CHUNK_SIZE);
void print(std::ostream& out, bool verbose, const std::string& indent) const override;
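Note: chunkedAll() now takes a vespalib::duration yieldTime (default 10us) ahead of the chunk size, and the default chunk size drops from 10000 to 1000 entries. Making the yield time a parameter is what lets the tests above pass a tiny 1us pause. Call shapes after this change, as a sketch only ("map" and "proc" stand for a LockableMap instance and a functor as in the tests above; the duration literals are assumed to come from std::chrono_literals):

    using namespace std::chrono_literals;

    map.chunkedAll(proc, "clientId");            // defaults: 10us yield, chunks of 1000
    map.chunkedAll(proc, "clientId", 1us);       // explicit yield, default chunk size
    map.chunkedAll(proc, "clientId", 1us, 100);  // explicit yield and chunk size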
diff --git a/storage/src/vespa/storage/bucketdb/lockablemap.hpp b/storage/src/vespa/storage/bucketdb/lockablemap.hpp
index 3cef17c9025..a0d7e63c1fd 100644
--- a/storage/src/vespa/storage/bucketdb/lockablemap.hpp
+++ b/storage/src/vespa/storage/bucketdb/lockablemap.hpp
@@ -405,6 +405,7 @@ template <typename Functor>
void
LockableMap<Map>::chunkedAll(Functor& functor,
const char* clientId,
+ vespalib::duration yieldTime,
uint32_t chunkSize)
{
key_type key{};
@@ -416,7 +417,7 @@ LockableMap<Map>::chunkedAll(Functor& functor,
// This is a pragmatic stop-gap solution; a more robust change requires
// the redesign of bucket DB locking and signalling semantics in the
// face of blocked point lookups.
- std::this_thread::sleep_for(std::chrono::microseconds(100));
+ std::this_thread::sleep_for(yieldTime);
}
}
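Note: the sleep between chunks is now the caller-supplied yieldTime instead of a hard-coded 100 microseconds. A standalone illustration of the chunk-then-yield pattern this parameterizes: hold the lock for at most one chunk, then sleep so threads blocked on point lookups can get in (GuardedMap and std::map stand in for the real LockableMap; this is a sketch under those assumptions, not the actual implementation):

    #include <chrono>
    #include <cstdint>
    #include <map>
    #include <mutex>
    #include <thread>

    using namespace std::chrono_literals;

    class GuardedMap {
        std::mutex              _lock;
        std::map<uint64_t, int> _map;
    public:
        void insert(uint64_t key, int value) {
            std::lock_guard<std::mutex> guard(_lock);
            _map[key] = value;
        }
        template <typename Functor>
        void chunkedAll(Functor& functor,
                        std::chrono::nanoseconds yieldTime = 10us,
                        uint32_t chunkSize = 1000)
        {
            uint64_t key = 0;
            bool more = true;
            while (more) {
                {   // Hold the lock for at most one chunk of entries.
                    std::lock_guard<std::mutex> guard(_lock);
                    auto it = _map.lower_bound(key);
                    uint32_t processed = 0;
                    for (; it != _map.end() && processed < chunkSize; ++it, ++processed) {
                        functor(it->first, it->second);
                    }
                    more = (it != _map.end());
                    if (more) {
                        key = it->first;  // resume from the first unprocessed key
                    }
                }
                if (more) {
                    // Yield between chunks so waiters blocked on the lock
                    // get a chance to run.
                    std::this_thread::sleep_for(yieldTime);
                }
            }
        }
    };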