 client/go/go.mod | 3
 client/go/go.sum | 4
 client/go/internal/vespa/document/document.go | 223
 client/go/internal/vespa/document/document_test.go | 30
 client/go/internal/vespa/document/http.go | 7
 config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java | 1
 config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilder.java | 4
 configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java | 3
 controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentStatus.java | 11
 controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java | 7
 controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/UpgraderTest.java | 5
 flags/src/main/java/com/yahoo/vespa/flags/Flags.java | 10
 linguistics-components/src/main/java/com/yahoo/language/huggingface/HuggingFaceTokenizer.java | 1
 node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRepositoryMaintenance.java | 10
 storage/src/tests/distributor/check_condition_test.cpp | 16
 storage/src/tests/distributor/putoperationtest.cpp | 3
 storage/src/tests/distributor/removeoperationtest.cpp | 3
 storage/src/vespa/storage/distributor/distributormetricsset.cpp | 2
 storage/src/vespa/storage/distributor/distributormetricsset.h | 22
 storage/src/vespa/storage/distributor/externaloperationhandler.cpp | 7
 storage/src/vespa/storage/distributor/operations/external/check_condition.cpp | 15
 storage/src/vespa/storage/distributor/operations/external/check_condition.h | 4
 storage/src/vespa/storage/distributor/operations/external/putoperation.cpp | 5
 storage/src/vespa/storage/distributor/operations/external/putoperation.h | 3
 storage/src/vespa/storage/distributor/operations/external/removeoperation.cpp | 5
 storage/src/vespa/storage/distributor/operations/external/removeoperation.h | 3
 storage/src/vespa/storage/distributor/operations/external/twophaseupdateoperation.cpp | 3
 storage/src/vespa/storage/distributor/operations/external/twophaseupdateoperation.h | 1
 storage/src/vespa/storage/distributor/persistence_operation_metric_set.cpp | 14
 storage/src/vespa/storage/distributor/persistence_operation_metric_set.h | 6
 tenant-cd-api/src/main/java/ai/vespa/hosted/cd/EnabledInRegions.java | 2
 31 files changed, 299 insertions, 134 deletions
diff --git a/client/go/go.mod b/client/go/go.mod
index c70ee5b75c8..f5f923cc063 100644
--- a/client/go/go.mod
+++ b/client/go/go.mod
@@ -6,7 +6,8 @@ require (
github.com/alessio/shellescape v1.4.1
github.com/briandowns/spinner v1.23.0
github.com/fatih/color v1.15.0
- github.com/goccy/go-json v0.10.2
+ // This is the most recent version compatible with Go 1.18. Upgrade when we upgrade our Go version
+ github.com/go-json-experiment/json v0.0.0-20220727223814-4987ed27d447
github.com/klauspost/compress v1.16.5
github.com/mattn/go-colorable v0.1.13
github.com/mattn/go-isatty v0.0.18
diff --git a/client/go/go.sum b/client/go/go.sum
index 9b79c215864..861c8725ed0 100644
--- a/client/go/go.sum
+++ b/client/go/go.sum
@@ -11,8 +11,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs=
github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw=
-github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
-github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
+github.com/go-json-experiment/json v0.0.0-20220727223814-4987ed27d447 h1:hDdASyrtiSuQvaafDrVTX34wy4ibhxrJO9/vyFbBt0k=
+github.com/go-json-experiment/json v0.0.0-20220727223814-4987ed27d447/go.mod h1:jbpkervfdK2HCcB2YEFmwYeaq057KFiaaKTNTHV4OOQ=
github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk=
github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
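
Note on the new dependency: the switch from goccy/go-json to go-json-experiment/json is what enables the streaming decoder further down. As a minimal, hedged sketch (not part of this change set), driving that library token-by-token looks roughly like this; the calls mirror the ones document.go now uses (NewDecoder, ReadToken, Kind), and the input string is made up for illustration:

package main

import (
	"fmt"
	"strings"

	"github.com/go-json-experiment/json"
)

func main() {
	// Decode directly from an io.Reader, one token at a time, without
	// buffering the whole value in memory first.
	dec := json.NewDecoder(strings.NewReader(`{"put": "id:ns:type::doc1", "create": true}`))
	for {
		tok, err := dec.ReadToken()
		if err != nil {
			break // io.EOF once the input is exhausted
		}
		fmt.Printf("kind=%q token=%v\n", tok.Kind(), tok)
	}
}
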
diff --git a/client/go/internal/vespa/document/document.go b/client/go/internal/vespa/document/document.go
index ce8b22b24f0..a33c4a3c5af 100644
--- a/client/go/internal/vespa/document/document.go
+++ b/client/go/internal/vespa/document/document.go
@@ -11,7 +11,13 @@ import (
"time"
- "github.com/goccy/go-json"
+ // Why do we use an experimental parser? This appears to be the only JSON library that satisfies the following
+ // requirements:
+ // - Faster than the std parser
+ // - Supports parsing from a io.Reader
+ // - Supports parsing token-by-token
+ // - Few allocations during parsing (especially for large objects)
+ "github.com/go-json-experiment/json"
)
var asciiSpace = [256]uint8{'\t': 1, '\n': 1, '\v': 1, '\f': 1, '\r': 1, ' ': 1}
@@ -22,6 +28,12 @@ const (
OperationPut Operation = iota
OperationUpdate
OperationRemove
+
+ jsonArrayStart json.Kind = '['
+ jsonArrayEnd json.Kind = ']'
+ jsonObjectStart json.Kind = '{'
+ jsonObjectEnd json.Kind = '}'
+ jsonString json.Kind = '"'
)
// Id represents a Vespa document ID.
@@ -103,22 +115,16 @@ type Document struct {
Create bool
}
-type jsonDocument struct {
- IdString string `json:"id"`
- PutId string `json:"put"`
- UpdateId string `json:"update"`
- RemoveId string `json:"remove"`
- Condition string `json:"condition"`
- Fields json.RawMessage `json:"fields"`
- Create bool `json:"create"`
-}
-
// Decoder decodes documents from a JSON structure which is either an array of objects, or objects separated by newline.
type Decoder struct {
- buf *bufio.Reader
- dec *json.Decoder
+ r *bufio.Reader
+ dec *json.Decoder
+ buf bytes.Buffer
+
array bool
jsonl bool
+
+ fieldsEnd int64
}
func (d Document) String() string {
@@ -139,12 +145,16 @@ func (d Document) String() string {
if d.Create {
sb.WriteString(", create=true")
}
+ if d.Fields != nil {
+ sb.WriteString(", fields=")
+ sb.WriteString(string(d.Fields))
+ }
return sb.String()
}
func (d *Decoder) guessMode() error {
for !d.array && !d.jsonl {
- b, err := d.buf.ReadByte()
+ b, err := d.r.ReadByte()
if err != nil {
return err
}
@@ -152,36 +162,65 @@ func (d *Decoder) guessMode() error {
if b < 0x80 && asciiSpace[b] != 0 {
continue
}
- switch rune(b) {
- case '{':
+ switch json.Kind(b) {
+ case jsonObjectStart:
d.jsonl = true
- case '[':
+ case jsonArrayStart:
d.array = true
default:
return fmt.Errorf("unexpected token: %q", string(b))
}
- if err := d.buf.UnreadByte(); err != nil {
+ if err := d.r.UnreadByte(); err != nil {
return err
}
- if err := d.readArrayToken(true); err != nil {
+ if err := d.readArrayDelim(true); err != nil {
return err
}
}
return nil
}
-func (d *Decoder) readArrayToken(open bool) error {
+func (d *Decoder) readNext(kind json.Kind) (json.Token, error) {
+ t, err := d.dec.ReadToken()
+ if err != nil {
+ return json.Token{}, err
+ }
+ if t.Kind() != kind {
+ return json.Token{}, fmt.Errorf("unexpected json kind: %q: want %q", t, kind)
+ }
+ return t, nil
+}
+
+func (d *Decoder) readArrayDelim(open bool) error {
if !d.array {
return nil
}
- t, err := d.dec.Token()
+ kind := jsonArrayEnd
+ if open {
+ kind = jsonArrayStart
+ }
+ _, err := d.readNext(kind)
+ return err
+}
+
+func (d *Decoder) readString() (string, error) {
+ t, err := d.readNext(jsonString)
+ if err != nil {
+ return "", err
+ }
+ return t.String(), nil
+}
+
+func (d *Decoder) readBool() (bool, error) {
+ t, err := d.dec.ReadToken()
if err != nil {
- return err
+ return false, err
}
- if (open && t == json.Delim('[')) || (!open && t == json.Delim(']')) {
- return nil
+ kind := t.Kind()
+ if kind != 't' && kind != 'f' {
+ return false, fmt.Errorf("unexpected json kind: %q: want %q or %q", t, 't', 'f')
}
- return fmt.Errorf("invalid array token: %q", t)
+ return t.Bool(), nil
}
func (d *Decoder) Decode() (Document, error) {
@@ -192,60 +231,112 @@ func (d *Decoder) Decode() (Document, error) {
return doc, err
}
+func (d *Decoder) readField(name string, doc *Document) error {
+ readId := false
+ switch name {
+ case "id", "put":
+ readId = true
+ doc.Operation = OperationPut
+ case "update":
+ readId = true
+ doc.Operation = OperationUpdate
+ case "remove":
+ readId = true
+ doc.Operation = OperationRemove
+ case "condition":
+ condition, err := d.readString()
+ if err != nil {
+ return err
+ }
+ doc.Condition = condition
+ case "create":
+ create, err := d.readBool()
+ if err != nil {
+ return err
+ }
+ doc.Create = create
+ case "fields":
+ if _, err := d.readNext(jsonObjectStart); err != nil {
+ return err
+ }
+ start := d.dec.InputOffset() - 1
+ // Skip data between the most recent ending position of fields and current offset
+ d.buf.Next(int(start - d.fieldsEnd))
+ depth := 1
+ for depth > 0 {
+ t, err := d.dec.ReadToken()
+ if err != nil {
+ return err
+ }
+ switch t.Kind() {
+ case jsonObjectStart:
+ depth++
+ case jsonObjectEnd:
+ depth--
+ }
+ }
+ d.fieldsEnd = d.dec.InputOffset()
+ doc.Fields = make([]byte, int(d.fieldsEnd-start))
+ if _, err := d.buf.Read(doc.Fields); err != nil {
+ return err
+ }
+ }
+ if readId {
+ s, err := d.readString()
+ if err != nil {
+ return err
+ }
+ id, err := ParseId(s)
+ if err != nil {
+ return err
+ }
+ doc.Id = id
+ }
+ return nil
+}
+
func (d *Decoder) decode() (Document, error) {
if err := d.guessMode(); err != nil {
return Document{}, err
}
- if !d.dec.More() {
- if err := d.readArrayToken(false); err != nil {
+ if d.dec.PeekKind() == jsonArrayEnd {
+ // Reached end of the array holding document operations
+ if err := d.readArrayDelim(false); err != nil {
return Document{}, err
}
return Document{}, io.EOF
}
- doc := jsonDocument{}
- if err := d.dec.Decode(&doc); err != nil {
+ // Start of document operation
+ if _, err := d.readNext(jsonObjectStart); err != nil {
return Document{}, err
}
- return parseDocument(&doc)
-}
-
-func NewDecoder(r io.Reader) *Decoder {
- buf := bufio.NewReaderSize(r, 1<<26)
- return &Decoder{
- buf: buf,
- dec: json.NewDecoder(buf),
+ var doc Document
+loop:
+ for {
+ switch d.dec.PeekKind() {
+ case jsonString:
+ t, err := d.dec.ReadToken()
+ if err != nil {
+ return Document{}, err
+ }
+ if err := d.readField(t.String(), &doc); err != nil {
+ return Document{}, err
+ }
+ default:
+ if _, err := d.readNext(jsonObjectEnd); err != nil {
+ return Document{}, err
+ }
+ break loop
+ }
}
+ return doc, nil
}
-func parseDocument(d *jsonDocument) (Document, error) {
- id := ""
- var op Operation
- if d.IdString != "" {
- op = OperationPut
- id = d.IdString
- } else if d.PutId != "" {
- op = OperationPut
- id = d.PutId
- } else if d.UpdateId != "" {
- op = OperationUpdate
- id = d.UpdateId
- } else if d.RemoveId != "" {
- op = OperationRemove
- id = d.RemoveId
- } else {
- return Document{}, fmt.Errorf("invalid document: missing operation: %v", d)
- }
- docId, err := ParseId(id)
- if err != nil {
- return Document{}, err
- }
- return Document{
- Id: docId,
- Operation: op,
- Condition: d.Condition,
- Create: d.Create,
- Fields: d.Fields,
- }, nil
+func NewDecoder(r io.Reader) *Decoder {
+ sz := 1 << 26
+ d := &Decoder{r: bufio.NewReaderSize(r, sz)}
+ d.dec = json.NewDecoder(io.TeeReader(d.r, &d.buf))
+ return d
}
func parseError(value string) error {
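
The rewritten NewDecoder above pairs the token decoder with an io.TeeReader so that every byte the decoder consumes is also mirrored into an internal bytes.Buffer; Decode then slices the verbatim "fields" object out of that buffer using InputOffset, instead of re-encoding it. A small stand-alone sketch of that capture trick (standard library only, illustrative input):

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

func main() {
	var buf bytes.Buffer
	src := strings.NewReader(`{"fields": {"foo": "123"}}`)
	// Everything read through tee is copied into buf, so the raw bytes
	// remain available even after a consumer has parsed them.
	tee := io.TeeReader(src, &buf)
	if _, err := io.ReadAll(tee); err != nil { // stand-in for the json.Decoder
		panic(err)
	}
	fmt.Println(buf.String()) // prints the exact bytes that were consumed
}
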
diff --git a/client/go/internal/vespa/document/document_test.go b/client/go/internal/vespa/document/document_test.go
index 397136173bc..71400314634 100644
--- a/client/go/internal/vespa/document/document_test.go
+++ b/client/go/internal/vespa/document/document_test.go
@@ -113,18 +113,26 @@ func feedInput(jsonl bool) string {
`
{
"put": "id:ns:type::doc1",
- "fields": {"foo": "123"}
+ "fields": { "foo" : "123", "bar": {"a": [1, 2, 3]}}
}`,
`
{
"put": "id:ns:type::doc2",
+ "create": false,
+ "condition": "foo",
"fields": {"bar": "456"}
}`,
`
{
- "remove": "id:ns:type::doc1"
+ "remove": "id:ns:type::doc3"
}
-`}
+`,
+ `
+{
+ "put": "id:ns:type::doc4",
+ "create": true,
+ "fields": {"qux": "789"}
+}`}
if jsonl {
return strings.Join(operations, "\n")
}
@@ -135,9 +143,10 @@ func testDocumentDecoder(t *testing.T, jsonLike string) {
t.Helper()
r := NewDecoder(strings.NewReader(jsonLike))
want := []Document{
- {Id: mustParseId("id:ns:type::doc1"), Operation: OperationPut, Fields: []byte(`{"foo": "123"}`)},
- {Id: mustParseId("id:ns:type::doc2"), Operation: OperationPut, Fields: []byte(`{"bar": "456"}`)},
- {Id: mustParseId("id:ns:type::doc1"), Operation: OperationRemove},
+ {Id: mustParseId("id:ns:type::doc1"), Operation: OperationPut, Fields: []byte(`{ "foo" : "123", "bar": {"a": [1, 2, 3]}}`)},
+ {Id: mustParseId("id:ns:type::doc2"), Operation: OperationPut, Condition: "foo", Fields: []byte(`{"bar": "456"}`)},
+ {Id: mustParseId("id:ns:type::doc3"), Operation: OperationRemove},
+ {Id: mustParseId("id:ns:type::doc4"), Operation: OperationPut, Create: true, Fields: []byte(`{"qux": "789"}`)},
}
got := []Document{}
for {
@@ -155,10 +164,11 @@ func testDocumentDecoder(t *testing.T, jsonLike string) {
}
}
-func TestDocumentDecoder(t *testing.T) {
- testDocumentDecoder(t, feedInput(false))
- testDocumentDecoder(t, feedInput(true))
+func TestDocumentDecoderArray(t *testing.T) { testDocumentDecoder(t, feedInput(false)) }
+
+func TestDocumentDecoderJSONL(t *testing.T) { testDocumentDecoder(t, feedInput(true)) }
+func TestDocumentDecoderInvalid(t *testing.T) {
jsonLike := `
{
"put": "id:ns:type::doc1",
@@ -175,7 +185,7 @@ func TestDocumentDecoder(t *testing.T) {
t.Errorf("unexpected error: %s", err)
}
_, err = r.Decode()
- wantErr := "invalid json at byte offset 122: json: string of object unexpected end of JSON input"
+ wantErr := "invalid json at byte offset 109: json: invalid character '\\n' within string (expecting non-control character)"
if err.Error() != wantErr {
t.Errorf("want error %q, got %q", wantErr, err.Error())
}
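
For reference, the decoder is consumed the same way the test above does it: call Decode until io.EOF. A hedged usage sketch (the document package is internal to client/go, so a real caller lives inside that module; the feed line here is illustrative):

package main

import (
	"fmt"
	"io"
	"strings"

	"github.com/vespa-engine/vespa/client/go/internal/vespa/document"
)

func main() {
	input := `{"put": "id:ns:type::doc1", "fields": {"foo": "123"}}`
	dec := document.NewDecoder(strings.NewReader(input))
	for {
		doc, err := dec.Decode()
		if err == io.EOF {
			break // end of feed
		}
		if err != nil {
			panic(err)
		}
		fmt.Println(doc) // Document.String() now includes the fields
	}
}
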
diff --git a/client/go/internal/vespa/document/http.go b/client/go/internal/vespa/document/http.go
index a389a82cee1..8f7ac5bfe63 100644
--- a/client/go/internal/vespa/document/http.go
+++ b/client/go/internal/vespa/document/http.go
@@ -14,8 +14,7 @@ import (
"sync/atomic"
"time"
- "github.com/goccy/go-json"
-
+ "github.com/go-json-experiment/json"
"github.com/klauspost/compress/gzip"
"github.com/vespa-engine/vespa/client/go/internal/build"
@@ -328,8 +327,8 @@ func resultWithResponse(resp *http.Response, sentBytes int, result Result, elaps
result.Status = StatusTransportFailure
}
var body struct {
- Message string `json:"message"`
- Trace json.RawMessage `json:"trace"`
+ Message string `json:"message"`
+ Trace json.RawValue `json:"trace"`
}
buf.Reset()
written, err := io.Copy(buf, resp.Body)
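
The http.go hunk also swaps the deferred-parsing type: json.RawMessage becomes json.RawValue, the experimental library's equivalent for keeping a sub-document as raw bytes. A small hedged sketch of that pattern, assuming the library's Unmarshal entry point and an illustrative payload:

package main

import (
	"fmt"

	"github.com/go-json-experiment/json"
)

func main() {
	var body struct {
		Message string        `json:"message"`
		Trace   json.RawValue `json:"trace"`
	}
	data := []byte(`{"message": "ok", "trace": {"children": []}}`)
	// Trace is kept as raw bytes and can be parsed (or ignored) later.
	if err := json.Unmarshal(data, &body); err != nil {
		panic(err)
	}
	fmt.Println(body.Message, string(body.Trace))
}
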
diff --git a/config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java b/config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java
index 41ee47513a0..0d42df88d04 100644
--- a/config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java
+++ b/config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java
@@ -116,6 +116,7 @@ public interface ModelContext {
@ModelFeatureFlag(owners = {"baldersheim"}, comment = "Select summary decode type") default String summaryDecodePolicy() { return "eager"; }
@ModelFeatureFlag(owners = {"hmusum"}) default boolean allowMoreThanOneContentGroupDown(ClusterSpec.Id id) { return false; }
@ModelFeatureFlag(owners = {"vekterli", "havardpe"}) default boolean enableConditionalPutRemoveWriteRepair() { return false; }
+ @ModelFeatureFlag(owners = {"mortent", "olaa"}) default boolean enableDataplaneProxy() { return false; }
//Below are all flags that must be kept until 7 is out of the door
@ModelFeatureFlag(owners = {"arnej"}, removeAfter="7.last") default boolean ignoreThreadStackSizes() { return false; }
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilder.java b/config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilder.java
index 3cb0731c43a..e1d222e0546 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilder.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilder.java
@@ -932,9 +932,11 @@ public class ContainerModelBuilder extends ConfigModelBuilder<ContainerModel> {
int nodeCount = deployState.zone().environment().isProduction() ? 2 : 1;
deployState.getDeployLogger().logApplicationPackage(Level.INFO, "Using " + nodeCount + " nodes in " + cluster);
var nodesSpec = NodesSpecification.dedicated(nodeCount, context);
+ ClusterSpec.Id clusterId = ClusterSpec.Id.from(cluster.getName());
var hosts = nodesSpec.provision(hostSystem,
ClusterSpec.Type.container,
- ClusterSpec.Id.from(cluster.getName()),
+ clusterId,
+ zoneEndpoint(context, clusterId),
deployState.getDeployLogger(),
false,
context.clusterInfo().build());
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java b/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java
index 59ddb8b68ab..efdcaeec3aa 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java
@@ -206,6 +206,7 @@ public class ModelContextImpl implements ModelContext {
private final String summaryDecodePolicy;
private final Predicate<ClusterSpec.Id> allowMoreThanOneContentGroupDown;
private final boolean enableConditionalPutRemoveWriteRepair;
+ private final boolean enableDataplaneProxy;
public FeatureFlags(FlagSource source, ApplicationId appId, Version version) {
this.defaultTermwiseLimit = flagValue(source, appId, version, Flags.DEFAULT_TERM_WISE_LIMIT);
@@ -254,6 +255,7 @@ public class ModelContextImpl implements ModelContext {
this.summaryDecodePolicy = flagValue(source, appId, version, Flags.SUMMARY_DECODE_POLICY);
this.allowMoreThanOneContentGroupDown = clusterId -> flagValue(source, appId, version, clusterId, Flags.ALLOW_MORE_THAN_ONE_CONTENT_GROUP_DOWN);
this.enableConditionalPutRemoveWriteRepair = flagValue(source, appId, version, Flags.ENABLE_CONDITIONAL_PUT_REMOVE_WRITE_REPAIR);
+ this.enableDataplaneProxy = flagValue(source, appId, version, Flags.ENABLE_DATAPLANE_PROXY);
}
@Override public int heapSizePercentage() { return heapPercentage; }
@@ -310,6 +312,7 @@ public class ModelContextImpl implements ModelContext {
@Override public boolean enableGlobalPhase() { return enableGlobalPhase; }
@Override public boolean allowMoreThanOneContentGroupDown(ClusterSpec.Id id) { return allowMoreThanOneContentGroupDown.test(id); }
@Override public boolean enableConditionalPutRemoveWriteRepair() { return enableConditionalPutRemoveWriteRepair; }
+ @Override public boolean enableDataplaneProxy() { return enableDataplaneProxy; }
private static <V> V flagValue(FlagSource source, ApplicationId appId, Version vespaVersion, UnboundFlag<? extends V, ?, ?> flag) {
return flag.bindTo(source)
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentStatus.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentStatus.java
index 91ece6733e1..ac896338643 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentStatus.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentStatus.java
@@ -645,10 +645,16 @@ public class DeploymentStatus {
Optional<Instant> platformReadyAt = step.dependenciesCompletedAt(change.withoutApplication(), Optional.of(job));
Optional<Instant> revisionReadyAt = step.dependenciesCompletedAt(change.withoutPlatform(), Optional.of(job));
+ boolean failingUpgradeOnlyTests = ! jobs().type(systemTest(job.type()), stagingTest(job.type()))
+ .failingHardOn(Versions.from(change.withoutApplication(), application, deploymentFor(job), () -> systemVersion))
+ .isEmpty();
+
// If neither change is ready, we guess based on the specified rollout.
if (platformReadyAt.isEmpty() && revisionReadyAt.isEmpty()) {
return switch (rollout) {
- case separate -> List.of(change.withoutApplication(), change); // Platform should stay ahead.
+ case separate -> ! failingUpgradeOnlyTests
+ ? List.of(change.withoutApplication(), change) // Platform should stay ahead ...
+ : List.of(change); // ... unless upgrade-only is failing tests.
case leading -> List.of(change); // They should eventually join.
case simultaneous -> List.of(change.withoutPlatform(), change); // Revision should get ahead.
};
@@ -663,9 +669,6 @@ public class DeploymentStatus {
// Both changes are ready for this step, and we look to the specified rollout to decide.
boolean platformReadyFirst = platformReadyAt.get().isBefore(revisionReadyAt.get());
boolean revisionReadyFirst = revisionReadyAt.get().isBefore(platformReadyAt.get());
- boolean failingUpgradeOnlyTests = ! jobs().type(systemTest(job.type()), stagingTest(job.type()))
- .failingHardOn(Versions.from(change.withoutApplication(), application, deploymentFor(job), () -> systemVersion))
- .isEmpty();
return switch (rollout) {
case separate -> // Let whichever change rolled out first, keep rolling first, unless upgrade alone is failing.
(platformReadyFirst || platformReadyAt.get().equals(Instant.EPOCH)) // Assume platform was first if no jobs have run yet.
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java
index f911bde9535..b16fadfd230 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java
@@ -2428,6 +2428,8 @@ public class ApplicationApiHandler extends AuditLoggingRequestHandler {
if ( ! type.environment().isManuallyDeployed() && ! (isOperator(request) || controller.system().isCd()))
throw new IllegalArgumentException("Direct deployments are only allowed to manually deployed environments.");
+ controller.applications().verifyPlan(id.tenant());
+
Map<String, byte[]> dataParts = parseDataParts(request);
if ( ! dataParts.containsKey("applicationZip"))
throw new IllegalArgumentException("Missing required form part 'applicationZip'");
@@ -3047,6 +3049,9 @@ public class ApplicationApiHandler extends AuditLoggingRequestHandler {
}
private HttpResponse submit(String tenant, String application, HttpRequest request) {
+ TenantName tenantName = TenantName.from(tenant);
+ controller.applications().verifyPlan(tenantName);
+
Map<String, byte[]> dataParts = parseDataParts(request);
Inspector submitOptions = SlimeUtils.jsonToSlime(dataParts.get(EnvironmentResource.SUBMIT_OPTIONS)).get();
long projectId = submitOptions.field("projectId").asLong(); // Absence of this means it's not a prod app :/
@@ -3072,8 +3077,6 @@ public class ApplicationApiHandler extends AuditLoggingRequestHandler {
byte[] testPackage = dataParts.getOrDefault(APPLICATION_TEST_ZIP, new byte[0]);
Submission submission = new Submission(applicationPackage, testPackage, sourceUrl, sourceRevision, authorEmail, description, controller.clock().instant(), risk);
- TenantName tenantName = TenantName.from(tenant);
- controller.applications().verifyPlan(tenantName);
controller.applications().verifyApplicationIdentityConfiguration(tenantName,
Optional.empty(),
Optional.empty(),
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/UpgraderTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/UpgraderTest.java
index 96c1d7c545d..f1e8697cf41 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/UpgraderTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/UpgraderTest.java
@@ -170,7 +170,10 @@ public class UpgraderTest {
// --- Failing application is repaired by changing the application, causing confidence to move above 'high' threshold
// Deploy application change
default0.submit(applicationPackage("default"));
- default0.deploy();
+ default0.runJob(systemTest)
+ .jobAborted(stagingTest) // New revision causes run with failing upgrade alone to be aborted.
+ .runJob(stagingTest)
+ .deploy();
tester.controllerTester().computeVersionStatus();
assertEquals(VespaVersion.Confidence.high, tester.controller().readVersionStatus().systemVersion().get().confidence());
diff --git a/flags/src/main/java/com/yahoo/vespa/flags/Flags.java b/flags/src/main/java/com/yahoo/vespa/flags/Flags.java
index 0d0ddd11b13..8c75f341572 100644
--- a/flags/src/main/java/com/yahoo/vespa/flags/Flags.java
+++ b/flags/src/main/java/com/yahoo/vespa/flags/Flags.java
@@ -49,7 +49,7 @@ public class Flags {
private static volatile TreeMap<FlagId, FlagDefinition> flags = new TreeMap<>();
public static final UnboundBooleanFlag IPV6_IN_GCP = defineFeatureFlag(
- "ipv6-in-gcp", true,
+ "ipv6-in-gcp", false,
List.of("hakonhall"), "2023-05-15", "2023-06-15",
"Provision GCP hosts with external IPv6 addresses",
"Takes effect on the next host provisioning");
@@ -429,6 +429,14 @@ public class Flags {
"Takes effect at redeployment",
ZONE_ID, APPLICATION_ID);
+ public static final UnboundBooleanFlag ENABLE_DATAPLANE_PROXY = defineFeatureFlag(
+ "enable-dataplane-proxy", false,
+ List.of("mortent", "olaa"), "2023-05-15", "2023-08-01",
+ "Whether to enable dataplane proxy",
+ "Takes effect at redeployment",
+ ZONE_ID, APPLICATION_ID
+ );
+
/** WARNING: public for testing: All flags should be defined in {@link Flags}. */
public static UnboundBooleanFlag defineFeatureFlag(String flagId, boolean defaultValue, List<String> owners,
String createdAt, String expiresAt, String description,
diff --git a/linguistics-components/src/main/java/com/yahoo/language/huggingface/HuggingFaceTokenizer.java b/linguistics-components/src/main/java/com/yahoo/language/huggingface/HuggingFaceTokenizer.java
index b92e0678970..d812b85b82e 100644
--- a/linguistics-components/src/main/java/com/yahoo/language/huggingface/HuggingFaceTokenizer.java
+++ b/linguistics-components/src/main/java/com/yahoo/language/huggingface/HuggingFaceTokenizer.java
@@ -76,6 +76,7 @@ public class HuggingFaceTokenizer extends AbstractComponent implements Embedder,
public String decode(List<Long> tokens, Language language) { return resolve(language).decode(toArray(tokens)); }
@Override public void close() { models.forEach((__, model) -> model.close()); }
+ @Override public void deconstruct() { close(); }
private ai.djl.huggingface.tokenizers.HuggingFaceTokenizer resolve(Language language) {
// Disregard language if there is default model
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRepositoryMaintenance.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRepositoryMaintenance.java
index 722c9acfdc0..67ab36c725e 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRepositoryMaintenance.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRepositoryMaintenance.java
@@ -122,6 +122,8 @@ public class NodeRepositoryMaintenance extends AbstractComponent {
private final NodeFailer.ThrottlePolicy throttlePolicy;
DefaultTimes(Zone zone, Deployer deployer) {
+ boolean isCdZone = zone.system().isCd();
+
autoscalingInterval = Duration.ofMinutes(5);
dynamicProvisionerInterval = Duration.ofMinutes(3);
hostDeprovisionerInterval = Duration.ofMinutes(3);
@@ -137,7 +139,7 @@ public class NodeRepositoryMaintenance extends AbstractComponent {
nodeMetricsCollectionInterval = Duration.ofMinutes(1);
expeditedChangeRedeployInterval = Duration.ofMinutes(3);
// Vespa upgrade frequency is higher in CD so (de)activate OS upgrades more frequently as well
- osUpgradeActivatorInterval = zone.system().isCd() ? Duration.ofSeconds(30) : Duration.ofMinutes(5);
+ osUpgradeActivatorInterval = isCdZone ? Duration.ofSeconds(30) : Duration.ofMinutes(5);
periodicRedeployInterval = Duration.ofMinutes(60);
provisionedExpiry = zone.cloud().dynamicProvisioning() ? Duration.ofMinutes(40) : Duration.ofHours(4);
rebalancerInterval = Duration.ofMinutes(120);
@@ -150,7 +152,7 @@ public class NodeRepositoryMaintenance extends AbstractComponent {
throttlePolicy = NodeFailer.ThrottlePolicy.hosted;
hostRetirerInterval = Duration.ofMinutes(30);
- if (zone.environment().isProduction() && ! zone.system().isCd()) {
+ if (zone.environment().isProduction() && ! isCdZone) {
inactiveExpiry = Duration.ofHours(4); // enough time for the application owner to discover and redeploy
retiredInterval = Duration.ofMinutes(15);
dirtyExpiry = Duration.ofHours(2); // enough time to clean the node
@@ -159,8 +161,10 @@ public class NodeRepositoryMaintenance extends AbstractComponent {
// long enough that nodes aren't reused immediately and delete can happen on all config servers
// with time enough to clean up even with ZK connection issues on config servers
inactiveExpiry = Duration.ofMinutes(1);
- retiredInterval = Duration.ofMinutes(1);
dirtyExpiry = Duration.ofMinutes(30);
+ // Longer time in non-CD since we might end up with many deployments in a short time
+ // when retiring many hosts, e.g. when doing OS upgrades
+ retiredInterval = isCdZone ? Duration.ofMinutes(1) : Duration.ofMinutes(5);
retiredExpiry = Duration.ofDays(1);
}
}
diff --git a/storage/src/tests/distributor/check_condition_test.cpp b/storage/src/tests/distributor/check_condition_test.cpp
index 1b5cede8af6..ee8c9b888bb 100644
--- a/storage/src/tests/distributor/check_condition_test.cpp
+++ b/storage/src/tests/distributor/check_condition_test.cpp
@@ -253,4 +253,20 @@ TEST_F(CheckConditionTest, nested_get_traces_are_propagated_to_outcome) {
});
}
+TEST_F(CheckConditionTest, condition_evaluation_increments_probe_latency_metrics) {
+ getClock().setAbsoluteTimeInSeconds(1);
+ EXPECT_EQ(_metrics.latency.getLongValue("count"), 0);
+ EXPECT_EQ(_metrics.ok.getLongValue("last"), 0);
+ test_cond_with_2_gets_sent([&](auto& cond) {
+ cond.handle_reply(_sender, make_matched_reply(0));
+ getClock().setAbsoluteTimeInSeconds(3);
+ cond.handle_reply(_sender, make_matched_reply(1));
+ }, [&](auto& outcome) noexcept {
+ (void)outcome;
+ });
+ EXPECT_EQ(_metrics.latency.getLongValue("count"), 1);
+ EXPECT_EQ(_metrics.ok.getLongValue("last"), 1);
+ EXPECT_DOUBLE_EQ(_metrics.latency.getLast(), 2'000.0); // in millis
+}
+
}
diff --git a/storage/src/tests/distributor/putoperationtest.cpp b/storage/src/tests/distributor/putoperationtest.cpp
index ff375e5b902..76b6741442e 100644
--- a/storage/src/tests/distributor/putoperationtest.cpp
+++ b/storage/src/tests/distributor/putoperationtest.cpp
@@ -73,7 +73,8 @@ public:
operation_context(),
getDistributorBucketSpace(),
msg,
- metrics().puts);
+ metrics().puts,
+ metrics().put_condition_probes);
op->start(_sender);
}
diff --git a/storage/src/tests/distributor/removeoperationtest.cpp b/storage/src/tests/distributor/removeoperationtest.cpp
index d352d23bb8c..d169c80a95d 100644
--- a/storage/src/tests/distributor/removeoperationtest.cpp
+++ b/storage/src/tests/distributor/removeoperationtest.cpp
@@ -41,7 +41,8 @@ struct RemoveOperationTest : Test, DistributorStripeTestUtil {
operation_context(),
getDistributorBucketSpace(),
msg,
- metrics().removes);
+ metrics().removes,
+ metrics().remove_condition_probes);
op->start(_sender);
}
diff --git a/storage/src/vespa/storage/distributor/distributormetricsset.cpp b/storage/src/vespa/storage/distributor/distributormetricsset.cpp
index fad44782dd4..cbc0e6f6eef 100644
--- a/storage/src/vespa/storage/distributor/distributormetricsset.cpp
+++ b/storage/src/vespa/storage/distributor/distributormetricsset.cpp
@@ -16,11 +16,13 @@ BucketDbMetrics::~BucketDbMetrics() = default;
DistributorMetricSet::DistributorMetricSet()
: MetricSet("distributor", {{"distributor"}}, ""),
puts("puts", this),
+ put_condition_probes("put_condition_probes", this),
updates(this),
update_puts("update_puts", this),
update_gets("update_gets", this),
update_metadata_gets("update_metadata_gets", this),
removes("removes", this),
+ remove_condition_probes("remove_condition_probes", this),
removelocations("removelocations", this),
gets("gets", this),
stats("stats", this),
diff --git a/storage/src/vespa/storage/distributor/distributormetricsset.h b/storage/src/vespa/storage/distributor/distributormetricsset.h
index ac140b85282..739e84759f1 100644
--- a/storage/src/vespa/storage/distributor/distributormetricsset.h
+++ b/storage/src/vespa/storage/distributor/distributormetricsset.h
@@ -20,24 +20,26 @@ struct BucketDbMetrics : metrics::MetricSet {
class DistributorMetricSet : public metrics::MetricSet {
public:
PersistenceOperationMetricSet puts;
- UpdateMetricSet updates;
+ PersistenceOperationMetricSet put_condition_probes;
+ UpdateMetricSet updates;
PersistenceOperationMetricSet update_puts;
PersistenceOperationMetricSet update_gets;
PersistenceOperationMetricSet update_metadata_gets;
PersistenceOperationMetricSet removes;
+ PersistenceOperationMetricSet remove_condition_probes;
PersistenceOperationMetricSet removelocations;
PersistenceOperationMetricSet gets;
PersistenceOperationMetricSet stats;
PersistenceOperationMetricSet getbucketlists;
- VisitorMetricSet visits;
- metrics::DoubleAverageMetric stateTransitionTime;
- metrics::DoubleAverageMetric set_cluster_state_processing_time;
- metrics::DoubleAverageMetric activate_cluster_state_processing_time;
- metrics::DoubleAverageMetric recoveryModeTime;
- metrics::LongValueMetric docsStored;
- metrics::LongValueMetric bytesStored;
- BucketDbMetrics mutable_dbs;
- BucketDbMetrics read_only_dbs;
+ VisitorMetricSet visits;
+ metrics::DoubleAverageMetric stateTransitionTime;
+ metrics::DoubleAverageMetric set_cluster_state_processing_time;
+ metrics::DoubleAverageMetric activate_cluster_state_processing_time;
+ metrics::DoubleAverageMetric recoveryModeTime;
+ metrics::LongValueMetric docsStored;
+ metrics::LongValueMetric bytesStored;
+ BucketDbMetrics mutable_dbs;
+ BucketDbMetrics read_only_dbs;
explicit DistributorMetricSet();
~DistributorMetricSet() override;
diff --git a/storage/src/vespa/storage/distributor/externaloperationhandler.cpp b/storage/src/vespa/storage/distributor/externaloperationhandler.cpp
index 6cb404aaa0a..d6bb5562a07 100644
--- a/storage/src/vespa/storage/distributor/externaloperationhandler.cpp
+++ b/storage/src/vespa/storage/distributor/externaloperationhandler.cpp
@@ -332,7 +332,9 @@ bool ExternalOperationHandler::onPut(const std::shared_ptr<api::PutCommand>& cmd
if (allow) {
_op = std::make_shared<PutOperation>(_node_ctx, _op_ctx,
_op_ctx.bucket_space_repo().get(bucket_space),
- std::move(cmd), getMetrics().puts, std::move(handle));
+ std::move(cmd),
+ getMetrics().puts, getMetrics().put_condition_probes,
+ std::move(handle));
} else {
_msg_sender.sendUp(makeConcurrentMutationRejectionReply(*cmd, cmd->getDocumentId(), metrics));
}
@@ -386,7 +388,8 @@ bool ExternalOperationHandler::onRemove(const std::shared_ptr<api::RemoveCommand
auto &distributorBucketSpace(_op_ctx.bucket_space_repo().get(bucket_space));
_op = std::make_shared<RemoveOperation>(_node_ctx, _op_ctx, distributorBucketSpace, std::move(cmd),
- getMetrics().removes, std::move(handle));
+ getMetrics().removes, getMetrics().remove_condition_probes,
+ std::move(handle));
} else {
_msg_sender.sendUp(makeConcurrentMutationRejectionReply(*cmd, cmd->getDocumentId(), metrics));
}
diff --git a/storage/src/vespa/storage/distributor/operations/external/check_condition.cpp b/storage/src/vespa/storage/distributor/operations/external/check_condition.cpp
index 9f7dbcaa132..fc619d9eb23 100644
--- a/storage/src/vespa/storage/distributor/operations/external/check_condition.cpp
+++ b/storage/src/vespa/storage/distributor/operations/external/check_condition.cpp
@@ -58,7 +58,7 @@ CheckCondition::CheckCondition(const document::Bucket& bucket,
const documentapi::TestAndSetCondition& tas_condition,
const DistributorBucketSpace& bucket_space,
const DistributorNodeContext& node_ctx,
- PersistenceOperationMetricSet& metric,
+ PersistenceOperationMetricSet& condition_probe_metrics,
uint32_t trace_level,
private_ctor_tag)
: _doc_id_bucket(bucket),
@@ -66,7 +66,8 @@ CheckCondition::CheckCondition(const document::Bucket& bucket,
_node_ctx(node_ctx),
_cluster_state_version_at_creation_time(_bucket_space.getClusterState().getVersion()),
_cond_get_op(),
- _sent_message_map()
+ _sent_message_map(),
+ _outcome()
{
// Condition checks only return metadata back to the distributor and thus have an empty fieldset.
// Side note: the BucketId provided to the GetCommand is ignored; GetOperation computes explicitly from the doc ID.
@@ -75,8 +76,8 @@ CheckCondition::CheckCondition(const document::Bucket& bucket,
get_cmd->getTrace().setLevel(trace_level);
_cond_get_op = std::make_shared<GetOperation>(_node_ctx, _bucket_space,
_bucket_space.getBucketDatabase().acquire_read_guard(),
- std::move(get_cmd),
- metric, api::InternalReadConsistency::Strong);
+ std::move(get_cmd), condition_probe_metrics,
+ api::InternalReadConsistency::Strong);
}
CheckCondition::~CheckCondition() = default;
@@ -220,7 +221,7 @@ CheckCondition::create_if_inconsistent_replicas(const document::Bucket& bucket,
const documentapi::TestAndSetCondition& tas_condition,
const DistributorNodeContext& node_ctx,
const DistributorStripeOperationContext& op_ctx,
- PersistenceOperationMetricSet& metric,
+ PersistenceOperationMetricSet& condition_probe_metrics,
uint32_t trace_level)
{
// TODO move this check to the caller?
@@ -237,8 +238,8 @@ CheckCondition::create_if_inconsistent_replicas(const document::Bucket& bucket,
if (!all_nodes_support_document_condition_probe(entries, op_ctx)) {
return {}; // Want write-repair, but one or more nodes are too old to use the feature
}
- return std::make_shared<CheckCondition>(bucket, doc_id, tas_condition, bucket_space,
- node_ctx, metric, trace_level, private_ctor_tag{});
+ return std::make_shared<CheckCondition>(bucket, doc_id, tas_condition, bucket_space, node_ctx,
+ condition_probe_metrics, trace_level, private_ctor_tag{});
}
}
diff --git a/storage/src/vespa/storage/distributor/operations/external/check_condition.h b/storage/src/vespa/storage/distributor/operations/external/check_condition.h
index 062c9bb831d..382aec6242c 100644
--- a/storage/src/vespa/storage/distributor/operations/external/check_condition.h
+++ b/storage/src/vespa/storage/distributor/operations/external/check_condition.h
@@ -114,7 +114,7 @@ public:
const documentapi::TestAndSetCondition& tas_condition,
const DistributorBucketSpace& bucket_space,
const DistributorNodeContext& node_ctx,
- PersistenceOperationMetricSet& metric,
+ PersistenceOperationMetricSet& condition_probe_metrics,
uint32_t trace_level,
private_ctor_tag);
~CheckCondition();
@@ -135,7 +135,7 @@ public:
const documentapi::TestAndSetCondition& tas_condition,
const DistributorNodeContext& node_ctx,
const DistributorStripeOperationContext& op_ctx,
- PersistenceOperationMetricSet& metric,
+ PersistenceOperationMetricSet& condition_probe_metrics,
uint32_t trace_level);
private:
[[nodiscard]] bool replica_set_changed_after_get_operation() const;
diff --git a/storage/src/vespa/storage/distributor/operations/external/putoperation.cpp b/storage/src/vespa/storage/distributor/operations/external/putoperation.cpp
index 952aeff0800..8c6fdb314f3 100644
--- a/storage/src/vespa/storage/distributor/operations/external/putoperation.cpp
+++ b/storage/src/vespa/storage/distributor/operations/external/putoperation.cpp
@@ -26,6 +26,7 @@ PutOperation::PutOperation(const DistributorNodeContext& node_ctx,
DistributorBucketSpace& bucket_space,
std::shared_ptr<api::PutCommand> msg,
PersistenceOperationMetricSet& metric,
+ PersistenceOperationMetricSet& condition_probe_metrics,
SequencingHandle sequencing_handle)
: SequencedOperation(std::move(sequencing_handle)),
_tracker_instance(metric, std::make_shared<api::PutReply>(*msg), node_ctx, op_ctx, msg->getTimestamp()),
@@ -34,7 +35,7 @@ PutOperation::PutOperation(const DistributorNodeContext& node_ctx,
_doc_id_bucket_id(document::BucketIdFactory{}.getBucketId(_msg->getDocumentId())),
_node_ctx(node_ctx),
_op_ctx(op_ctx),
- _temp_metric(metric), // TODO
+ _condition_probe_metrics(condition_probe_metrics),
_bucket_space(bucket_space)
{
}
@@ -156,7 +157,7 @@ void PutOperation::start_conditional_put(DistributorStripeMessageSender& sender)
document::Bucket bucket(_msg->getBucket().getBucketSpace(), _doc_id_bucket_id);
_check_condition = CheckCondition::create_if_inconsistent_replicas(bucket, _bucket_space, _msg->getDocumentId(),
_msg->getCondition(), _node_ctx, _op_ctx,
- _temp_metric, _msg->getTrace().getLevel());
+ _condition_probe_metrics, _msg->getTrace().getLevel());
if (!_check_condition) {
start_direct_put_dispatch(sender);
} else {
diff --git a/storage/src/vespa/storage/distributor/operations/external/putoperation.h b/storage/src/vespa/storage/distributor/operations/external/putoperation.h
index 6befb8d3e38..635accc1865 100644
--- a/storage/src/vespa/storage/distributor/operations/external/putoperation.h
+++ b/storage/src/vespa/storage/distributor/operations/external/putoperation.h
@@ -28,6 +28,7 @@ public:
DistributorBucketSpace& bucketSpace,
std::shared_ptr<api::PutCommand> msg,
PersistenceOperationMetricSet& metric,
+ PersistenceOperationMetricSet& condition_probe_metrics,
SequencingHandle sequencingHandle = SequencingHandle());
~PutOperation() override;
@@ -44,7 +45,7 @@ private:
document::BucketId _doc_id_bucket_id;
const DistributorNodeContext& _node_ctx;
DistributorStripeOperationContext& _op_ctx;
- PersistenceOperationMetricSet& _temp_metric;
+ PersistenceOperationMetricSet& _condition_probe_metrics;
DistributorBucketSpace& _bucket_space;
std::shared_ptr<CheckCondition> _check_condition;
diff --git a/storage/src/vespa/storage/distributor/operations/external/removeoperation.cpp b/storage/src/vespa/storage/distributor/operations/external/removeoperation.cpp
index 59ae4120fd6..96182b0744f 100644
--- a/storage/src/vespa/storage/distributor/operations/external/removeoperation.cpp
+++ b/storage/src/vespa/storage/distributor/operations/external/removeoperation.cpp
@@ -16,6 +16,7 @@ RemoveOperation::RemoveOperation(const DistributorNodeContext& node_ctx,
DistributorBucketSpace& bucketSpace,
std::shared_ptr<api::RemoveCommand> msg,
PersistenceOperationMetricSet& metric,
+ PersistenceOperationMetricSet& condition_probe_metrics,
SequencingHandle sequencingHandle)
: SequencedOperation(std::move(sequencingHandle)),
_tracker_instance(metric,
@@ -26,7 +27,7 @@ RemoveOperation::RemoveOperation(const DistributorNodeContext& node_ctx,
_doc_id_bucket_id(document::BucketIdFactory{}.getBucketId(_msg->getDocumentId())),
_node_ctx(node_ctx),
_op_ctx(op_ctx),
- _temp_metric(metric), // TODO
+ _condition_probe_metrics(condition_probe_metrics),
_bucket_space(bucketSpace),
_check_condition()
{
@@ -48,7 +49,7 @@ void RemoveOperation::start_conditional_remove(DistributorStripeMessageSender& s
document::Bucket bucket(_msg->getBucket().getBucketSpace(), _doc_id_bucket_id);
_check_condition = CheckCondition::create_if_inconsistent_replicas(bucket, _bucket_space, _msg->getDocumentId(),
_msg->getCondition(), _node_ctx, _op_ctx,
- _temp_metric, _msg->getTrace().getLevel());
+ _condition_probe_metrics, _msg->getTrace().getLevel());
if (!_check_condition) {
start_direct_remove_dispatch(sender);
} else {
diff --git a/storage/src/vespa/storage/distributor/operations/external/removeoperation.h b/storage/src/vespa/storage/distributor/operations/external/removeoperation.h
index 349a6182937..9f3a98294ea 100644
--- a/storage/src/vespa/storage/distributor/operations/external/removeoperation.h
+++ b/storage/src/vespa/storage/distributor/operations/external/removeoperation.h
@@ -19,6 +19,7 @@ public:
DistributorBucketSpace& bucketSpace,
std::shared_ptr<api::RemoveCommand> msg,
PersistenceOperationMetricSet& metric,
+ PersistenceOperationMetricSet& condition_probe_metrics,
SequencingHandle sequencingHandle = SequencingHandle());
~RemoveOperation() override;
@@ -36,7 +37,7 @@ private:
document::BucketId _doc_id_bucket_id;
const DistributorNodeContext& _node_ctx;
DistributorStripeOperationContext& _op_ctx;
- PersistenceOperationMetricSet& _temp_metric;
+ PersistenceOperationMetricSet& _condition_probe_metrics;
DistributorBucketSpace& _bucket_space;
std::shared_ptr<CheckCondition> _check_condition;
diff --git a/storage/src/vespa/storage/distributor/operations/external/twophaseupdateoperation.cpp b/storage/src/vespa/storage/distributor/operations/external/twophaseupdateoperation.cpp
index 0cb4b223c11..73c65f54b21 100644
--- a/storage/src/vespa/storage/distributor/operations/external/twophaseupdateoperation.cpp
+++ b/storage/src/vespa/storage/distributor/operations/external/twophaseupdateoperation.cpp
@@ -34,6 +34,7 @@ TwoPhaseUpdateOperation::TwoPhaseUpdateOperation(
: SequencedOperation(std::move(sequencingHandle)),
_updateMetric(metrics.updates),
_putMetric(metrics.update_puts),
+ _put_condition_probe_metrics(metrics.put_condition_probes), // Updates never trigger put write repair, so we sneakily use a ref to someone else
_getMetric(metrics.update_gets),
_metadata_get_metrics(metrics.update_metadata_gets),
_updateCmd(std::move(msg)),
@@ -263,7 +264,7 @@ TwoPhaseUpdateOperation::schedulePutsWithUpdatedDocument(std::shared_ptr<documen
document::Bucket bucket(_updateCmd->getBucket().getBucketSpace(), document::BucketId(0));
auto put = std::make_shared<api::PutCommand>(bucket, doc, putTimestamp);
copyMessageSettings(*_updateCmd, *put);
- auto putOperation = std::make_shared<PutOperation>(_node_ctx, _op_ctx, _bucketSpace, std::move(put), _putMetric);
+ auto putOperation = std::make_shared<PutOperation>(_node_ctx, _op_ctx, _bucketSpace, std::move(put), _putMetric, _put_condition_probe_metrics);
PutOperation & op = *putOperation;
IntermediateMessageSender intermediate(_sentMessageMap, std::move(putOperation), sender);
op.start(intermediate, _node_ctx.clock().getSystemTime());
diff --git a/storage/src/vespa/storage/distributor/operations/external/twophaseupdateoperation.h b/storage/src/vespa/storage/distributor/operations/external/twophaseupdateoperation.h
index 486ed766510..d2ad5359fa6 100644
--- a/storage/src/vespa/storage/distributor/operations/external/twophaseupdateoperation.h
+++ b/storage/src/vespa/storage/distributor/operations/external/twophaseupdateoperation.h
@@ -139,6 +139,7 @@ private:
UpdateMetricSet& _updateMetric;
PersistenceOperationMetricSet& _putMetric;
+ PersistenceOperationMetricSet& _put_condition_probe_metrics;
PersistenceOperationMetricSet& _getMetric;
PersistenceOperationMetricSet& _metadata_get_metrics;
std::shared_ptr<api::UpdateCommand> _updateCmd;
diff --git a/storage/src/vespa/storage/distributor/persistence_operation_metric_set.cpp b/storage/src/vespa/storage/distributor/persistence_operation_metric_set.cpp
index 944b4bafa0a..e66884c4060 100644
--- a/storage/src/vespa/storage/distributor/persistence_operation_metric_set.cpp
+++ b/storage/src/vespa/storage/distributor/persistence_operation_metric_set.cpp
@@ -58,8 +58,8 @@ PersistenceFailuresMetricSet::clone(std::vector<Metric::UP>& ownerList, CopyType
if (copyType == INACTIVE) {
return MetricSet::clone(ownerList, INACTIVE, owner, includeUnused);
}
- return (PersistenceFailuresMetricSet*)
- (new PersistenceFailuresMetricSet(owner))->assignValues(*this);
+ return dynamic_cast<PersistenceFailuresMetricSet*>(
+ (new PersistenceFailuresMetricSet(owner))->assignValues(*this));
}
PersistenceOperationMetricSet::PersistenceOperationMetricSet(const std::string& name, MetricSet* owner)
@@ -69,6 +69,11 @@ PersistenceOperationMetricSet::PersistenceOperationMetricSet(const std::string&
failures(this)
{ }
+PersistenceOperationMetricSet::PersistenceOperationMetricSet(const std::string& name)
+ : PersistenceOperationMetricSet(name, nullptr)
+{
+}
+
PersistenceOperationMetricSet::~PersistenceOperationMetricSet() = default;
MetricSet *
@@ -78,9 +83,8 @@ PersistenceOperationMetricSet::clone(std::vector<Metric::UP>& ownerList, CopyTyp
if (copyType == INACTIVE) {
return MetricSet::clone(ownerList, INACTIVE, owner, includeUnused);
}
- return (PersistenceOperationMetricSet*)
- (new PersistenceOperationMetricSet(getName(), owner))
- ->assignValues(*this);
+ return dynamic_cast<PersistenceOperationMetricSet*>(
+ (new PersistenceOperationMetricSet(getName(), owner))->assignValues(*this));
}
void
diff --git a/storage/src/vespa/storage/distributor/persistence_operation_metric_set.h b/storage/src/vespa/storage/distributor/persistence_operation_metric_set.h
index b818d1bdd9f..eb1c3f57252 100644
--- a/storage/src/vespa/storage/distributor/persistence_operation_metric_set.h
+++ b/storage/src/vespa/storage/distributor/persistence_operation_metric_set.h
@@ -40,10 +40,11 @@ class PersistenceOperationMetricSet : public metrics::MetricSet
mutable std::mutex _mutex;
public:
metrics::DoubleAverageMetric latency;
- metrics::LongCountMetric ok;
+ metrics::LongCountMetric ok;
PersistenceFailuresMetricSet failures;
- PersistenceOperationMetricSet(const std::string& name, metrics::MetricSet* owner = nullptr);
+ PersistenceOperationMetricSet(const std::string& name, metrics::MetricSet* owner);
+ explicit PersistenceOperationMetricSet(const std::string& name);
~PersistenceOperationMetricSet() override;
MetricSet * clone(std::vector<Metric::UP>& ownerList, CopyType copyType,
@@ -57,7 +58,6 @@ public:
*/
void updateFromResult(const api::ReturnCode& result);
- friend class LockWrapper;
class LockWrapper {
std::unique_lock<std::mutex> _lock;
PersistenceOperationMetricSet& _self;
diff --git a/tenant-cd-api/src/main/java/ai/vespa/hosted/cd/EnabledInRegions.java b/tenant-cd-api/src/main/java/ai/vespa/hosted/cd/EnabledInRegions.java
index db2e5ac5f95..9ccd0588d6d 100644
--- a/tenant-cd-api/src/main/java/ai/vespa/hosted/cd/EnabledInRegions.java
+++ b/tenant-cd-api/src/main/java/ai/vespa/hosted/cd/EnabledInRegions.java
@@ -35,7 +35,7 @@ class EnabledInRegionsCondition implements ExecutionCondition {
return ConditionEvaluationResult.enabled(EnabledInRegions.class.getSimpleName() + " is not present");
List<String> enablingRegions = List.of(annotation.get().value());
- String thisRegion = TestRuntime.get().application().instance();
+ String thisRegion = TestRuntime.get().zone().region();
String reason = "Enabled in: %s. Current region: %s.".formatted(enablingRegions.isEmpty() ? "no regions" : "regions " + String.join(", ", enablingRegions), thisRegion);
return enablingRegions.contains(thisRegion) ? ConditionEvaluationResult.enabled(reason) : ConditionEvaluationResult.disabled(reason);
}