diff options
1372 files changed, 20874 insertions, 12683 deletions
diff --git a/abi-check-plugin/pom.xml b/abi-check-plugin/pom.xml index c19093e5ffb..03b2e7c8b60 100644 --- a/abi-check-plugin/pom.xml +++ b/abi-check-plugin/pom.xml @@ -45,7 +45,6 @@ <dependency> <groupId>org.junit.jupiter</groupId> <artifactId>junit-jupiter-api</artifactId> - <version>5.3.1</version> <scope>test</scope> </dependency> <dependency> diff --git a/annotations/src/main/java/com/yahoo/api/annotations/Beta.java b/annotations/src/main/java/com/yahoo/api/annotations/Beta.java new file mode 100644 index 00000000000..c555fbbcb57 --- /dev/null +++ b/annotations/src/main/java/com/yahoo/api/annotations/Beta.java @@ -0,0 +1,23 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +package com.yahoo.api.annotations; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * Signifies that the annotated Java type/method/constructor is under development and may still change before they stabilize. + * Should only be used on a type that part of a package annotated with {@link PublicApi}. 
+ * + * @see <a href="https://docs.vespa.ai/en/vespa-versions.html">https://docs.vespa.ai/en/vespa-versions.html</a> + * + * @author bjorncs + */ +@Retention(RetentionPolicy.CLASS) +@Target({ + ElementType.CONSTRUCTOR, + ElementType.METHOD, + ElementType.TYPE +}) +public @interface Beta {} diff --git a/application/abi-spec.json b/application/abi-spec.json index 5c298471b9c..21eacb152c0 100644 --- a/application/abi-spec.json +++ b/application/abi-spec.json @@ -93,7 +93,9 @@ "public void <init>()", "public void <init>(com.yahoo.application.MockApplicationConfig)", "public com.yahoo.application.MockApplicationConfig$Builder mystruct(com.yahoo.application.MockApplicationConfig$Mystruct$Builder)", + "public com.yahoo.application.MockApplicationConfig$Builder mystruct(java.util.function.Consumer)", "public com.yahoo.application.MockApplicationConfig$Builder mystructlist(com.yahoo.application.MockApplicationConfig$Mystructlist$Builder)", + "public com.yahoo.application.MockApplicationConfig$Builder mystructlist(java.util.function.Consumer)", "public com.yahoo.application.MockApplicationConfig$Builder mystructlist(java.util.List)", "public com.yahoo.application.MockApplicationConfig$Builder mylist(java.lang.String)", "public com.yahoo.application.MockApplicationConfig$Builder mylist(java.util.Collection)", @@ -101,6 +103,7 @@ "public com.yahoo.application.MockApplicationConfig$Builder mymap(java.util.Map)", "public com.yahoo.application.MockApplicationConfig$Builder mymapstruct(java.lang.String, com.yahoo.application.MockApplicationConfig$Mymapstruct$Builder)", "public com.yahoo.application.MockApplicationConfig$Builder mymapstruct(java.util.Map)", + "public com.yahoo.application.MockApplicationConfig$Builder mymapstruct(java.lang.String, java.util.function.Consumer)", "public final boolean dispatchGetConfig(com.yahoo.config.ConfigInstance$Producer)", "public final java.lang.String getDefMd5()", "public final java.lang.String getDefName()", @@ -347,7 +350,7 @@ "public 
final com.yahoo.processing.Response process(com.yahoo.component.ComponentSpecification, com.yahoo.processing.Request)", "protected abstract com.yahoo.processing.Response doProcess(com.yahoo.component.chain.Chain, com.yahoo.processing.Request)", "public final byte[] processAndRender(com.yahoo.component.ComponentSpecification, com.yahoo.component.ComponentSpecification, com.yahoo.processing.Request)", - "protected abstract com.google.common.util.concurrent.ListenableFuture doProcessAndRender(com.yahoo.component.ComponentSpecification, com.yahoo.processing.Request, com.yahoo.processing.rendering.Renderer, java.io.ByteArrayOutputStream)", + "protected abstract java.util.concurrent.CompletableFuture doProcessAndRender(com.yahoo.component.ComponentSpecification, com.yahoo.processing.Request, com.yahoo.processing.rendering.Renderer, java.io.ByteArrayOutputStream)", "protected com.yahoo.component.chain.Chain getChain(com.yahoo.component.ComponentSpecification)", "protected final com.yahoo.processing.rendering.Renderer getRenderer(com.yahoo.component.ComponentSpecification)", "protected abstract com.yahoo.processing.rendering.Renderer doGetRenderer(com.yahoo.component.ComponentSpecification)" diff --git a/application/src/main/java/com/yahoo/application/Application.java b/application/src/main/java/com/yahoo/application/Application.java index c3ae7e3770c..1b81897b230 100644 --- a/application/src/main/java/com/yahoo/application/Application.java +++ b/application/src/main/java/com/yahoo/application/Application.java @@ -7,7 +7,7 @@ import ai.vespa.rankingexpression.importer.onnx.OnnxImporter; import ai.vespa.rankingexpression.importer.tensorflow.TensorFlowImporter; import ai.vespa.rankingexpression.importer.vespa.VespaImporter; import ai.vespa.rankingexpression.importer.xgboost.XGBoostImporter; -import com.google.common.annotations.Beta; +import com.yahoo.api.annotations.Beta; import com.yahoo.application.container.JDisc; import 
com.yahoo.application.container.impl.StandaloneContainerRunner; import com.yahoo.application.content.ContentCluster; diff --git a/application/src/main/java/com/yahoo/application/ApplicationBuilder.java b/application/src/main/java/com/yahoo/application/ApplicationBuilder.java index 639d546a442..0bfeaea475c 100644 --- a/application/src/main/java/com/yahoo/application/ApplicationBuilder.java +++ b/application/src/main/java/com/yahoo/application/ApplicationBuilder.java @@ -1,7 +1,7 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.application; -import com.google.common.annotations.Beta; +import com.yahoo.api.annotations.Beta; import com.yahoo.config.application.api.ApplicationPackage; import com.yahoo.text.StringUtilities; import com.yahoo.text.Utf8; diff --git a/application/src/main/java/com/yahoo/application/container/DocumentProcessing.java b/application/src/main/java/com/yahoo/application/container/DocumentProcessing.java index 1dcf138f9a9..f86c11d431c 100644 --- a/application/src/main/java/com/yahoo/application/container/DocumentProcessing.java +++ b/application/src/main/java/com/yahoo/application/container/DocumentProcessing.java @@ -1,7 +1,7 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.application.container; -import com.google.common.annotations.Beta; +import com.yahoo.api.annotations.Beta; import com.yahoo.component.ComponentSpecification; import com.yahoo.docproc.DocprocExecutor; import com.yahoo.docproc.DocprocService; diff --git a/application/src/main/java/com/yahoo/application/container/JDisc.java b/application/src/main/java/com/yahoo/application/container/JDisc.java index 1b04445d6d5..9370a8f7ee0 100644 --- a/application/src/main/java/com/yahoo/application/container/JDisc.java +++ b/application/src/main/java/com/yahoo/application/container/JDisc.java @@ -1,7 +1,7 @@ // Copyright Yahoo. 
Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.application.container; -import com.google.common.annotations.Beta; +import com.yahoo.api.annotations.Beta; import com.google.inject.AbstractModule; import com.google.inject.Module; import com.google.inject.name.Names; diff --git a/application/src/main/java/com/yahoo/application/container/Processing.java b/application/src/main/java/com/yahoo/application/container/Processing.java index 49432f2706a..4ca367ea720 100644 --- a/application/src/main/java/com/yahoo/application/container/Processing.java +++ b/application/src/main/java/com/yahoo/application/container/Processing.java @@ -1,8 +1,7 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.application.container; -import com.google.common.annotations.Beta; -import com.google.common.util.concurrent.ListenableFuture; +import com.yahoo.api.annotations.Beta; import com.yahoo.component.ComponentSpecification; import com.yahoo.component.chain.Chain; import com.yahoo.processing.Processor; @@ -15,6 +14,7 @@ import com.yahoo.processing.rendering.Renderer; import java.io.ByteArrayOutputStream; import java.io.IOException; +import java.util.concurrent.CompletableFuture; /** * @author Einar M R Rosenvinge @@ -41,14 +41,14 @@ public final class Processing extends ProcessingBase<Request, Response, Processo } @Override - protected ListenableFuture<Boolean> doProcessAndRender(ComponentSpecification chainSpec, - Request request, - Renderer<Response> renderer, - ByteArrayOutputStream stream) throws IOException { + protected CompletableFuture<Boolean> doProcessAndRender(ComponentSpecification chainSpec, + Request request, + Renderer<Response> renderer, + ByteArrayOutputStream stream) throws IOException { Execution execution = handler.createExecution(getChain(chainSpec), request); Response response = execution.process(request); - return renderer.render(stream, 
response, execution, request); + return renderer.renderResponse(stream, response, execution, request); } @Override diff --git a/application/src/main/java/com/yahoo/application/container/ProcessingBase.java b/application/src/main/java/com/yahoo/application/container/ProcessingBase.java index 5ee791ca3d1..96866b94e29 100644 --- a/application/src/main/java/com/yahoo/application/container/ProcessingBase.java +++ b/application/src/main/java/com/yahoo/application/container/ProcessingBase.java @@ -1,21 +1,19 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.application.container; -import com.google.common.annotations.Beta; -import com.google.common.util.concurrent.ListenableFuture; +import com.yahoo.api.annotations.Beta; import com.yahoo.component.ComponentSpecification; import com.yahoo.component.chain.Chain; import com.yahoo.processing.Processor; import com.yahoo.processing.Request; import com.yahoo.processing.Response; import com.yahoo.processing.execution.chain.ChainRegistry; -import com.yahoo.processing.rendering.AsynchronousRenderer; import com.yahoo.processing.rendering.Renderer; import java.io.ByteArrayOutputStream; import java.io.IOException; +import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; -import java.util.concurrent.Executors; /** * @author gjoranv @@ -45,13 +43,13 @@ public abstract class ProcessingBase<REQUEST extends Request, RESPONSE extends R REQUEST request) throws IOException { ByteArrayOutputStream stream = new ByteArrayOutputStream(); Renderer<RESPONSE> renderer = getRenderer(rendererSpec); - ListenableFuture<Boolean> renderTask = doProcessAndRender(chainSpec, request, renderer, stream); + CompletableFuture<Boolean> renderTask = doProcessAndRender(chainSpec, request, renderer, stream); awaitFuture(renderTask); return stream.toByteArray(); } - private void awaitFuture(ListenableFuture<Boolean> renderTask) { + private void 
awaitFuture(CompletableFuture<Boolean> renderTask) { try { renderTask.get(); } catch (InterruptedException | ExecutionException e) { @@ -59,10 +57,10 @@ public abstract class ProcessingBase<REQUEST extends Request, RESPONSE extends R } } - protected abstract ListenableFuture<Boolean> doProcessAndRender(ComponentSpecification chainSpec, - REQUEST request, - Renderer<RESPONSE> renderer, - ByteArrayOutputStream stream) throws IOException ; + protected abstract CompletableFuture<Boolean> doProcessAndRender(ComponentSpecification chainSpec, + REQUEST request, + Renderer<RESPONSE> renderer, + ByteArrayOutputStream stream) throws IOException ; protected Chain<PROCESSOR> getChain(ComponentSpecification chainSpec) { Chain<PROCESSOR> chain = getChains().getComponent(chainSpec); diff --git a/application/src/main/java/com/yahoo/application/container/Search.java b/application/src/main/java/com/yahoo/application/container/Search.java index 34e6e30e47a..6a2f728fbcc 100644 --- a/application/src/main/java/com/yahoo/application/container/Search.java +++ b/application/src/main/java/com/yahoo/application/container/Search.java @@ -1,8 +1,7 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.application.container; -import com.google.common.annotations.Beta; -import com.google.common.util.concurrent.ListenableFuture; +import com.yahoo.api.annotations.Beta; import com.yahoo.component.ComponentSpecification; import com.yahoo.component.chain.Chain; import com.yahoo.processing.execution.chain.ChainRegistry; @@ -12,10 +11,10 @@ import com.yahoo.search.Result; import com.yahoo.search.Searcher; import com.yahoo.search.handler.HttpSearchResponse; import com.yahoo.search.handler.SearchHandler; -import com.yahoo.search.searchchain.SearchChainRegistry; import java.io.ByteArrayOutputStream; import java.io.IOException; +import java.util.concurrent.CompletableFuture; /** * @author Einar M R Rosenvinge @@ -41,12 +40,12 @@ public final class Search extends ProcessingBase<Query, Result, Searcher> { } @Override - protected ListenableFuture<Boolean> doProcessAndRender(ComponentSpecification chainSpec, - Query request, - Renderer<Result> renderer, - ByteArrayOutputStream stream) throws IOException { + protected CompletableFuture<Boolean> doProcessAndRender(ComponentSpecification chainSpec, + Query request, + Renderer<Result> renderer, + ByteArrayOutputStream stream) throws IOException { Result result = process(chainSpec, request); - return HttpSearchResponse.waitableRender(result, result.getQuery(), renderer, stream); + return HttpSearchResponse.asyncRender(result, result.getQuery(), renderer, stream); } @Override diff --git a/application/src/main/java/com/yahoo/application/container/SynchronousRequestResponseHandler.java b/application/src/main/java/com/yahoo/application/container/SynchronousRequestResponseHandler.java index a53a4bdf97c..87d1eff05ab 100644 --- a/application/src/main/java/com/yahoo/application/container/SynchronousRequestResponseHandler.java +++ b/application/src/main/java/com/yahoo/application/container/SynchronousRequestResponseHandler.java @@ -1,7 +1,7 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. 
See LICENSE in the project root. package com.yahoo.application.container; -import com.google.common.annotations.Beta; +import com.yahoo.api.annotations.Beta; import com.yahoo.application.container.handler.Request; import com.yahoo.application.container.handler.Response; import com.yahoo.jdisc.handler.CompletionHandler; diff --git a/application/src/main/java/com/yahoo/application/container/handler/Headers.java b/application/src/main/java/com/yahoo/application/container/handler/Headers.java index e5264d08dd5..3e0c148bcc7 100644 --- a/application/src/main/java/com/yahoo/application/container/handler/Headers.java +++ b/application/src/main/java/com/yahoo/application/container/handler/Headers.java @@ -1,7 +1,7 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.application.container.handler; -import com.google.common.annotations.Beta; +import com.yahoo.api.annotations.Beta; import com.yahoo.jdisc.HeaderFields; import javax.annotation.concurrent.NotThreadSafe; diff --git a/application/src/main/java/com/yahoo/application/container/handler/Request.java b/application/src/main/java/com/yahoo/application/container/handler/Request.java index 620d6080037..1606af498fa 100644 --- a/application/src/main/java/com/yahoo/application/container/handler/Request.java +++ b/application/src/main/java/com/yahoo/application/container/handler/Request.java @@ -1,7 +1,7 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.application.container.handler; -import com.google.common.annotations.Beta; +import com.yahoo.api.annotations.Beta; import net.jcip.annotations.Immutable; import java.nio.charset.StandardCharsets; diff --git a/application/src/main/java/com/yahoo/application/container/handler/Response.java b/application/src/main/java/com/yahoo/application/container/handler/Response.java index 0fc1261197c..91632068328 100644 --- a/application/src/main/java/com/yahoo/application/container/handler/Response.java +++ b/application/src/main/java/com/yahoo/application/container/handler/Response.java @@ -1,7 +1,7 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.application.container.handler; -import com.google.common.annotations.Beta; +import com.yahoo.api.annotations.Beta; import com.yahoo.jdisc.http.HttpHeaders; import com.yahoo.text.Utf8; import net.jcip.annotations.Immutable; diff --git a/application/src/main/java/com/yahoo/application/content/ContentCluster.java b/application/src/main/java/com/yahoo/application/content/ContentCluster.java index ca26971ef93..ee29ee64a9d 100644 --- a/application/src/main/java/com/yahoo/application/content/ContentCluster.java +++ b/application/src/main/java/com/yahoo/application/content/ContentCluster.java @@ -1,7 +1,7 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.application.content; -import com.google.common.annotations.Beta; +import com.yahoo.api.annotations.Beta; import java.nio.file.Path; import java.util.Collections; diff --git a/application/src/test/java/com/yahoo/application/ApplicationFacade.java b/application/src/test/java/com/yahoo/application/ApplicationFacade.java index c84f7fca3f2..aaca14d510b 100644 --- a/application/src/test/java/com/yahoo/application/ApplicationFacade.java +++ b/application/src/test/java/com/yahoo/application/ApplicationFacade.java @@ -1,7 +1,7 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.application; -import com.google.common.annotations.Beta; +import com.yahoo.api.annotations.Beta; import com.yahoo.application.container.handler.Request; import com.yahoo.application.container.handler.Response; import com.yahoo.component.Component; diff --git a/application/src/test/java/com/yahoo/application/container/docprocs/MockDispatchDocproc.java b/application/src/test/java/com/yahoo/application/container/docprocs/MockDispatchDocproc.java index d8c86728160..d069b345b93 100644 --- a/application/src/test/java/com/yahoo/application/container/docprocs/MockDispatchDocproc.java +++ b/application/src/test/java/com/yahoo/application/container/docprocs/MockDispatchDocproc.java @@ -1,10 +1,8 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.application.container.docprocs; -import com.google.common.util.concurrent.ListenableFuture; import com.yahoo.docproc.DocumentProcessor; import com.yahoo.docproc.Processing; -import com.yahoo.document.Document; import com.yahoo.document.DocumentOperation; import com.yahoo.document.DocumentPut; import com.yahoo.documentapi.messagebus.protocol.DocumentMessage; @@ -40,7 +38,7 @@ public class MockDispatchDocproc extends DocumentProcessor { public Progress process(Processing processing) { for (DocumentOperation op : processing.getDocumentOperations()) { PutDocumentMessage message = new PutDocumentMessage((DocumentPut)op); - ListenableFuture<Response> future = createRequest(message).dispatch(); + var future = createRequest(message).dispatch(); try { responses.add(future.get()); } catch (ExecutionException | InterruptedException e) { diff --git a/athenz-identity-provider-service/src/main/java/com/yahoo/vespa/hosted/ca/restapi/CertificateAuthorityApiHandler.java b/athenz-identity-provider-service/src/main/java/com/yahoo/vespa/hosted/ca/restapi/CertificateAuthorityApiHandler.java index de9ae889e2d..fdb598639a7 100644 --- a/athenz-identity-provider-service/src/main/java/com/yahoo/vespa/hosted/ca/restapi/CertificateAuthorityApiHandler.java +++ b/athenz-identity-provider-service/src/main/java/com/yahoo/vespa/hosted/ca/restapi/CertificateAuthorityApiHandler.java @@ -6,7 +6,8 @@ import com.yahoo.container.jdisc.HttpRequest; import com.yahoo.container.jdisc.HttpResponse; import com.yahoo.container.jdisc.LoggingRequestHandler; import com.yahoo.container.jdisc.secretstore.SecretStore; -import com.yahoo.jdisc.http.servlet.ServletRequest; +import com.yahoo.jdisc.http.server.jetty.RequestUtils; + import java.util.logging.Level; import com.yahoo.restapi.ErrorResponse; import com.yahoo.restapi.Path; @@ -168,7 +169,7 @@ public class CertificateAuthorityApiHandler extends LoggingRequestHandler { } private List<X509Certificate> getRequestCertificateChain(HttpRequest 
request) { - return Optional.ofNullable(request.getJDiscRequest().context().get(ServletRequest.JDISC_REQUEST_X509CERT)) + return Optional.ofNullable(request.getJDiscRequest().context().get(RequestUtils.JDISC_REQUEST_X509CERT)) .map(X509Certificate[].class::cast) .map(Arrays::asList) .orElse(Collections.emptyList()); diff --git a/athenz-identity-provider-service/src/test/java/com/yahoo/vespa/hosted/ca/restapi/CertificateAuthorityApiTest.java b/athenz-identity-provider-service/src/test/java/com/yahoo/vespa/hosted/ca/restapi/CertificateAuthorityApiTest.java index 03ff057fa11..7bfc4ad41a4 100644 --- a/athenz-identity-provider-service/src/test/java/com/yahoo/vespa/hosted/ca/restapi/CertificateAuthorityApiTest.java +++ b/athenz-identity-provider-service/src/test/java/com/yahoo/vespa/hosted/ca/restapi/CertificateAuthorityApiTest.java @@ -2,7 +2,7 @@ package com.yahoo.vespa.hosted.ca.restapi; import com.yahoo.application.container.handler.Request; -import com.yahoo.jdisc.http.servlet.ServletRequest; +import com.yahoo.jdisc.http.server.jetty.RequestUtils; import com.yahoo.security.KeyAlgorithm; import com.yahoo.security.KeyUtils; import com.yahoo.security.Pkcs10Csr; @@ -95,7 +95,7 @@ public class CertificateAuthorityApiTest extends ContainerTester { instanceRefreshJson(csr), Request.Method.POST, principal); - request.getAttributes().put(ServletRequest.JDISC_REQUEST_X509CERT, new X509Certificate[]{certificate}); + request.getAttributes().put(RequestUtils.JDISC_REQUEST_X509CERT, new X509Certificate[]{certificate}); assertIdentityResponse(request); // POST instance refresh with ZTS client @@ -136,7 +136,7 @@ public class CertificateAuthorityApiTest extends ContainerTester { instanceRefreshJson(csr), Request.Method.POST, principal); - request.getAttributes().put(ServletRequest.JDISC_REQUEST_X509CERT, new X509Certificate[]{cert}); + request.getAttributes().put(RequestUtils.JDISC_REQUEST_X509CERT, new X509Certificate[]{cert}); assertResponse( 400, 
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"POST http://localhost:12345/ca/v1/instance/vespa.external.provider_prod_us-north-1/vespa.external/tenant/foobar failed: Mismatch between instance ID in URL path and instance ID in CSR [instanceId=foobar,instanceIdFromCsr=1.cluster1.default.app1.tenant1.us-north-1.prod.node]\"}", diff --git a/athenz-identity-provider-service/src/test/java/com/yahoo/vespa/hosted/ca/restapi/mock/PrincipalFromHeaderFilter.java b/athenz-identity-provider-service/src/test/java/com/yahoo/vespa/hosted/ca/restapi/mock/PrincipalFromHeaderFilter.java index 9ed8102190c..df98ba75dd2 100644 --- a/athenz-identity-provider-service/src/test/java/com/yahoo/vespa/hosted/ca/restapi/mock/PrincipalFromHeaderFilter.java +++ b/athenz-identity-provider-service/src/test/java/com/yahoo/vespa/hosted/ca/restapi/mock/PrincipalFromHeaderFilter.java @@ -4,7 +4,7 @@ package com.yahoo.vespa.hosted.ca.restapi.mock; import com.yahoo.jdisc.handler.ResponseHandler; import com.yahoo.jdisc.http.filter.DiscFilterRequest; import com.yahoo.jdisc.http.filter.SecurityRequestFilter; -import com.yahoo.jdisc.http.servlet.ServletRequest; +import com.yahoo.jdisc.http.server.jetty.RequestUtils; import com.yahoo.security.X509CertificateUtils; import com.yahoo.text.StringUtilities; import com.yahoo.vespa.athenz.api.AthenzPrincipal; @@ -28,7 +28,7 @@ public class PrincipalFromHeaderFilter implements SecurityRequestFilter { Optional<String> certificate = Optional.ofNullable(request.getHeader("CERTIFICATE")); certificate.ifPresent(cert -> { var x509cert = X509CertificateUtils.fromPem(StringUtilities.unescape(cert)); - request.setAttribute(ServletRequest.JDISC_REQUEST_X509CERT, new X509Certificate[]{x509cert}); + request.setAttribute(RequestUtils.JDISC_REQUEST_X509CERT, new X509Certificate[]{x509cert}); }); } } diff --git a/client/go/Makefile b/client/go/Makefile index d53e8f46e03..1d995c7f5bf 100644 --- a/client/go/Makefile +++ b/client/go/Makefile @@ -16,6 +16,13 @@ GO_FLAGS := -ldflags 
"-X github.com/vespa-engine/vespa/client/go/build.Version=$ GIT_ROOT := $(shell git rev-parse --show-toplevel) DIST_TARGETS := dist-mac dist-mac-arm64 dist-linux dist-linux-arm64 dist-win32 dist-win64 +GOPROXY_OVERRIDE := +ifndef GOPROXY +ifeq ($(shell go env GOPROXY),direct) +GOPROXY_OVERRIDE := GOPROXY=https://proxy.golang.org,direct +endif +endif + all: test checkfmt install # @@ -75,7 +82,7 @@ $(DIST_TARGETS): DIST_NAME=vespa-cli_$(VERSION)_$(GOOS)_$(GOARCH) $(DIST_TARGETS): dist-version manpages $(DIST_TARGETS): mkdir -p $(DIST)/$(DIST_NAME)/bin - env GOOS=$(GOOS) GOARCH=$(GOARCH) go build -o $(DIST)/$(DIST_NAME)/bin $(GO_FLAGS) ./... + env GOOS=$(GOOS) GOARCH=$(GOARCH) $(GOPROXY_OVERRIDE) go build -o $(DIST)/$(DIST_NAME)/bin $(GO_FLAGS) ./... cp -a $(GIT_ROOT)/LICENSE $(DIST)/$(DIST_NAME) if [ "$(GOOS)" = "windows" ]; then \ cd $(DIST) && zip -r $(DIST)/$(DIST_NAME).zip $(DIST_NAME); \ @@ -92,22 +99,27 @@ ifeq ($(VERSION),$(DEVEL_VERSION)) $(error Invalid release version: $(VERSION). Try 'git checkout vX.Y.Z' or 'env VERSION=X.Y.Z make ...') endif +install-all: all manpages + # # Development targets # install: - env GOBIN=$(BIN) go install $(GO_FLAGS) ./... + env GOBIN=$(BIN) $(GOPROXY_OVERRIDE) go install $(GO_FLAGS) ./... manpages: install mkdir -p $(SHARE)/man/man1 $(BIN)/vespa man $(SHARE)/man/man1 clean: - rm -rf $(BIN) $(SHARE) $(DIST) + rm -rf $(DIST) + rm -f $(BIN)/vespa $(SHARE)/man/man1/vespa.1 $(SHARE)/man/man1/vespa-*.1 + rmdir -p $(BIN) &> /dev/null || true + rmdir -p $(SHARE)/man/man1 &> /dev/null || true test: - go test ./... + env $(GOPROXY_OVERRIDE) go test ./... 
checkfmt: @bash -c "diff --line-format='%L' <(echo -n) <(gofmt -l .)" || { echo "one or more files need to be formatted: try make fmt to fix this automatically"; exit 1; } diff --git a/client/go/README.md b/client/go/README.md index d3803828b4c..e50f29e1961 100644 --- a/client/go/README.md +++ b/client/go/README.md @@ -9,5 +9,4 @@ Vespa documentation: https://docs.vespa.ai Developer notes: $ brew install go - $ make test - + $ make install diff --git a/client/go/auth0/auth0.go b/client/go/auth0/auth0.go index 377d56a1637..5acd4354274 100644 --- a/client/go/auth0/auth0.go +++ b/client/go/auth0/auth0.go @@ -25,14 +25,24 @@ import ( const accessTokenExpThreshold = 5 * time.Minute -var errUnauthenticated = errors.New("not logged in. Try 'vespa login'") +var errUnauthenticated = errors.New("not logged in. Try 'vespa auth login'") + +type configJsonFormat struct { + Version int `json:"version"` + Providers providers `json:"providers"` +} + +type providers struct { + Config config `json:"auth0"` +} type config struct { - Systems map[string]System `json:"systems"` + Version int `json:"version"` + Systems map[string]*System `json:"systems"` } type System struct { - Name string `json:"name"` + Name string `json:"-"` AccessToken string `json:"access_token,omitempty"` Scopes []string `json:"scopes,omitempty"` ExpiresAt time.Time `json:"expires_at"` @@ -130,19 +140,19 @@ func (a *Auth0) IsLoggedIn() bool { // The System access token needs a refresh if: // 1. the System scopes are different from the currently required scopes - (auth0 changes). // 2. the access token is expired. 
-func (a *Auth0) PrepareSystem(ctx context.Context) (System, error) { +func (a *Auth0) PrepareSystem(ctx context.Context) (*System, error) { if err := a.init(); err != nil { - return System{}, err + return nil, err } s, err := a.getSystem() if err != nil { - return System{}, err + return nil, err } if s.AccessToken == "" || scopesChanged(s) { s, err = RunLogin(ctx, a, true) if err != nil { - return System{}, err + return nil, err } } else if isExpired(s.ExpiresAt, accessTokenExpThreshold) { // check if the stored access token is expired: @@ -157,9 +167,10 @@ func (a *Auth0) PrepareSystem(ctx context.Context) (System, error) { if err != nil { // ask and guide the user through the login process: fmt.Println(fmt.Errorf("failed to renew access token, %s", err)) + fmt.Print("\n") s, err = RunLogin(ctx, a, true) if err != nil { - return System{}, err + return nil, err } } else { // persist the updated system with renewed access token @@ -170,7 +181,7 @@ func (a *Auth0) PrepareSystem(ctx context.Context) (System, error) { err = a.AddSystem(s) if err != nil { - return System{}, err + return nil, err } } } @@ -185,7 +196,7 @@ func isExpired(t time.Time, threshold time.Duration) bool { // scopesChanged compare the System scopes // with the currently required scopes. 
-func scopesChanged(s System) bool { +func scopesChanged(s *System) bool { want := auth.RequiredScopes() got := s.Scopes @@ -209,14 +220,14 @@ func scopesChanged(s System) bool { return false } -func (a *Auth0) getSystem() (System, error) { +func (a *Auth0) getSystem() (*System, error) { if err := a.init(); err != nil { - return System{}, err + return nil, err } s, ok := a.config.Systems[a.system] if !ok { - return System{}, fmt.Errorf("unable to find system: %s; run 'vespa login' to configure a new system", a.system) + return nil, fmt.Errorf("unable to find system: %s; run 'vespa auth login' to configure a new system", a.system) } return s, nil @@ -224,12 +235,12 @@ func (a *Auth0) getSystem() (System, error) { // AddSystem assigns an existing, or new System. This is expected to be called // after a login has completed. -func (a *Auth0) AddSystem(s System) error { +func (a *Auth0) AddSystem(s *System) error { _ = a.init() // If we're dealing with an empty file, we'll need to initialize this map. if a.config.Systems == nil { - a.config.Systems = map[string]System{} + a.config.Systems = map[string]*System{} } a.config.Systems[a.system] = s @@ -246,7 +257,7 @@ func (a *Auth0) removeSystem(s string) error { // If we're dealing with an empty file, we'll need to initialize this map. 
if a.config.Systems == nil { - a.config.Systems = map[string]System{} + a.config.Systems = map[string]*System{} } delete(a.config.Systems, s) @@ -271,7 +282,7 @@ func (a *Auth0) persistConfig() error { } } - buf, err := json.MarshalIndent(a.config, "", " ") + buf, err := a.configToJson(&a.config) if err != nil { return err } @@ -283,6 +294,31 @@ func (a *Auth0) persistConfig() error { return nil } +func (a *Auth0) configToJson(cfg *config) ([]byte, error) { + cfg.Version = 1 + r := configJsonFormat{ + Version: 1, + Providers: providers{ + Config: *cfg, + }, + } + return json.MarshalIndent(r, "", " ") +} + +func (a *Auth0) jsonToConfig(buf []byte) (*config, error) { + r := configJsonFormat{} + if err := json.Unmarshal(buf, &r); err != nil { + return nil, err + } + cfg := r.Providers.Config + if cfg.Systems != nil { + for n, s := range cfg.Systems { + s.Name = n + } + } + return &cfg, nil +} + func (a *Auth0) init() error { a.initOnce.Do(func() { if a.errOnce = a.initContext(); a.errOnce != nil { @@ -302,10 +338,11 @@ func (a *Auth0) initContext() (err error) { return err } - if err := json.Unmarshal(buf, &a.config); err != nil { + cfg, err := a.jsonToConfig(buf) + if err != nil { return err } - + a.config = *cfg return nil } @@ -313,17 +350,21 @@ func (a *Auth0) initContext() (err error) { // by showing the login instructions, opening the browser. // Use `expired` to run the login from other commands setup: // this will only affect the messages. 
-func RunLogin(ctx context.Context, a *Auth0, expired bool) (System, error) { +func RunLogin(ctx context.Context, a *Auth0, expired bool) (*System, error) { if expired { fmt.Println("Please sign in to re-authorize the CLI.") } state, err := a.Authenticator.Start(ctx) if err != nil { - return System{}, fmt.Errorf("could not start the authentication process: %w", err) + return nil, fmt.Errorf("could not start the authentication process: %w", err) } fmt.Printf("Your Device Confirmation code is: %s\n\n", state.UserCode) + + fmt.Println("If you prefer, you can open the URL directly for verification") + fmt.Printf("Your Verification URL: %s\n\n", state.VerificationURI) + fmt.Println("Press Enter to open the browser to log in or ^C to quit...") fmt.Scanln() @@ -334,13 +375,13 @@ func RunLogin(ctx context.Context, a *Auth0, expired bool) (System, error) { } var res auth.Result - err = util.Spinner("Waiting for login to complete in browser", func() error { + err = util.Spinner("Waiting for login to complete in browser ...", func() error { res, err = a.Authenticator.Wait(ctx, state) return err }) if err != nil { - return System{}, fmt.Errorf("login error: %w", err) + return nil, fmt.Errorf("login error: %w", err) } fmt.Print("\n") @@ -361,12 +402,12 @@ func RunLogin(ctx context.Context, a *Auth0, expired bool) (System, error) { ExpiresAt: time.Now().Add(time.Duration(res.ExpiresIn) * time.Second), Scopes: auth.RequiredScopes(), } - err = a.AddSystem(s) + err = a.AddSystem(&s) if err != nil { - return System{}, fmt.Errorf("could not add system to config: %w", err) + return nil, fmt.Errorf("could not add system to config: %w", err) } - return s, nil + return &s, nil } func RunLogout(a *Auth0) error { diff --git a/client/go/cmd/api_key.go b/client/go/cmd/api_key.go index 9832f04e3f0..032d98c96fe 100644 --- a/client/go/cmd/api_key.go +++ b/client/go/cmd/api_key.go @@ -16,52 +16,74 @@ import ( var overwriteKey bool func init() { - rootCmd.AddCommand(apiKeyCmd) 
apiKeyCmd.Flags().BoolVarP(&overwriteKey, "force", "f", false, "Force overwrite of existing API key") apiKeyCmd.MarkPersistentFlagRequired(applicationFlag) } +var example string + +func apiKeyExample() string { + if vespa.Auth0AccessTokenEnabled() { + return "$ vespa auth api-key -a my-tenant.my-app.my-instance" + } else { + return "$ vespa api-key -a my-tenant.my-app.my-instance" + } +} + var apiKeyCmd = &cobra.Command{ Use: "api-key", Short: "Create a new user API key for authentication with Vespa Cloud", - Example: "$ vespa api-key -a my-tenant.my-app.my-instance", + Example: apiKeyExample(), DisableAutoGenTag: true, Args: cobra.ExactArgs(0), - Run: func(cmd *cobra.Command, args []string) { - cfg, err := LoadConfig() - if err != nil { - fatalErr(err, "Could not load config") - return - } - app := getApplication() - apiKeyFile := cfg.APIKeyPath(app.Tenant) - if util.PathExists(apiKeyFile) && !overwriteKey { - printErrHint(fmt.Errorf("File %s already exists", apiKeyFile), "Use -f to overwrite it") - printPublicKey(apiKeyFile, app.Tenant) - return - } - apiKey, err := vespa.CreateAPIKey() - if err != nil { - fatalErr(err, "Could not create API key") - return - } - if err := ioutil.WriteFile(apiKeyFile, apiKey, 0600); err == nil { - printSuccess("API private key written to ", apiKeyFile) - printPublicKey(apiKeyFile, app.Tenant) - if vespa.Auth0AccessTokenEnabled() { - if err == nil { - if err := cfg.Set(cloudAuthFlag, "api-key"); err != nil { - fatalErr(err, "Could not write config") - } - if err := cfg.Write(); err != nil { - fatalErr(err) - } + Run: doApiKey, +} + +var deprecatedApiKeyCmd = &cobra.Command{ + Use: "api-key", + Short: "Create a new user API key for authentication with Vespa Cloud", + Example: apiKeyExample(), + DisableAutoGenTag: true, + Args: cobra.ExactArgs(0), + Hidden: true, + Deprecated: "use 'vespa auth api-key' instead", + Run: doApiKey, +} + +func doApiKey(_ *cobra.Command, _ []string) { + cfg, err := LoadConfig() + if err != nil { + 
fatalErr(err, "Could not load config") + return + } + app := getApplication() + apiKeyFile := cfg.APIKeyPath(app.Tenant) + if util.PathExists(apiKeyFile) && !overwriteKey { + printErrHint(fmt.Errorf("File %s already exists", apiKeyFile), "Use -f to overwrite it") + printPublicKey(apiKeyFile, app.Tenant) + return + } + apiKey, err := vespa.CreateAPIKey() + if err != nil { + fatalErr(err, "Could not create API key") + return + } + if err := ioutil.WriteFile(apiKeyFile, apiKey, 0600); err == nil { + printSuccess("API private key written to ", apiKeyFile) + printPublicKey(apiKeyFile, app.Tenant) + if vespa.Auth0AccessTokenEnabled() { + if err == nil { + if err := cfg.Set(cloudAuthFlag, "api-key"); err != nil { + fatalErr(err, "Could not write config") + } + if err := cfg.Write(); err != nil { + fatalErr(err) } } - } else { - fatalErr(err, "Failed to write ", apiKeyFile) } - }, + } else { + fatalErr(err, "Failed to write ", apiKeyFile) + } } func printPublicKey(apiKeyFile, tenant string) { diff --git a/client/go/cmd/auth.go b/client/go/cmd/auth.go new file mode 100644 index 00000000000..9322f8d0808 --- /dev/null +++ b/client/go/cmd/auth.go @@ -0,0 +1,34 @@ +package cmd + +import ( + "github.com/spf13/cobra" + "github.com/vespa-engine/vespa/client/go/vespa" +) + +func init() { + if vespa.Auth0AccessTokenEnabled() { + rootCmd.AddCommand(authCmd) + rootCmd.AddCommand(deprecatedCertCmd) + rootCmd.AddCommand(deprecatedApiKeyCmd) + authCmd.AddCommand(certCmd) + authCmd.AddCommand(apiKeyCmd) + authCmd.AddCommand(loginCmd) + authCmd.AddCommand(logoutCmd) + } else { + rootCmd.AddCommand(certCmd) + rootCmd.AddCommand(apiKeyCmd) + } +} + +var authCmd = &cobra.Command{ + Use: "auth", + Short: "Manage Vespa Cloud credentials", + Long: `Manage Vespa Cloud credentials.`, + + DisableAutoGenTag: true, + Run: func(cmd *cobra.Command, args []string) { + // Root command does nothing + cmd.Help() + exitFunc(1) + }, +} diff --git a/client/go/cmd/cert.go b/client/go/cmd/cert.go index 
eaf3fc564dd..e79a45d3af8 100644 --- a/client/go/cmd/cert.go +++ b/client/go/cmd/cert.go @@ -5,96 +5,121 @@ package cmd import ( "fmt" - "os" - "path/filepath" - "github.com/spf13/cobra" "github.com/vespa-engine/vespa/client/go/util" "github.com/vespa-engine/vespa/client/go/vespa" + "os" + "path/filepath" ) var overwriteCertificate bool func init() { - rootCmd.AddCommand(certCmd) certCmd.Flags().BoolVarP(&overwriteCertificate, "force", "f", false, "Force overwrite of existing certificate and private key") certCmd.MarkPersistentFlagRequired(applicationFlag) } +func certExample() string { + if vespa.Auth0AccessTokenEnabled() { + return "$ vespa auth cert -a my-tenant.my-app.my-instance" + } else { + return "$ vespa cert -a my-tenant.my-app.my-instance" + } +} + var certCmd = &cobra.Command{ Use: "cert", Short: "Create a new private key and self-signed certificate for Vespa Cloud deployment", + Example: certExample(), + DisableAutoGenTag: true, + Args: cobra.MaximumNArgs(1), + Run: doCert, +} + +var deprecatedCertCmd = &cobra.Command{ + Use: "cert", + Short: "Create a new private key and self-signed certificate for Vespa Cloud deployment", Example: "$ vespa cert -a my-tenant.my-app.my-instance", DisableAutoGenTag: true, Args: cobra.MaximumNArgs(1), - Run: func(cmd *cobra.Command, args []string) { - app := getApplication() - pkg, err := vespa.FindApplicationPackage(applicationSource(args), false) - if err != nil { - fatalErr(err) - return - } - cfg, err := LoadConfig() - if err != nil { - fatalErr(err) - return - } - privateKeyFile, err := cfg.PrivateKeyPath(app) - if err != nil { - fatalErr(err) - return - } - certificateFile, err := cfg.CertificatePath(app) - if err != nil { - fatalErr(err) - return - } + Deprecated: "use 'vespa auth cert' instead", + Hidden: true, + Run: doCert, +} - if !overwriteCertificate { - hint := "Use -f flag to force overwriting" - if pkg.HasCertificate() { - fatalErrHint(fmt.Errorf("Application package %s already contains a certificate", 
pkg.Path), hint) - return - } - if util.PathExists(privateKeyFile) { - fatalErrHint(fmt.Errorf("Private key %s already exists", color.Cyan(privateKeyFile)), hint) - return - } - if util.PathExists(certificateFile) { - fatalErrHint(fmt.Errorf("Certificate %s already exists", color.Cyan(certificateFile)), hint) - return - } - } - if pkg.IsZip() { - fatalErrHint(fmt.Errorf("Cannot add certificate to compressed application package %s", pkg.Path), - "Try running 'mvn clean' before 'vespa cert', and then 'mvn package'") - return - } +func doCert(_ *cobra.Command, args []string) { + app := getApplication() + pkg, err := vespa.FindApplicationPackage(applicationSource(args), false) + if err != nil { + fatalErr(err) + return + } + cfg, err := LoadConfig() + if err != nil { + fatalErr(err) + return + } + privateKeyFile, err := cfg.PrivateKeyPath(app) + if err != nil { + fatalErr(err) + return + } + certificateFile, err := cfg.CertificatePath(app) + if err != nil { + fatalErr(err) + return + } - keyPair, err := vespa.CreateKeyPair() - if err != nil { - fatalErr(err, "Could not create key pair") + if !overwriteCertificate { + hint := "Use -f flag to force overwriting" + if pkg.HasCertificate() { + fatalErrHint(fmt.Errorf("Application package %s already contains a certificate", pkg.Path), hint) return } - pkgCertificateFile := filepath.Join(pkg.Path, "security", "clients.pem") - if err := os.MkdirAll(filepath.Dir(pkgCertificateFile), 0755); err != nil { - fatalErr(err, "Could not create security directory") + if util.PathExists(privateKeyFile) { + fatalErrHint(fmt.Errorf("Private key %s already exists", color.Cyan(privateKeyFile)), hint) return } - if err := keyPair.WriteCertificateFile(pkgCertificateFile, overwriteCertificate); err != nil { - fatalErr(err, "Could not write certificate") + if util.PathExists(certificateFile) { + fatalErrHint(fmt.Errorf("Certificate %s already exists", color.Cyan(certificateFile)), hint) return } - if err := 
keyPair.WriteCertificateFile(certificateFile, overwriteCertificate); err != nil { - fatalErr(err, "Could not write certificate") - return + } + if pkg.IsZip() { + var msg string + if vespa.Auth0AccessTokenEnabled() { + msg = "Try running 'mvn clean' before 'vespa auth cert', and then 'mvn package'" + } else { + msg = "Try running 'mvn clean' before 'vespa cert', and then 'mvn package'" } - if err := keyPair.WritePrivateKeyFile(privateKeyFile, overwriteCertificate); err != nil { - fatalErr(err, "Could not write private key") - return - } - printSuccess("Certificate written to ", color.Cyan(pkgCertificateFile)) - printSuccess("Certificate written to ", color.Cyan(certificateFile)) - printSuccess("Private key written to ", color.Cyan(privateKeyFile)) - }, + fatalErrHint(fmt.Errorf("Cannot add certificate to compressed application package %s", pkg.Path), + msg) + return + } + + keyPair, err := vespa.CreateKeyPair() + if err != nil { + fatalErr(err, "Could not create key pair") + return + } + pkgCertificateFile := filepath.Join(pkg.Path, "security", "clients.pem") + if err := os.MkdirAll(filepath.Dir(pkgCertificateFile), 0755); err != nil { + fatalErr(err, "Could not create security directory") + return + } + if err := keyPair.WriteCertificateFile(pkgCertificateFile, overwriteCertificate); err != nil { + fatalErr(err, "Could not write certificate") + return + } + if err := keyPair.WriteCertificateFile(certificateFile, overwriteCertificate); err != nil { + fatalErr(err, "Could not write certificate") + return + } + if err := keyPair.WritePrivateKeyFile(privateKeyFile, overwriteCertificate); err != nil { + fatalErr(err, "Could not write private key") + return + } + printSuccess("Certificate written to ", color.Cyan(pkgCertificateFile)) + printSuccess("Certificate written to ", color.Cyan(certificateFile)) + printSuccess("Private key written to ", color.Cyan(privateKeyFile)) } diff --git a/client/go/cmd/clone.go b/client/go/cmd/clone.go index 6550e24e094..6fe3c0d5a29 
100644 --- a/client/go/cmd/clone.go +++ b/client/go/cmd/clone.go @@ -142,27 +142,29 @@ func getSampleAppsZip() *os.File { return f } - log.Print(color.Yellow("Downloading sample apps ...")) // TODO: Spawn thread to indicate progress - request, err := http.NewRequest("GET", "https://github.com/vespa-engine/sample-apps/archive/refs/heads/master.zip", nil) - if err != nil { - fatalErr(err, "Invalid URL") - return nil - } - response, err := util.HttpDo(request, time.Minute*60, "GitHub") - if err != nil { - fatalErr(err, "Could not download sample apps from GitHub") - return nil - } - defer response.Body.Close() - if response.StatusCode != 200 { - fatalErr(nil, "Could not download sample apps from GitHub: ", response.StatusCode) - return nil - } + err = util.Spinner(color.Yellow("Downloading sample apps ...").String(), func() error { + request, err := http.NewRequest("GET", "https://github.com/vespa-engine/sample-apps/archive/refs/heads/master.zip", nil) + if err != nil { + fatalErr(err, "Invalid URL") + return nil + } + response, err := util.HttpDo(request, time.Minute*60, "GitHub") + if err != nil { + fatalErr(err, "Could not download sample apps from GitHub") + return nil + } + defer response.Body.Close() + if response.StatusCode != 200 { + fatalErr(nil, "Could not download sample apps from GitHub: ", response.StatusCode) + return nil + } + if _, err := io.Copy(f, response.Body); err != nil { + fatalErr(err, "Could not write sample apps to file: ", f.Name()) + return nil + } + return err + }) - if _, err := io.Copy(f, response.Body); err != nil { - fatalErr(err, "Could not write sample apps to file: ", f.Name()) - return nil - } return f } diff --git a/client/go/cmd/command_tester.go b/client/go/cmd/command_tester.go index 2d2de6a201c..eb55021b536 100644 --- a/client/go/cmd/command_tester.go +++ b/client/go/cmd/command_tester.go @@ -127,4 +127,4 @@ func (c *mockHttpClient) Do(request *http.Request, timeout time.Duration) (*http nil } -func (c *mockHttpClient) 
UseCertificate(certificate tls.Certificate) {} +func (c *mockHttpClient) UseCertificate(certificates []tls.Certificate) {} diff --git a/client/go/cmd/config.go b/client/go/cmd/config.go index 750664e51b1..3a6e43e7ffe 100644 --- a/client/go/cmd/config.go +++ b/client/go/cmd/config.go @@ -133,10 +133,16 @@ func (c *Config) Write() error { } func (c *Config) CertificatePath(app vespa.ApplicationID) (string, error) { + if override, ok := os.LookupEnv("VESPA_CLI_DATA_PLANE_CERT_FILE"); ok { + return override, nil + } return c.applicationFilePath(app, "data-plane-public-cert.pem") } func (c *Config) PrivateKeyPath(app vespa.ApplicationID) (string, error) { + if override, ok := os.LookupEnv("VESPA_CLI_DATA_PLANE_KEY_FILE"); ok { + return override, nil + } return c.applicationFilePath(app, "data-plane-private-key.pem") } @@ -149,7 +155,7 @@ func (c *Config) ReadAPIKey(tenantName string) ([]byte, error) { } func (c *Config) AuthConfigPath() string { - return filepath.Join(c.Home, "auth0.json") + return filepath.Join(c.Home, "auth.json") } func (c *Config) ReadSessionID(app vespa.ApplicationID) (int64, error) { diff --git a/client/go/cmd/curl.go b/client/go/cmd/curl.go index bd9fad1b47e..2496ddc3abc 100644 --- a/client/go/cmd/curl.go +++ b/client/go/cmd/curl.go @@ -46,7 +46,7 @@ $ vespa curl -t local -- -v /search/?yql=query fatalErr(err) return } - service := getService("query", 0) + service := getService("query", 0, "") url := joinURL(service.BaseURL, args[len(args)-1]) rawArgs := args[:len(args)-1] c, err := curl.RawArgs(url, rawArgs...) 
diff --git a/client/go/cmd/deploy.go b/client/go/cmd/deploy.go index 034dac2e67b..ae39afc3773 100644 --- a/client/go/cmd/deploy.go +++ b/client/go/cmd/deploy.go @@ -18,8 +18,9 @@ const ( ) var ( - zoneArg string - logLevelArg string + zoneArg string + logLevelArg string + sessionOrRunID int64 ) func init() { @@ -63,7 +64,9 @@ $ vespa deploy -t cloud -z perf.aws-us-east-1c`, } target := getTarget() opts := getDeploymentOpts(cfg, pkg, target) + if sessionOrRunID, err := vespa.Deploy(opts); err == nil { + fmt.Print("\n") if opts.IsCloud() { printSuccess("Triggered deployment of ", color.Cyan(pkg.Path), " with run ID ", color.Cyan(sessionOrRunID)) } else { diff --git a/client/go/cmd/document.go b/client/go/cmd/document.go index cd0170684cf..84c384e701e 100644 --- a/client/go/cmd/document.go +++ b/client/go/cmd/document.go @@ -118,7 +118,7 @@ var documentGetCmd = &cobra.Command{ }, } -func documentService() *vespa.Service { return getService("document", 0) } +func documentService() *vespa.Service { return getService("document", 0, "") } func operationOptions() vespa.OperationOptions { return vespa.OperationOptions{ diff --git a/client/go/cmd/document_test.go b/client/go/cmd/document_test.go index 649aca8703a..f3a5fbe9543 100644 --- a/client/go/cmd/document_test.go +++ b/client/go/cmd/document_test.go @@ -161,5 +161,5 @@ func assertDocumentServerError(t *testing.T, status int, errorMessage string) { } func documentServiceURL(client *mockHttpClient) string { - return getService("document", 0).BaseURL + return getService("document", 0, "").BaseURL } diff --git a/client/go/cmd/helpers.go b/client/go/cmd/helpers.go index 79ba1fcef26..f065ae0c680 100644 --- a/client/go/cmd/helpers.go +++ b/client/go/cmd/helpers.go @@ -6,6 +6,7 @@ package cmd import ( "crypto/tls" + "encoding/json" "fmt" "io/ioutil" "log" @@ -129,19 +130,21 @@ func getTargetType() string { return target } -func getService(service string, sessionOrRunID int64) *vespa.Service { +func getService(service string, 
sessionOrRunID int64, cluster string) *vespa.Service { t := getTarget() timeout := time.Duration(waitSecsArg) * time.Second if timeout > 0 { log.Printf("Waiting up to %d %s for service to become available ...", color.Cyan(waitSecsArg), color.Cyan("seconds")) } - s, err := t.Service(service, timeout, sessionOrRunID) + s, err := t.Service(service, timeout, sessionOrRunID, cluster) if err != nil { fatalErr(err, "Invalid service: ", service) } return s } +func getEndpointsOverride() string { return os.Getenv("VESPA_CLI_ENDPOINTS") } + func getSystem() string { return os.Getenv("VESPA_CLI_CLOUD_SYSTEM") } func getSystemName() string { @@ -175,15 +178,17 @@ func getTarget() vespa.Target { case "local": return vespa.LocalTarget() case "cloud": - deployment := deploymentFromArgs() cfg, err := LoadConfig() if err != nil { fatalErr(err, "Could not load config") return nil } + deployment := deploymentFromArgs() + endpoints := getEndpointsFromEnv() + var apiKey []byte = nil apiKey, err = ioutil.ReadFile(cfg.APIKeyPath(deployment.Application.Tenant)) - if !vespa.Auth0AccessTokenEnabled() { + if !vespa.Auth0AccessTokenEnabled() && endpoints == nil { if err != nil { fatalErrHint(err, "Deployment to cloud requires an API key. Try 'vespa api-key'") } @@ -200,7 +205,13 @@ func getTarget() vespa.Target { } kp, err := tls.LoadX509KeyPair(certificateFile, privateKeyFile) if err != nil { - fatalErrHint(err, "Deployment to cloud requires a certificate. Try 'vespa cert'") + var msg string + if vespa.Auth0AccessTokenEnabled() { + msg = "Deployment to cloud requires a certificate. Try 'vespa auth cert'" + } else { + msg = "Deployment to cloud requires a certificate. 
Try 'vespa cert'" + } + fatalErrHint(err, msg) } var cloudAuth string if vespa.Auth0AccessTokenEnabled() { @@ -228,14 +239,15 @@ func getTarget() vespa.Target { }, cfg.AuthConfigPath(), getSystemName(), - cloudAuth) + cloudAuth, + endpoints) } fatalErrHint(fmt.Errorf("Invalid target: %s", targetType), "Valid targets are 'local', 'cloud' or an URL") return nil } func waitForService(service string, sessionOrRunID int64) { - s := getService(service, sessionOrRunID) + s := getService(service, sessionOrRunID, "") timeout := time.Duration(waitSecsArg) * time.Second if timeout > 0 { log.Printf("Waiting up to %d %s for service to become ready ...", color.Cyan(waitSecsArg), color.Cyan("seconds")) @@ -256,7 +268,13 @@ func getDeploymentOpts(cfg *Config, pkg vespa.ApplicationPackage, target vespa.T if opts.IsCloud() { deployment := deploymentFromArgs() if !opts.ApplicationPackage.HasCertificate() { - fatalErrHint(fmt.Errorf("Missing certificate in application package"), "Applications in Vespa Cloud require a certificate", "Try 'vespa cert'") + var msg string + if vespa.Auth0AccessTokenEnabled() { + msg = "Try 'vespa auth cert'" + } else { + msg = "Try 'vespa cert'" + } + fatalErrHint(fmt.Errorf("Missing certificate in application package"), "Applications in Vespa Cloud require a certificate", msg) return opts } var err error @@ -271,3 +289,32 @@ func getDeploymentOpts(cfg *Config, pkg vespa.ApplicationPackage, target vespa.T } return opts } + +func getEndpointsFromEnv() map[string]string { + endpointsString := getEndpointsOverride() + if endpointsString == "" { + return nil + } + + var endpoints endpoints + urlsByCluster := make(map[string]string) + if err := json.Unmarshal([]byte(endpointsString), &endpoints); err != nil { + fatalErrHint(err, "Endpoints must be valid JSON") + } + if len(endpoints.Endpoints) == 0 { + fatalErr(fmt.Errorf("endpoints must be non-empty")) + } + for _, endpoint := range endpoints.Endpoints { + urlsByCluster[endpoint.Cluster] = endpoint.URL + } + 
return urlsByCluster +} + +type endpoints struct { + Endpoints []endpoint `json:"endpoints"` +} + +type endpoint struct { + Cluster string `json:"cluster"` + URL string `json:"url"` +} diff --git a/client/go/cmd/login.go b/client/go/cmd/login.go index f7b412a4613..5011b290b9f 100644 --- a/client/go/cmd/login.go +++ b/client/go/cmd/login.go @@ -6,17 +6,11 @@ import ( "github.com/vespa-engine/vespa/client/go/vespa" ) -func init() { - if vespa.Auth0AccessTokenEnabled() { - rootCmd.AddCommand(loginCmd) - } -} - var loginCmd = &cobra.Command{ Use: "login", Args: cobra.NoArgs, Short: "Authenticate the Vespa CLI", - Example: "$ vespa login", + Example: "$ vespa auth login", DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { ctx := cmd.Context() diff --git a/client/go/cmd/logout.go b/client/go/cmd/logout.go index e3cfe6733eb..ddc1d36d5e1 100644 --- a/client/go/cmd/logout.go +++ b/client/go/cmd/logout.go @@ -3,20 +3,13 @@ package cmd import ( "github.com/spf13/cobra" "github.com/vespa-engine/vespa/client/go/auth0" - "github.com/vespa-engine/vespa/client/go/vespa" ) -func init() { - if vespa.Auth0AccessTokenEnabled() { - rootCmd.AddCommand(logoutCmd) - } -} - var logoutCmd = &cobra.Command{ Use: "logout", Args: cobra.NoArgs, Short: "Log out of Vespa Cli", - Example: "$ vespa logout", + Example: "$ vespa auth logout", DisableAutoGenTag: true, SilenceUsage: true, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/client/go/cmd/prod.go b/client/go/cmd/prod.go index 382ede0fae8..c686f1d29ad 100644 --- a/client/go/cmd/prod.go +++ b/client/go/cmd/prod.go @@ -116,7 +116,7 @@ For more information about production deployments in Vespa Cloud see: https://cloud.vespa.ai/en/getting-to-production https://cloud.vespa.ai/en/automated-deployments`, DisableAutoGenTag: true, - Example: `$ mvn package + Example: `$ mvn package # when adding custom Java components $ vespa prod submit`, Run: func(cmd *cobra.Command, args []string) { target := 
getTarget() @@ -139,15 +139,17 @@ $ vespa prod submit`, fatalErrHint(fmt.Errorf("No deployment.xml found"), "Try creating one with vespa prod init") return } - if !pkg.IsJava() { - // TODO: Loosen this requirement when we start supporting applications with Java in production - fatalErrHint(fmt.Errorf("No jar files found in %s", pkg.Path), "Only applications containing Java components are currently supported") + if pkg.TestPath == "" { + fatalErrHint(fmt.Errorf("No tests found"), + "The application must be a Java maven project, or include basic HTTP tests under src/test/application/", + "See https://cloud.vespa.ai/en/getting-to-production") return } + verifyTests(pkg.TestPath, target) isCI := os.Getenv("CI") != "" if !isCI { - fmt.Fprintln(stderr, color.Yellow("Warning:"), "Submitting from a non-CI environment is discouraged") - printErrHint(nil, "See https://cloud.vespa.ai/en/getting-to-production for best practices") + fmt.Fprintln(stderr, color.Yellow("Warning:"), "We recommend doing this only from a CD job") + printErrHint(nil, "See https://cloud.vespa.ai/en/getting-to-production") } opts := getDeploymentOpts(cfg, pkg, target) if err := vespa.Submit(opts); err != nil { @@ -347,3 +349,28 @@ func prompt(r *bufio.Reader, question, defaultAnswer string, validator func(inpu } return input } + +func verifyTests(testsParent string, target vespa.Target) { + verifyTest(testsParent, "system-test", target, true) + verifyTest(testsParent, "staging-setup", target, true) + verifyTest(testsParent, "staging-test", target, true) + verifyTest(testsParent, "production-test", target, false) +} + +func verifyTest(testsParent string, suite string, target vespa.Target, required bool) { + testDirectory := filepath.Join(testsParent, "tests", suite) + _, err := os.Stat(testDirectory) + if err != nil { + if required { + if errors.Is(err, os.ErrNotExist) { + fatalErrHint(fmt.Errorf("No %s tests found", suite), + fmt.Sprintf("No such directory: %s", testDirectory), + "See 
https://cloud.vespa.ai/en/reference/testing") + } + fatalErrHint(err, "See https://cloud.vespa.ai/en/reference/testing") + } + return + } + + runTests(testDirectory, true) +} diff --git a/client/go/cmd/prod_test.go b/client/go/cmd/prod_test.go index 4ce6112122a..a4f3ebd6b56 100644 --- a/client/go/cmd/prod_test.go +++ b/client/go/cmd/prod_test.go @@ -16,7 +16,7 @@ import ( func TestProdInit(t *testing.T) { homeDir := filepath.Join(t.TempDir(), ".vespa") pkgDir := filepath.Join(t.TempDir(), "app") - createApplication(t, pkgDir) + createApplication(t, pkgDir, false) answers := []string{ // Regions @@ -81,7 +81,7 @@ func readFileString(t *testing.T, filename string) string { return string(content) } -func createApplication(t *testing.T, pkgDir string) { +func createApplication(t *testing.T, pkgDir string, java bool) { appDir := filepath.Join(pkgDir, "src", "main", "application") targetDir := filepath.Join(pkgDir, "target") if err := os.MkdirAll(appDir, 0755); err != nil { @@ -120,7 +120,24 @@ func createApplication(t *testing.T, pkgDir string) { if err := os.MkdirAll(targetDir, 0755); err != nil { t.Fatal(err) } - if err := ioutil.WriteFile(filepath.Join(pkgDir, "pom.xml"), []byte(""), 0644); err != nil { + if java { + if err := ioutil.WriteFile(filepath.Join(pkgDir, "pom.xml"), []byte(""), 0644); err != nil { + t.Fatal(err) + } + } else { + testsDir := filepath.Join(pkgDir, "src", "test", "application", "tests") + testBytes, _ := ioutil.ReadAll(strings.NewReader("{\"steps\":[{}]}")) + writeTest(filepath.Join(testsDir, "system-test", "test.json"), testBytes, t) + writeTest(filepath.Join(testsDir, "staging-setup", "test.json"), testBytes, t) + writeTest(filepath.Join(testsDir, "staging-test", "test.json"), testBytes, t) + } +} + +func writeTest(path string, content []byte, t *testing.T) { + if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(path, content, 0644); err != nil { t.Fatal(err) } } @@ -128,7 +145,37 @@ 
func createApplication(t *testing.T, pkgDir string) { func TestProdSubmit(t *testing.T) { homeDir := filepath.Join(t.TempDir(), ".vespa") pkgDir := filepath.Join(t.TempDir(), "app") - createApplication(t, pkgDir) + createApplication(t, pkgDir, false) + + httpClient := &mockHttpClient{} + httpClient.NextResponse(200, `ok`) + execute(command{homeDir: homeDir, args: []string{"config", "set", "application", "t1.a1.i1"}}, t, httpClient) + execute(command{homeDir: homeDir, args: []string{"config", "set", "target", "cloud"}}, t, httpClient) + execute(command{homeDir: homeDir, args: []string{"api-key"}}, t, httpClient) + execute(command{homeDir: homeDir, args: []string{"cert", pkgDir}}, t, httpClient) + + // Zipping requires relative paths, so much let command run from pkgDir, then reset cwd for subsequent tests. + if cwd, err := os.Getwd(); err != nil { + t.Fatal(err) + } else { + defer os.Chdir(cwd) + } + if err := os.Chdir(pkgDir); err != nil { + t.Fatal(err) + } + if err := os.Setenv("CI", "true"); err != nil { + t.Fatal(err) + } + out, err := execute(command{homeDir: homeDir, args: []string{"prod", "submit"}}, t, httpClient) + assert.Equal(t, "", err) + assert.Contains(t, out, "Success: Submitted") + assert.Contains(t, out, "See https://console.vespa.oath.cloud/tenant/t1/application/a1/prod/deployment for deployment progress") +} + +func TestProdSubmitWithJava(t *testing.T) { + homeDir := filepath.Join(t.TempDir(), ".vespa") + pkgDir := filepath.Join(t.TempDir(), "app") + createApplication(t, pkgDir, true) httpClient := &mockHttpClient{} httpClient.NextResponse(200, `ok`) @@ -137,7 +184,7 @@ func TestProdSubmit(t *testing.T) { execute(command{homeDir: homeDir, args: []string{"api-key"}}, t, httpClient) execute(command{homeDir: homeDir, args: []string{"cert", pkgDir}}, t, httpClient) - // Copy an application package pre-assambled with mvn package + // Copy an application package pre-assembled with mvn package testAppDir := filepath.Join("testdata", "applications", 
"withDeployment", "target") zipFile := filepath.Join(testAppDir, "application.zip") copyFile(t, filepath.Join(pkgDir, "target", "application.zip"), zipFile) diff --git a/client/go/cmd/query.go b/client/go/cmd/query.go index 76688438fb4..6638c275330 100644 --- a/client/go/cmd/query.go +++ b/client/go/cmd/query.go @@ -39,7 +39,7 @@ can be set by the syntax [parameter-name]=[value].`, } func query(arguments []string) { - service := getService("query", 0) + service := getService("query", 0, "") url, _ := url.Parse(service.BaseURL + "/search/") urlQuery := url.Query() for i := 0; i < len(arguments); i++ { diff --git a/client/go/cmd/query_test.go b/client/go/cmd/query_test.go index ec6c3063906..55046ae49ba 100644 --- a/client/go/cmd/query_test.go +++ b/client/go/cmd/query_test.go @@ -75,5 +75,5 @@ func assertQueryServiceError(t *testing.T, status int, errorMessage string) { } func queryServiceURL(client *mockHttpClient) string { - return getService("query", 0).BaseURL + return getService("query", 0, "").BaseURL } diff --git a/client/go/cmd/test.go b/client/go/cmd/test.go new file mode 100644 index 00000000000..262b57eff33 --- /dev/null +++ b/client/go/cmd/test.go @@ -0,0 +1,461 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+// vespa test command +// Author: jonmv + +package cmd + +import ( + "bytes" + "crypto/tls" + "encoding/json" + "fmt" + "github.com/spf13/cobra" + "github.com/vespa-engine/vespa/client/go/util" + "github.com/vespa-engine/vespa/client/go/vespa" + "io/ioutil" + "math" + "net/http" + "net/url" + "os" + "path/filepath" + "strings" + "time" +) + +func init() { + rootCmd.AddCommand(testCmd) + testCmd.PersistentFlags().StringVarP(&zoneArg, zoneFlag, "z", "dev.aws-us-east-1c", "The zone to use for deployment") +} + +var testCmd = &cobra.Command{ + Use: "test <tests directory or test file>", + Short: "Run a test suite, or a single test", + Long: `Run a test suite, or a single test + +Runs all JSON test files in the specified directory, or the single JSON test file specified. + +See https://cloud.vespa.ai/en/reference/testing.html for details.`, + Example: `$ vespa test src/test/application/tests/system-test +$ vespa test src/test/application/tests/system-test/feed-and-query.json`, + Args: cobra.ExactArgs(1), + DisableAutoGenTag: true, + Run: func(cmd *cobra.Command, args []string) { + if count, failed := runTests(args[0], false); len(failed) != 0 { + plural := "s" + if count == 1 { + plural = "" + } + fmt.Fprintf(stdout, "\n%s %d of %d test%s failed:\n", color.Red("Failure:"), len(failed), count, plural) + for _, test := range failed { + fmt.Fprintln(stdout, test) + } + exitFunc(3) + } else { + plural := "s" + if count == 1 { + plural = "" + } + fmt.Fprintf(stdout, "\n%s %d test%s OK\n", color.Green("Success:"), count, plural) + } + }, +} + +func runTests(rootPath string, dryRun bool) (int, []string) { + count := 0 + failed := make([]string, 0) + if stat, err := os.Stat(rootPath); err != nil { + fatalErrHint(err, "See https://cloud.vespa.ai/en/reference/testing") + } else if stat.IsDir() { + tests, err := ioutil.ReadDir(rootPath) // TODO: Use os.ReadDir when >= 1.16 is required. 
+ if err != nil { + fatalErrHint(err, "See https://cloud.vespa.ai/en/reference/testing") + } + context := testContext{testsPath: rootPath, dryRun: dryRun} + previousFailed := false + for _, test := range tests { + if !test.IsDir() && filepath.Ext(test.Name()) == ".json" { + testPath := filepath.Join(rootPath, test.Name()) + if previousFailed { + fmt.Fprintln(stdout, "") + previousFailed = false + } + failure := runTest(testPath, context) + if failure != "" { + failed = append(failed, failure) + previousFailed = true + } + count++ + } + } + } else if strings.HasSuffix(stat.Name(), ".json") { + failure := runTest(rootPath, testContext{testsPath: filepath.Dir(rootPath), dryRun: dryRun}) + if failure != "" { + failed = append(failed, failure) + } + count++ + } + if count == 0 { + fatalErrHint(fmt.Errorf("Failed to find any tests at %s", rootPath), "See https://cloud.vespa.ai/en/reference/testing") + } + return count, failed +} + +// Runs the test at the given path, and returns the specified test name if the test fails +func runTest(testPath string, context testContext) string { + var test test + testBytes, err := ioutil.ReadFile(testPath) + if err != nil { + fatalErrHint(err, "See https://cloud.vespa.ai/en/reference/testing") + } + if err = json.Unmarshal(testBytes, &test); err != nil { + fatalErrHint(err, fmt.Sprintf("Failed parsing test at %s", testPath), "See https://cloud.vespa.ai/en/reference/testing") + } + + testName := test.Name + if test.Name == "" { + testName = filepath.Base(testPath) + } + if !context.dryRun { + fmt.Fprintf(stdout, "%s:", testName) + } + + defaultParameters, err := getParameters(test.Defaults.ParametersRaw, filepath.Dir(testPath)) + if err != nil { + fmt.Fprintln(stderr) + fatalErrHint(err, fmt.Sprintf("Invalid default parameters for %s", testName), "See https://cloud.vespa.ai/en/reference/testing") + } + + if len(test.Steps) == 0 { + fmt.Fprintln(stderr) + fatalErrHint(fmt.Errorf("a test must have at least one step, but none were found in 
%s", testPath), "See https://cloud.vespa.ai/en/reference/testing") + } + for i, step := range test.Steps { + stepName := fmt.Sprintf("Step %d", i+1) + if step.Name != "" { + stepName += ": " + step.Name + } + failure, longFailure, err := verify(step, test.Defaults.Cluster, defaultParameters, context) + if err != nil { + fmt.Fprintln(stderr) + fatalErrHint(err, fmt.Sprintf("Error in %s", stepName), "See https://cloud.vespa.ai/en/reference/testing") + } + if !context.dryRun { + if failure != "" { + fmt.Fprintf(stdout, " %s\n%s:\n%s\n", color.Red("failed"), stepName, longFailure) + return fmt.Sprintf("%s: %s: %s", testName, stepName, failure) + } + if i == 0 { + fmt.Fprintf(stdout, " ") + } + fmt.Fprint(stdout, ".") + } + } + if !context.dryRun { + fmt.Fprintln(stdout, color.Green(" OK")) + } + return "" +} + +// Asserts specified response is obtained for request, or returns a failure message, or an error if this fails +func verify(step step, defaultCluster string, defaultParameters map[string]string, context testContext) (string, string, error) { + requestBody, err := getBody(step.Request.BodyRaw, context.testsPath) + if err != nil { + return "", "", err + } + + parameters, err := getParameters(step.Request.ParametersRaw, context.testsPath) + if err != nil { + return "", "", err + } + for name, value := range defaultParameters { + if _, present := parameters[name]; !present { + parameters[name] = value + } + } + + cluster := step.Request.Cluster + if cluster == "" { + cluster = defaultCluster + } + + method := step.Request.Method + if method == "" { + method = "GET" + } + + var service *vespa.Service + requestUri := step.Request.URI + if requestUri == "" { + requestUri = "/search/" + } + requestUrl, err := url.ParseRequestURI(requestUri) + if err != nil { + return "", "", err + } + externalEndpoint := requestUrl.IsAbs() + if !externalEndpoint && !context.dryRun { + service, err = context.target().Service("query", 0, 0, cluster) + if err != nil { + return "", "", err 
+ } + requestUrl, err = url.ParseRequestURI(service.BaseURL + requestUri) + if err != nil { + return "", "", err + } + } + query := requestUrl.Query() + for name, value := range parameters { + query.Add(name, value) + } + requestUrl.RawQuery = query.Encode() + + header := http.Header{} + header.Add("Content-Type", "application/json") // TODO: Not guaranteed to be true ... + + request := &http.Request{ + URL: requestUrl, + Method: method, + Header: header, + Body: ioutil.NopCloser(bytes.NewReader(requestBody)), + } + defer request.Body.Close() + + statusCode := step.Response.Code + if statusCode == 0 { + statusCode = 200 + } + + responseBodySpecBytes, err := getBody(step.Response.BodyRaw, context.testsPath) + if err != nil { + return "", "", err + } + var responseBodySpec interface{} + if responseBodySpecBytes != nil { + err = json.Unmarshal(responseBodySpecBytes, &responseBodySpec) + if err != nil { + return "", "", fmt.Errorf("invalid response body spec: %w", err) + } + } + + if context.dryRun { + return "", "", nil + } + + var response *http.Response + if externalEndpoint { + util.ActiveHttpClient.UseCertificate([]tls.Certificate{}) + response, err = util.ActiveHttpClient.Do(request, 60*time.Second) + } else { + response, err = service.Do(request, 600*time.Second) // Vespa should provide a response within the given request timeout + } + if err != nil { + return "", "", err + } + defer response.Body.Close() + + if statusCode != response.StatusCode { + return fmt.Sprintf("Unexpected status code: %d", color.Red(response.StatusCode)), + fmt.Sprintf("Unexpected status code\nExpected: %d\nActual: %d\nRequested: %s at %s\nResponse:\n%s", + color.Cyan(statusCode), + color.Red(response.StatusCode), + color.Cyan(method), + color.Cyan(requestUrl), + util.ReaderToJSON(response.Body)), nil + } + + if responseBodySpec == nil { + return "", "", nil + } + + responseBodyBytes, err := ioutil.ReadAll(response.Body) + if err != nil { + return "", "", err + } + var responseBody 
interface{} + err = json.Unmarshal(responseBodyBytes, &responseBody) + if err != nil { + return "", "", fmt.Errorf("got non-JSON response; %w:\n%s", err, string(responseBodyBytes)) + } + + failure, expected, actual, err := compare(responseBodySpec, responseBody, "") + if failure != "" { + responsePretty, _ := json.MarshalIndent(responseBody, "", " ") + longFailure := failure + if expected != "" { + longFailure += "\nExpected: " + expected + } + if actual != "" { + failure += ": " + actual + longFailure += "\nActual: " + actual + } + longFailure += fmt.Sprintf("\nRequested: %s at %s\nResponse:\n%s", color.Cyan(method), color.Cyan(requestUrl), string(responsePretty)) + return failure, longFailure, err + } + return "", "", err +} + +func compare(expected interface{}, actual interface{}, path string) (string, string, string, error) { + typeMatch := false + valueMatch := false + switch u := expected.(type) { + case nil: + typeMatch = actual == nil + valueMatch = actual == nil + case bool: + v, ok := actual.(bool) + typeMatch = ok + valueMatch = ok && u == v + case float64: + v, ok := actual.(float64) + typeMatch = ok + valueMatch = ok && math.Abs(u-v) < 1e-9 + case string: + v, ok := actual.(string) + typeMatch = ok + valueMatch = ok && (u == v) + case []interface{}: + v, ok := actual.([]interface{}) + typeMatch = ok + if ok { + if len(u) == len(v) { + for i, e := range u { + if failure, expected, actual, err := compare(e, v[i], fmt.Sprintf("%s/%d", path, i)); failure != "" || err != nil { + return failure, expected, actual, err + } + } + valueMatch = true + } else { + return fmt.Sprintf("Unexpected number of elements at %s", color.Cyan(path)), + fmt.Sprintf("%d", color.Cyan(len(u))), + fmt.Sprintf("%d", color.Red(len(v))), + nil + } + } + case map[string]interface{}: + v, ok := actual.(map[string]interface{}) + typeMatch = ok + if ok { + for n, e := range u { + childPath := fmt.Sprintf("%s/%s", path, strings.ReplaceAll(strings.ReplaceAll(n, "~", "~0"), "/", "~1")) + f, 
ok := v[n] + if !ok { + return fmt.Sprintf("Missing expected field at %s", color.Red(childPath)), "", "", nil + } + if failure, expected, actual, err := compare(e, f, childPath); failure != "" || err != nil { + return failure, expected, actual, err + } + } + valueMatch = true + } + default: + return "", "", "", fmt.Errorf("unexpected JSON type for value '%v'", expected) + } + + if !valueMatch { + if path == "" { + path = "root" + } + mismatched := "type" + if typeMatch { + mismatched = "value" + } + expectedJson, _ := json.Marshal(expected) + actualJson, _ := json.Marshal(actual) + return fmt.Sprintf("Unexpected %s at %s", mismatched, color.Cyan(path)), + fmt.Sprintf("%s", color.Cyan(expectedJson)), + fmt.Sprintf("%s", color.Red(actualJson)), + nil + } + return "", "", "", nil +} + +func getParameters(parametersRaw []byte, testsPath string) (map[string]string, error) { + if parametersRaw != nil { + var parametersPath string + if err := json.Unmarshal(parametersRaw, ¶metersPath); err == nil { + if err = validateRelativePath(parametersPath); err != nil { + return nil, err + } + resolvedParametersPath := filepath.Join(testsPath, parametersPath) + parametersRaw, err = ioutil.ReadFile(resolvedParametersPath) + if err != nil { + return nil, fmt.Errorf("failed to read request parameters at %s: %w", resolvedParametersPath, err) + } + } + var parameters map[string]string + if err := json.Unmarshal(parametersRaw, ¶meters); err != nil { + return nil, fmt.Errorf("request parameters must be JSON with only string values: %w", err) + } + return parameters, nil + } + return make(map[string]string), nil +} + +func getBody(bodyRaw []byte, testsPath string) ([]byte, error) { + var bodyPath string + if err := json.Unmarshal(bodyRaw, &bodyPath); err == nil { + if err = validateRelativePath(bodyPath); err != nil { + return nil, err + } + resolvedBodyPath := filepath.Join(testsPath, bodyPath) + bodyRaw, err = ioutil.ReadFile(resolvedBodyPath) + if err != nil { + return nil, 
fmt.Errorf("failed to read body file at %s: %w", resolvedBodyPath, err) + } + } + return bodyRaw, nil +} + +func validateRelativePath(relPath string) error { + if filepath.IsAbs(relPath) { + return fmt.Errorf("path must be relative, but was '%s'", relPath) + } + cleanPath := filepath.Clean(relPath) + if strings.HasPrefix(cleanPath, "../../../") { + return fmt.Errorf("path may not point outside src/test/application, but '%s' does", relPath) + } + return nil +} + +type test struct { + Name string `json:"name"` + Defaults defaults `json:"defaults"` + Steps []step `json:"steps"` +} + +type defaults struct { + Cluster string `json:"cluster"` + ParametersRaw json.RawMessage `json:"parameters"` +} + +type step struct { + Name string `json:"name"` + Request request `json:"request"` + Response response `json:"response"` +} + +type request struct { + Cluster string `json:"cluster"` + Method string `json:"method"` + URI string `json:"uri"` + ParametersRaw json.RawMessage `json:"parameters"` + BodyRaw json.RawMessage `json:"body"` +} + +type response struct { + Code int `json:"code"` + BodyRaw json.RawMessage `json:"body"` +} + +type testContext struct { + lazyTarget vespa.Target + testsPath string + dryRun bool +} + +func (t *testContext) target() vespa.Target { + if t.lazyTarget == nil { + t.lazyTarget = getTarget() + } + return t.lazyTarget +} diff --git a/client/go/cmd/test_test.go b/client/go/cmd/test_test.go new file mode 100644 index 00000000000..6649353df77 --- /dev/null +++ b/client/go/cmd/test_test.go @@ -0,0 +1,157 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+// test command tests +// Author: jonmv + +package cmd + +import ( + "fmt" + "github.com/vespa-engine/vespa/client/go/util" + "github.com/vespa-engine/vespa/client/go/vespa" + "io/ioutil" + "net/http" + "net/url" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestSuite(t *testing.T) { + client := &mockHttpClient{} + searchResponse, _ := ioutil.ReadFile("testdata/tests/response.json") + client.NextStatus(200) + client.NextStatus(200) + for i := 0; i < 11; i++ { + client.NextResponse(200, string(searchResponse)) + } + + expectedBytes, _ := ioutil.ReadFile("testdata/tests/expected-suite.out") + outBytes, errBytes := execute(command{args: []string{"test", "testdata/tests/system-test"}}, t, client) + + baseUrl := "http://127.0.0.1:8080" + urlWithQuery := baseUrl + "/search/?presentation.timing=true&query=artist%3A+foo&timeout=3.4s" + requests := []*http.Request{createFeedRequest(baseUrl), createFeedRequest(baseUrl), createSearchRequest(urlWithQuery), createSearchRequest(urlWithQuery)} + requests = append(requests, createSearchRequest(baseUrl+"/search/")) + requests = append(requests, createSearchRequest(baseUrl+"/search/?foo=%2F")) + for i := 0; i < 7; i++ { + requests = append(requests, createSearchRequest(baseUrl+"/search/")) + } + assertRequests(requests, client, t) + fmt.Println(outBytes) + assert.Equal(t, string(expectedBytes), outBytes) + assert.Equal(t, "", errBytes) +} + +func TestIllegalFileReference(t *testing.T) { + client := &mockHttpClient{} + client.NextStatus(200) + client.NextStatus(200) + _, errBytes := execute(command{args: []string{"test", "testdata/tests/production-test/illegal-reference.json"}}, t, client) + assertRequests([]*http.Request{createRequest("GET", "http://127.0.0.1:8080/search/", "{}")}, client, t) + assert.Equal(t, "\nError: path may not point outside src/test/application, but 'foo/../../../../this-is-not-ok.json' does\nHint: Error in Step 2\nHint: See 
https://cloud.vespa.ai/en/reference/testing\n", errBytes) +} + +func TestProductionTest(t *testing.T) { + client := &mockHttpClient{} + client.NextStatus(200) + outBytes, errBytes := execute(command{args: []string{"test", "testdata/tests/production-test/external.json"}}, t, client) + assert.Equal(t, "external.json: . OK\n\nSuccess: 1 test OK\n", outBytes) + assert.Equal(t, "", errBytes) + assertRequests([]*http.Request{createRequest("GET", "https://my.service:123/path?query=wohoo", "")}, client, t) +} + +func TestTestWithoutAssertions(t *testing.T) { + client := &mockHttpClient{} + _, errBytes := execute(command{args: []string{"test", "testdata/tests/system-test/foo/query.json"}}, t, client) + assert.Equal(t, "\nError: a test must have at least one step, but none were found in testdata/tests/system-test/foo/query.json\nHint: See https://cloud.vespa.ai/en/reference/testing\n", errBytes) +} + +func TestSuiteWithoutTests(t *testing.T) { + client := &mockHttpClient{} + _, errBytes := execute(command{args: []string{"test", "testdata/tests/staging-test"}}, t, client) + assert.Equal(t, "Error: Failed to find any tests at testdata/tests/staging-test\nHint: See https://cloud.vespa.ai/en/reference/testing\n", errBytes) +} + +func TestSingleTest(t *testing.T) { + client := &mockHttpClient{} + searchResponse, _ := ioutil.ReadFile("testdata/tests/response.json") + client.NextStatus(200) + client.NextStatus(200) + client.NextResponse(200, string(searchResponse)) + client.NextResponse(200, string(searchResponse)) + + expectedBytes, _ := ioutil.ReadFile("testdata/tests/expected.out") + outBytes, errBytes := execute(command{args: []string{"test", "testdata/tests/system-test/test.json"}}, t, client) + assert.Equal(t, string(expectedBytes), outBytes) + assert.Equal(t, "", errBytes) + + baseUrl := "http://127.0.0.1:8080" + rawUrl := baseUrl + "/search/?presentation.timing=true&query=artist%3A+foo&timeout=3.4s" + assertRequests([]*http.Request{createFeedRequest(baseUrl), 
createFeedRequest(baseUrl), createSearchRequest(rawUrl), createSearchRequest(rawUrl)}, client, t) +} + +func TestSingleTestWithCloudAndEndpoints(t *testing.T) { + cmd := command{args: []string{"test", "testdata/tests/system-test/test.json", "-t", "cloud", "-a", "t.a.i"}} + cmd.homeDir = filepath.Join(t.TempDir(), ".vespa") + os.MkdirAll(cmd.homeDir, 0700) + keyFile := filepath.Join(cmd.homeDir, "key") + certFile := filepath.Join(cmd.homeDir, "cert") + + os.Setenv("VESPA_CLI_DATA_PLANE_KEY_FILE", keyFile) + os.Setenv("VESPA_CLI_DATA_PLANE_CERT_FILE", certFile) + os.Setenv("VESPA_CLI_ENDPOINTS", "{\"endpoints\":[{\"cluster\":\"container\",\"url\":\"https://url\"}]}") + + kp, _ := vespa.CreateKeyPair() + ioutil.WriteFile(keyFile, kp.PrivateKey, 0600) + ioutil.WriteFile(certFile, kp.Certificate, 0600) + + client := &mockHttpClient{} + searchResponse, _ := ioutil.ReadFile("testdata/tests/response.json") + client.NextStatus(200) + client.NextStatus(200) + client.NextResponse(200, string(searchResponse)) + client.NextResponse(200, string(searchResponse)) + + expectedBytes, _ := ioutil.ReadFile("testdata/tests/expected.out") + outBytes, errBytes := execute(cmd, t, client) + assert.Equal(t, string(expectedBytes), outBytes) + assert.Equal(t, "", errBytes) + + baseUrl := "https://url" + rawUrl := baseUrl + "/search/?presentation.timing=true&query=artist%3A+foo&timeout=3.4s" + assertRequests([]*http.Request{createFeedRequest(baseUrl), createFeedRequest(baseUrl), createSearchRequest(rawUrl), createSearchRequest(rawUrl)}, client, t) +} + +func createFeedRequest(urlPrefix string) *http.Request { + return createRequest("POST", + urlPrefix+"/document/v1/test/music/docid/doc?timeout=3.4s", + "{\"fields\":{\"artist\":\"Foo Fighters\"}}") +} + +func createSearchRequest(rawUrl string) *http.Request { + return createRequest("GET", rawUrl, "") +} + +func createRequest(method string, uri string, body string) *http.Request { + requestUrl, _ := url.ParseRequestURI(uri) + return 
&http.Request{ + URL: requestUrl, + Method: method, + Header: nil, + Body: ioutil.NopCloser(strings.NewReader(body)), + } +} + +func assertRequests(requests []*http.Request, client *mockHttpClient, t *testing.T) { + if assert.Equal(t, len(requests), len(client.requests)) { + for i, e := range requests { + a := client.requests[i] + assert.Equal(t, e.URL.String(), a.URL.String()) + assert.Equal(t, e.Method, a.Method) + assert.Equal(t, util.ReaderToJSON(e.Body), util.ReaderToJSON(a.Body)) + } + } +} diff --git a/client/go/cmd/testdata/empty.json b/client/go/cmd/testdata/empty.json new file mode 100644 index 00000000000..9e26dfeeb6e --- /dev/null +++ b/client/go/cmd/testdata/empty.json @@ -0,0 +1 @@ +{}
\ No newline at end of file diff --git a/client/go/cmd/testdata/tests/body.json b/client/go/cmd/testdata/tests/body.json new file mode 100644 index 00000000000..767330b1a2d --- /dev/null +++ b/client/go/cmd/testdata/tests/body.json @@ -0,0 +1,12 @@ +{ + "root": { + "id": "toplevel", + "coverage": { + "full": true + }, + "fields": { + "totalCount" : 1 + }, + "children": [{}] + } +}
\ No newline at end of file diff --git a/client/go/cmd/testdata/tests/expected-suite.out b/client/go/cmd/testdata/tests/expected-suite.out new file mode 100644 index 00000000000..df916f50a95 --- /dev/null +++ b/client/go/cmd/testdata/tests/expected-suite.out @@ -0,0 +1,385 @@ +My test: .... OK +wrong-bool-value.json: failed +Step 1: +Unexpected value at /root/coverage/full +Expected: false +Actual: true +Requested: GET at http://127.0.0.1:8080/search/ +Response: +{ + "root": { + "children": [ + { + "fields": { + "artist": "Foo Fighters", + "documentid": "id:test:music::doc", + "sddocname": "music" + }, + "id": "id:test:music::doc", + "relevance": 0.38186238359951247, + "source": "music" + } + ], + "coverage": { + "coverage": 100, + "documents": 1, + "full": true, + "nodes": 1, + "results": 1, + "resultsFull": 1 + }, + "fields": { + "totalCount": 1 + }, + "id": "toplevel", + "relevance": 1 + }, + "timing": { + "querytime": 0.003, + "searchtime": 0.004, + "summaryfetchtime": 0 + } +} + +wrong-code.json: failed +Step 1: +Unexpected status code +Expected: 123 +Actual: 200 +Requested: GET at http://127.0.0.1:8080/search/?foo=%2F +Response: +{ + "root": { + "children": [ + { + "fields": { + "artist": "Foo Fighters", + "documentid": "id:test:music::doc", + "sddocname": "music" + }, + "id": "id:test:music::doc", + "relevance": 0.38186238359951247, + "source": "music" + } + ], + "coverage": { + "coverage": 100, + "documents": 1, + "full": true, + "nodes": 1, + "results": 1, + "resultsFull": 1 + }, + "fields": { + "totalCount": 1 + }, + "id": "toplevel", + "relevance": 1 + }, + "timing": { + "querytime": 0.003, + "searchtime": 0.004, + "summaryfetchtime": 0 + } +} + +wrong-element-count.json: failed +Step 1: +Unexpected number of elements at /root/children +Expected: 0 +Actual: 1 +Requested: GET at http://127.0.0.1:8080/search/ +Response: +{ + "root": { + "children": [ + { + "fields": { + "artist": "Foo Fighters", + "documentid": "id:test:music::doc", + "sddocname": "music" 
+ }, + "id": "id:test:music::doc", + "relevance": 0.38186238359951247, + "source": "music" + } + ], + "coverage": { + "coverage": 100, + "documents": 1, + "full": true, + "nodes": 1, + "results": 1, + "resultsFull": 1 + }, + "fields": { + "totalCount": 1 + }, + "id": "toplevel", + "relevance": 1 + }, + "timing": { + "querytime": 0.003, + "searchtime": 0.004, + "summaryfetchtime": 0 + } +} + +wrong-field-name.json: failed +Step 1: +Missing expected field at /root/fields/totalCountDracula +Requested: GET at http://127.0.0.1:8080/search/ +Response: +{ + "root": { + "children": [ + { + "fields": { + "artist": "Foo Fighters", + "documentid": "id:test:music::doc", + "sddocname": "music" + }, + "id": "id:test:music::doc", + "relevance": 0.38186238359951247, + "source": "music" + } + ], + "coverage": { + "coverage": 100, + "documents": 1, + "full": true, + "nodes": 1, + "results": 1, + "resultsFull": 1 + }, + "fields": { + "totalCount": 1 + }, + "id": "toplevel", + "relevance": 1 + }, + "timing": { + "querytime": 0.003, + "searchtime": 0.004, + "summaryfetchtime": 0 + } +} + +wrong-float-value.json: failed +Step 1: +Unexpected value at /root/children/0/relevance +Expected: 0.381862373599 +Actual: 0.38186238359951247 +Requested: GET at http://127.0.0.1:8080/search/ +Response: +{ + "root": { + "children": [ + { + "fields": { + "artist": "Foo Fighters", + "documentid": "id:test:music::doc", + "sddocname": "music" + }, + "id": "id:test:music::doc", + "relevance": 0.38186238359951247, + "source": "music" + } + ], + "coverage": { + "coverage": 100, + "documents": 1, + "full": true, + "nodes": 1, + "results": 1, + "resultsFull": 1 + }, + "fields": { + "totalCount": 1 + }, + "id": "toplevel", + "relevance": 1 + }, + "timing": { + "querytime": 0.003, + "searchtime": 0.004, + "summaryfetchtime": 0 + } +} + +wrong-int-value.json: failed +Step 1: +Unexpected value at /root/fields/totalCount +Expected: 2 +Actual: 1 +Requested: GET at http://127.0.0.1:8080/search/ +Response: +{ + 
"root": { + "children": [ + { + "fields": { + "artist": "Foo Fighters", + "documentid": "id:test:music::doc", + "sddocname": "music" + }, + "id": "id:test:music::doc", + "relevance": 0.38186238359951247, + "source": "music" + } + ], + "coverage": { + "coverage": 100, + "documents": 1, + "full": true, + "nodes": 1, + "results": 1, + "resultsFull": 1 + }, + "fields": { + "totalCount": 1 + }, + "id": "toplevel", + "relevance": 1 + }, + "timing": { + "querytime": 0.003, + "searchtime": 0.004, + "summaryfetchtime": 0 + } +} + +wrong-null-value.json: failed +Step 1: +Missing expected field at /boot +Requested: GET at http://127.0.0.1:8080/search/ +Response: +{ + "root": { + "children": [ + { + "fields": { + "artist": "Foo Fighters", + "documentid": "id:test:music::doc", + "sddocname": "music" + }, + "id": "id:test:music::doc", + "relevance": 0.38186238359951247, + "source": "music" + } + ], + "coverage": { + "coverage": 100, + "documents": 1, + "full": true, + "nodes": 1, + "results": 1, + "resultsFull": 1 + }, + "fields": { + "totalCount": 1 + }, + "id": "toplevel", + "relevance": 1 + }, + "timing": { + "querytime": 0.003, + "searchtime": 0.004, + "summaryfetchtime": 0 + } +} + +wrong-string-value.json: failed +Step 1: +Unexpected value at /root/children/0/fields/artist +Expected: "Boo Fighters" +Actual: "Foo Fighters" +Requested: GET at http://127.0.0.1:8080/search/ +Response: +{ + "root": { + "children": [ + { + "fields": { + "artist": "Foo Fighters", + "documentid": "id:test:music::doc", + "sddocname": "music" + }, + "id": "id:test:music::doc", + "relevance": 0.38186238359951247, + "source": "music" + } + ], + "coverage": { + "coverage": 100, + "documents": 1, + "full": true, + "nodes": 1, + "results": 1, + "resultsFull": 1 + }, + "fields": { + "totalCount": 1 + }, + "id": "toplevel", + "relevance": 1 + }, + "timing": { + "querytime": 0.003, + "searchtime": 0.004, + "summaryfetchtime": 0 + } +} + +wrong-type.json: failed +Step 1: +Unexpected type at 
/root/fields/totalCount +Expected: "1" +Actual: 1 +Requested: GET at http://127.0.0.1:8080/search/ +Response: +{ + "root": { + "children": [ + { + "fields": { + "artist": "Foo Fighters", + "documentid": "id:test:music::doc", + "sddocname": "music" + }, + "id": "id:test:music::doc", + "relevance": 0.38186238359951247, + "source": "music" + } + ], + "coverage": { + "coverage": 100, + "documents": 1, + "full": true, + "nodes": 1, + "results": 1, + "resultsFull": 1 + }, + "fields": { + "totalCount": 1 + }, + "id": "toplevel", + "relevance": 1 + }, + "timing": { + "querytime": 0.003, + "searchtime": 0.004, + "summaryfetchtime": 0 + } +} + +Failure: 9 of 10 tests failed: +wrong-bool-value.json: Step 1: Unexpected value at /root/coverage/full: true +wrong-code.json: Step 1: Unexpected status code: 200 +wrong-element-count.json: Step 1: Unexpected number of elements at /root/children: 1 +wrong-field-name.json: Step 1: Missing expected field at /root/fields/totalCountDracula +wrong-float-value.json: Step 1: Unexpected value at /root/children/0/relevance: 0.38186238359951247 +wrong-int-value.json: Step 1: Unexpected value at /root/fields/totalCount: 1 +wrong-null-value.json: Step 1: Missing expected field at /boot +wrong-string-value.json: Step 1: Unexpected value at /root/children/0/fields/artist: "Foo Fighters" +wrong-type.json: Step 1: Unexpected type at /root/fields/totalCount: 1 diff --git a/client/go/cmd/testdata/tests/expected.out b/client/go/cmd/testdata/tests/expected.out new file mode 100644 index 00000000000..2ca35fe6a37 --- /dev/null +++ b/client/go/cmd/testdata/tests/expected.out @@ -0,0 +1,3 @@ +My test: .... 
OK + +Success: 1 test OK diff --git a/client/go/cmd/testdata/tests/production-test/external.json b/client/go/cmd/testdata/tests/production-test/external.json new file mode 100644 index 00000000000..af288bc8b1b --- /dev/null +++ b/client/go/cmd/testdata/tests/production-test/external.json @@ -0,0 +1,9 @@ +{ + "steps": [ + { + "request": { + "uri": "https://my.service:123/path?query=wohoo" + } + } + ] +}
\ No newline at end of file diff --git a/client/go/cmd/testdata/tests/production-test/illegal-reference.json b/client/go/cmd/testdata/tests/production-test/illegal-reference.json new file mode 100644 index 00000000000..edd8a2fafeb --- /dev/null +++ b/client/go/cmd/testdata/tests/production-test/illegal-reference.json @@ -0,0 +1,14 @@ +{ + "steps": [ + { + "request": { + "body": "foo/../../../empty.json" + } + }, + { + "request": { + "body": "foo/../../../../this-is-not-ok.json" + } + } + ] +}
\ No newline at end of file diff --git a/client/go/cmd/testdata/tests/response.json b/client/go/cmd/testdata/tests/response.json new file mode 100644 index 00000000000..48368b935a8 --- /dev/null +++ b/client/go/cmd/testdata/tests/response.json @@ -0,0 +1,34 @@ +{ + "root": { + "children": [ + { + "fields": { + "artist": "Foo Fighters", + "documentid": "id:test:music::doc", + "sddocname": "music" + }, + "id": "id:test:music::doc", + "relevance": 0.38186238359951247, + "source": "music" + } + ], + "coverage": { + "coverage": 100, + "documents": 1, + "full": true, + "nodes": 1, + "results": 1, + "resultsFull": 1 + }, + "fields": { + "totalCount": 1 + }, + "id": "toplevel", + "relevance": 1 + }, + "timing": { + "querytime": 0.003, + "searchtime": 0.004, + "summaryfetchtime": 0 + } +}
\ No newline at end of file diff --git a/client/go/cmd/testdata/tests/staging-test/not-json b/client/go/cmd/testdata/tests/staging-test/not-json new file mode 100644 index 00000000000..b6fc4c620b6 --- /dev/null +++ b/client/go/cmd/testdata/tests/staging-test/not-json @@ -0,0 +1 @@ +hello
\ No newline at end of file diff --git a/client/go/cmd/testdata/tests/system-test/foo/body.json b/client/go/cmd/testdata/tests/system-test/foo/body.json new file mode 100644 index 00000000000..0bbf626eafe --- /dev/null +++ b/client/go/cmd/testdata/tests/system-test/foo/body.json @@ -0,0 +1,5 @@ +{ + "fields": { + "artist": "Foo Fighters" + } +}
\ No newline at end of file diff --git a/client/go/cmd/testdata/tests/system-test/foo/query.json b/client/go/cmd/testdata/tests/system-test/foo/query.json new file mode 100644 index 00000000000..25b8c5b0039 --- /dev/null +++ b/client/go/cmd/testdata/tests/system-test/foo/query.json @@ -0,0 +1,3 @@ +{ + "query": "artist: foo" +} diff --git a/client/go/cmd/testdata/tests/system-test/test.json b/client/go/cmd/testdata/tests/system-test/test.json new file mode 100644 index 00000000000..2e327b5e5df --- /dev/null +++ b/client/go/cmd/testdata/tests/system-test/test.json @@ -0,0 +1,65 @@ +{ + "name": "My test", + "defaults": { + "cluster": "container", + "parameters": { + "timeout": "3.4s" + } + }, + "steps": [ + { + "name": "feed music", + "request": { + "method": "POST", + "body": "foo/body.json", + "uri": "/document/v1/test/music/docid/doc" + } + }, + { + "name": "re-feed music", + "request": { + "method": "POST", + "body": { + "fields": { + "artist": "Foo Fighters" + } + }, + "uri": "/document/v1/test/music/docid/doc" + } + }, + { + "name": "query for foo", + "request": { + "uri": "/search/?presentation.timing=true", + "parameters": { + "query": "artist: foo" + } + }, + "response": { + "code": 200, + "body": "../body.json" + } + }, + { + "name": "query for foo again", + "request": { + "uri": "/search/?presentation.timing=true", + "parameters": "foo/query.json" + }, + "response": { + "body": { + "root": { + "children": [ + { + "fields": { + "artist": "Foo Fighters" + }, + "relevance": 0.381862383599 + } + ] + } + } + } + } + ] +} diff --git a/client/go/cmd/testdata/tests/system-test/wrong-bool-value.json b/client/go/cmd/testdata/tests/system-test/wrong-bool-value.json new file mode 100644 index 00000000000..c594a206347 --- /dev/null +++ b/client/go/cmd/testdata/tests/system-test/wrong-bool-value.json @@ -0,0 +1,15 @@ +{ + "steps": [ + { + "response": { + "body": { + "root": { + "coverage": { + "full": false + } + } + } + } + } + ] +} diff --git 
a/client/go/cmd/testdata/tests/system-test/wrong-code.json b/client/go/cmd/testdata/tests/system-test/wrong-code.json new file mode 100644 index 00000000000..c325054faa1 --- /dev/null +++ b/client/go/cmd/testdata/tests/system-test/wrong-code.json @@ -0,0 +1,14 @@ +{ + "steps": [ + { + "request": { + "parameters": { + "foo": "/" + } + }, + "response": { + "code": 123 + } + } + ] +} diff --git a/client/go/cmd/testdata/tests/system-test/wrong-element-count.json b/client/go/cmd/testdata/tests/system-test/wrong-element-count.json new file mode 100644 index 00000000000..a772af67a78 --- /dev/null +++ b/client/go/cmd/testdata/tests/system-test/wrong-element-count.json @@ -0,0 +1,13 @@ +{ + "steps": [ + { + "response": { + "body": { + "root": { + "children": [] + } + } + } + } + ] +} diff --git a/client/go/cmd/testdata/tests/system-test/wrong-field-name.json b/client/go/cmd/testdata/tests/system-test/wrong-field-name.json new file mode 100644 index 00000000000..6ce3d055584 --- /dev/null +++ b/client/go/cmd/testdata/tests/system-test/wrong-field-name.json @@ -0,0 +1,15 @@ +{ + "steps": [ + { + "response": { + "body": { + "root": { + "fields": { + "totalCountDracula" : 1 + } + } + } + } + } + ] +} diff --git a/client/go/cmd/testdata/tests/system-test/wrong-float-value.json b/client/go/cmd/testdata/tests/system-test/wrong-float-value.json new file mode 100644 index 00000000000..6a1b221a91a --- /dev/null +++ b/client/go/cmd/testdata/tests/system-test/wrong-float-value.json @@ -0,0 +1,17 @@ +{ + "steps": [ + { + "response": { + "body": { + "root": { + "children": [ + { + "relevance": 0.381862373599 + } + ] + } + } + } + } + ] +} diff --git a/client/go/cmd/testdata/tests/system-test/wrong-int-value.json b/client/go/cmd/testdata/tests/system-test/wrong-int-value.json new file mode 100644 index 00000000000..d61a8b002c2 --- /dev/null +++ b/client/go/cmd/testdata/tests/system-test/wrong-int-value.json @@ -0,0 +1,15 @@ +{ + "steps": [ + { + "response": { + "body": { + "root": { + 
"fields": { + "totalCount" : 2 + } + } + } + } + } + ] +} diff --git a/client/go/cmd/testdata/tests/system-test/wrong-null-value.json b/client/go/cmd/testdata/tests/system-test/wrong-null-value.json new file mode 100644 index 00000000000..ea78357c99e --- /dev/null +++ b/client/go/cmd/testdata/tests/system-test/wrong-null-value.json @@ -0,0 +1,11 @@ +{ + "steps": [ + { + "response": { + "body": { + "boot": null + } + } + } + ] +} diff --git a/client/go/cmd/testdata/tests/system-test/wrong-string-value.json b/client/go/cmd/testdata/tests/system-test/wrong-string-value.json new file mode 100644 index 00000000000..5f56ebaab6d --- /dev/null +++ b/client/go/cmd/testdata/tests/system-test/wrong-string-value.json @@ -0,0 +1,19 @@ +{ + "steps": [ + { + "response": { + "body": { + "root": { + "children": [ + { + "fields": { + "artist": "Boo Fighters" + } + } + ] + } + } + } + } + ] +} diff --git a/client/go/cmd/testdata/tests/system-test/wrong-type.json b/client/go/cmd/testdata/tests/system-test/wrong-type.json new file mode 100644 index 00000000000..6be28ff68ff --- /dev/null +++ b/client/go/cmd/testdata/tests/system-test/wrong-type.json @@ -0,0 +1,15 @@ +{ + "steps": [ + { + "response": { + "body": { + "root": { + "fields": { + "totalCount" : "1" + } + } + } + } + } + ] +} diff --git a/client/go/cmd/vespa/main.go b/client/go/cmd/vespa/main.go index 5fdf64f5ab4..32828b15aa4 100644 --- a/client/go/cmd/vespa/main.go +++ b/client/go/cmd/vespa/main.go @@ -6,8 +6,11 @@ package main import ( "github.com/vespa-engine/vespa/client/go/cmd" + "os" ) func main() { - cmd.Execute() + if err := cmd.Execute(); err != nil { + os.Exit(1) + } } diff --git a/client/go/util/http.go b/client/go/util/http.go index acd9bb4f7ec..d5b8e3128ff 100644 --- a/client/go/util/http.go +++ b/client/go/util/http.go @@ -19,7 +19,7 @@ var ActiveHttpClient = CreateClient(time.Second * 10) type HttpClient interface { Do(request *http.Request, timeout time.Duration) (response *http.Response, error error) - 
UseCertificate(certificate tls.Certificate) + UseCertificate(certificate []tls.Certificate) } type defaultHttpClient struct { @@ -33,9 +33,9 @@ func (c *defaultHttpClient) Do(request *http.Request, timeout time.Duration) (re return c.client.Do(request) } -func (c *defaultHttpClient) UseCertificate(certificate tls.Certificate) { +func (c *defaultHttpClient) UseCertificate(certificates []tls.Certificate) { c.client.Transport = &http.Transport{TLSClientConfig: &tls.Config{ - Certificates: []tls.Certificate{certificate}, + Certificates: certificates, }} } diff --git a/client/go/util/http_test.go b/client/go/util/http_test.go index 0a0de1fdd4c..e87a1e5ada4 100644 --- a/client/go/util/http_test.go +++ b/client/go/util/http_test.go @@ -36,7 +36,7 @@ func (c mockHttpClient) Do(request *http.Request, timeout time.Duration) (respon nil } -func (c mockHttpClient) UseCertificate(certificate tls.Certificate) {} +func (c mockHttpClient) UseCertificate(certificates []tls.Certificate) {} func TestHttpRequest(t *testing.T) { ActiveHttpClient = mockHttpClient{} diff --git a/client/go/util/spinner.go b/client/go/util/spinner.go index 1deb4296d28..9f3c2cb4e44 100644 --- a/client/go/util/spinner.go +++ b/client/go/util/spinner.go @@ -11,22 +11,24 @@ import ( ) const ( - spinnerTextEllipsis = "..." 
- spinnerTextDone = "done" - spinnerTextFailed = "failed" - spinnerColor = "blue" + spinnerTextDone = "done" + spinnerTextFailed = "failed" + spinnerColor = "blue" ) var messages = os.Stderr func Spinner(text string, fn func() error) error { - initialMsg := text + spinnerTextEllipsis + " " - doneMsg := initialMsg + spinnerTextDone + "\n" - failMsg := initialMsg + spinnerTextFailed + "\n" - + initialMsg := text + " " + doneMsg := "\r" + initialMsg + spinnerTextDone + "\n" + failMsg := "\r" + initialMsg + spinnerTextFailed + "\n" return loading(initialMsg, doneMsg, failMsg, fn) } +func Waiting(fn func() error) error { + return loading("", "", "", fn) +} + func loading(initialMsg, doneMsg, failMsg string, fn func() error) error { done := make(chan struct{}) errc := make(chan error) @@ -39,7 +41,7 @@ func loading(initialMsg, doneMsg, failMsg string, fn func() error) error { s.HideCursor = true s.Writer = messages - if err := s.Color(spinnerColor); err != nil { + if err := s.Color(spinnerColor, "bold"); err != nil { panic(Error(err, "failed setting spinner color")) } @@ -55,6 +57,7 @@ func loading(initialMsg, doneMsg, failMsg string, fn func() error) error { err := fn() errc <- err <-done + return err } diff --git a/client/go/vespa/deploy.go b/client/go/vespa/deploy.go index 252a646bcfc..d52fc969c37 100644 --- a/client/go/vespa/deploy.go +++ b/client/go/vespa/deploy.go @@ -73,7 +73,7 @@ func (d DeploymentOpts) String() string { func (d *DeploymentOpts) IsCloud() bool { return d.Target.Type() == cloudTargetType } func (d *DeploymentOpts) url(path string) (*url.URL, error) { - service, err := d.Target.Service(deployService, 0, 0) + service, err := d.Target.Service(deployService, 0, 0, "") if err != nil { return nil, err } @@ -139,7 +139,7 @@ func (ap *ApplicationPackage) zipReader(test bool) (io.ReadCloser, error) { tempZip.Close() os.Remove(tempZip.Name()) }() - if err := zipDir(ap.Path, tempZip.Name()); err != nil { + if err := zipDir(zipFile, tempZip.Name()); err != 
nil { return nil, err } zipFile = tempZip.Name() @@ -167,6 +167,10 @@ func FindApplicationPackage(zipOrDir string, requirePackaging bool) (Application } } if util.PathExists(filepath.Join(zipOrDir, "src", "main", "application")) { + if util.PathExists(filepath.Join(zipOrDir, "src", "test", "application")) { + return ApplicationPackage{Path: filepath.Join(zipOrDir, "src", "main", "application"), + TestPath: filepath.Join(zipOrDir, "src", "test", "application")}, nil + } return ApplicationPackage{Path: filepath.Join(zipOrDir, "src", "main", "application")}, nil } if util.PathExists(filepath.Join(zipOrDir, "services.xml")) { @@ -370,7 +374,12 @@ func uploadApplicationPackage(url *url.URL, opts DeploymentOpts) (int64, error) if err := opts.Target.PrepareApiRequest(request, sigKeyId); err != nil { return 0, err } - response, err := util.HttpDo(request, time.Minute*10, serviceDescription) + + var response *http.Response + err = util.Spinner("Uploading application package ...", func() error { + response, err = util.HttpDo(request, time.Minute*10, serviceDescription) + return err + }) if err != nil { return 0, err } @@ -440,7 +449,10 @@ func zipDir(dir string, destination string) error { } defer file.Close() - zippath := strings.TrimPrefix(path, dir) + zippath, err := filepath.Rel(dir, path) + if err != nil { + return err + } zipfile, err := w.Create(zippath) if err != nil { return err diff --git a/client/go/vespa/target.go b/client/go/vespa/target.go index 093cb2b5cad..204dda6538f 100644 --- a/client/go/vespa/target.go +++ b/client/go/vespa/target.go @@ -39,7 +39,6 @@ type Service struct { BaseURL string Name string TLSOptions TLSOptions - Target *Target } // Target represents a Vespa platform, running named Vespa services. @@ -48,7 +47,7 @@ type Target interface { Type() string // Service returns the service for given name. If timeout is non-zero, wait for the service to converge. 
- Service(name string, timeout time.Duration, sessionOrRunID int64) (*Service, error) + Service(name string, timeout time.Duration, sessionOrRunID int64, cluster string) (*Service, error) // PrintLog writes the logs of this deployment using given options to control output. PrintLog(options LogOptions) error @@ -91,7 +90,7 @@ func (t *customTarget) PrepareApiRequest(req *http.Request, sigKeyId string) err // Do sends request to this service. Any required authentication happens automatically. func (s *Service) Do(request *http.Request, timeout time.Duration) (*http.Response, error) { if s.TLSOptions.KeyPair.Certificate != nil { - util.ActiveHttpClient.UseCertificate(s.TLSOptions.KeyPair) + util.ActiveHttpClient.UseCertificate([]tls.Certificate{s.TLSOptions.KeyPair}) } return util.HttpDo(request, timeout, s.Description()) } @@ -129,7 +128,7 @@ func (s *Service) Description() string { func (t *customTarget) Type() string { return t.targetType } -func (t *customTarget) Service(name string, timeout time.Duration, sessionID int64) (*Service, error) { +func (t *customTarget) Service(name string, timeout time.Duration, sessionOrRunID int64, cluster string) (*Service, error) { if timeout > 0 && name != deployService { if err := t.waitForConvergence(timeout); err != nil { return nil, err @@ -171,7 +170,7 @@ func (t *customTarget) urlWithPort(serviceName string) (string, error) { } func (t *customTarget) waitForConvergence(timeout time.Duration) error { - deployer, err := t.Service(deployService, 0, 0) + deployer, err := t.Service(deployService, 0, 0, "") if err != nil { return err } @@ -241,8 +240,8 @@ func (t *cloudTarget) resolveEndpoint(cluster string) (string, error) { func (t *cloudTarget) Type() string { return t.targetType } -func (t *cloudTarget) Service(name string, timeout time.Duration, runID int64) (*Service, error) { - if name != deployService { +func (t *cloudTarget) Service(name string, timeout time.Duration, runID int64, cluster string) (*Service, error) { + 
if name != deployService && t.urlsByCluster == nil { if err := t.waitForEndpoints(timeout, runID); err != nil { return nil, err } @@ -251,13 +250,13 @@ func (t *cloudTarget) Service(name string, timeout time.Duration, runID int64) ( case deployService: return &Service{Name: name, BaseURL: t.apiURL}, nil case queryService: - queryURL, err := t.resolveEndpoint("") + queryURL, err := t.resolveEndpoint(cluster) if err != nil { return nil, err } return &Service{Name: name, BaseURL: queryURL, TLSOptions: t.tlsOptions}, nil case documentService: - documentURL, err := t.resolveEndpoint("") + documentURL, err := t.resolveEndpoint(cluster) if err != nil { return nil, err } @@ -489,7 +488,7 @@ func CustomTarget(baseURL string) Target { // CloudTarget creates a Target for the Vespa Cloud platform. func CloudTarget(apiURL string, deployment Deployment, apiKey []byte, tlsOptions TLSOptions, logOptions LogOptions, - authConfigPath string, systemName string, cloudAuth string) Target { + authConfigPath string, systemName string, cloudAuth string, urlsByCluster map[string]string) Target { return &cloudTarget{ apiURL: apiURL, targetType: cloudTargetType, @@ -500,6 +499,7 @@ func CloudTarget(apiURL string, deployment Deployment, apiKey []byte, tlsOptions authConfigPath: authConfigPath, systemName: systemName, cloudAuth: cloudAuth, + urlsByCluster: urlsByCluster, } } @@ -536,7 +536,7 @@ type requestFunc func() *http.Request func wait(fn responseFunc, reqFn requestFunc, certificate *tls.Certificate, timeout time.Duration) (int, error) { if certificate != nil { - util.ActiveHttpClient.UseCertificate(*certificate) + util.ActiveHttpClient.UseCertificate([]tls.Certificate{*certificate}) } var ( httpErr error diff --git a/client/go/vespa/target_test.go b/client/go/vespa/target_test.go index 9d2418897e3..0cfe9f1962c 100644 --- a/client/go/vespa/target_test.go +++ b/client/go/vespa/target_test.go @@ -82,11 +82,11 @@ func TestCustomTargetWait(t *testing.T) { defer srv.Close() target := 
CustomTarget(srv.URL) - _, err := target.Service("query", time.Millisecond, 42) + _, err := target.Service("query", time.Millisecond, 42, "") assert.NotNil(t, err) vc.deploymentConverged = true - _, err = target.Service("query", time.Millisecond, 42) + _, err = target.Service("query", time.Millisecond, 42, "") assert.Nil(t, err) assertServiceWait(t, 200, target, "deploy") @@ -104,11 +104,11 @@ func TestCloudTargetWait(t *testing.T) { target := createCloudTarget(t, srv.URL, &logWriter) assertServiceWait(t, 200, target, "deploy") - _, err := target.Service("query", time.Millisecond, 42) + _, err := target.Service("query", time.Millisecond, 42, "") assert.NotNil(t, err) vc.deploymentConverged = true - _, err = target.Service("query", time.Millisecond, 42) + _, err = target.Service("query", time.Millisecond, 42, "") assert.Nil(t, err) assertServiceWait(t, 500, target, "query") @@ -152,7 +152,7 @@ func createCloudTarget(t *testing.T, url string, logWriter io.Writer) Target { target := CloudTarget("https://example.com", Deployment{ Application: ApplicationID{Tenant: "t1", Application: "a1", Instance: "i1"}, Zone: ZoneID{Environment: "dev", Region: "us-north-1"}, - }, apiKey, TLSOptions{KeyPair: x509KeyPair}, LogOptions{Writer: logWriter}, "", "", "") + }, apiKey, TLSOptions{KeyPair: x509KeyPair}, LogOptions{Writer: logWriter}, "", "", "", nil) if ct, ok := target.(*cloudTarget); ok { ct.apiURL = url } else { @@ -162,13 +162,13 @@ func createCloudTarget(t *testing.T, url string, logWriter io.Writer) Target { } func assertServiceURL(t *testing.T, url string, target Target, service string) { - s, err := target.Service(service, 0, 42) + s, err := target.Service(service, 0, 42, "") assert.Nil(t, err) assert.Equal(t, url, s.BaseURL) } func assertServiceWait(t *testing.T, expectedStatus int, target Target, service string) { - s, err := target.Service(service, 0, 42) + s, err := target.Service(service, 0, 42, "") assert.Nil(t, err) status, err := s.Wait(0) diff --git 
a/client/pom.xml b/client/pom.xml index 3dee909b932..ea33b9f3adf 100644 --- a/client/pom.xml +++ b/client/pom.xml @@ -28,15 +28,8 @@ <version>1.6</version> </dependency> <dependency> - <groupId>org.spockframework</groupId> - <artifactId>spock-core</artifactId> - <version>1.3-groovy-2.5</version> - <scope>test</scope> - </dependency> - <dependency> - <groupId>org.codehaus.groovy</groupId> - <artifactId>groovy</artifactId> - <version>3.0.8</version> + <groupId>org.junit.jupiter</groupId> + <artifactId>junit-jupiter</artifactId> <scope>test</scope> </dependency> </dependencies> @@ -44,19 +37,6 @@ <build> <plugins> <plugin> - <groupId>org.codehaus.gmavenplus</groupId> - <artifactId>gmavenplus-plugin</artifactId> - <version>1.13.0</version> - <executions> - <execution> - <goals> - <goal>addTestSources</goal> - <goal>compileTests</goal> - </goals> - </execution> - </executions> - </plugin> - <plugin> <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-compiler-plugin</artifactId> <configuration> diff --git a/client/src/test/groovy/ai/vespa/client/dsl/QTest.groovy b/client/src/test/groovy/ai/vespa/client/dsl/QTest.groovy deleted file mode 100644 index 0d6e2ca3506..00000000000 --- a/client/src/test/groovy/ai/vespa/client/dsl/QTest.groovy +++ /dev/null @@ -1,677 +0,0 @@ -// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
-package ai.vespa.client.dsl - -import spock.lang.Specification - -class QTest extends Specification { - - def "select specific fields"() { - given: - def q = Q.select("f1", "f2") - .from("sd1") - .where("f1").contains("v1") - .semicolon() - .build() - - expect: - q == """yql=select f1, f2 from sd1 where f1 contains "v1";""" - } - - def "select from specific sources"() { - given: - def q = Q.select("*") - .from("sd1") - .where("f1").contains("v1") - .semicolon() - .build() - - expect: - q == """yql=select * from sd1 where f1 contains "v1";""" - } - - def "select from multiples sources"() { - given: - def q = Q.select("*") - .from("sd1", "sd2") - .where("f1").contains("v1") - .semicolon() - .build() - - expect: - q == """yql=select * from sources sd1, sd2 where f1 contains "v1";""" - } - - def "basic 'and', 'andnot', 'or', 'offset', 'limit', 'param', 'order by', and 'contains'"() { - given: - def q = Q.select("*") - .from("sd1") - .where("f1").contains("v1") - .and("f2").contains("v2") - .or("f3").contains("v3") - .andnot("f4").contains("v4") - .offset(1) - .limit(2) - .timeout(3) - .orderByDesc("f1") - .orderByAsc("f2") - .semicolon() - .param("paramk1", "paramv1") - .build() - - expect: - q == """yql=select * from sd1 where f1 contains "v1" and f2 contains "v2" or f3 contains "v3" and !(f4 contains "v4") order by f1 desc, f2 asc limit 2 offset 1 timeout 3;¶mk1=paramv1""" - } - - def "matches"() { - given: - def q = Q.select("*") - .from("sd1") - .where("f1").matches("v1") - .and("f2").matches("v2") - .or("f3").matches("v3") - .andnot("f4").matches("v4") - .semicolon() - .build() - - expect: - q == """yql=select * from sd1 where f1 matches "v1" and f2 matches "v2" or f3 matches "v3" and !(f4 matches "v4");""" - } - - def "numeric operations"() { - given: - def q = Q.select("*") - .from("sd1") - .where("f1").le(1) - .and("f2").lt(2) - .and("f3").ge(3) - .and("f4").gt(4) - .and("f5").eq(5) - .and("f6").inRange(6, 7) - .semicolon() - .build() - - expect: - q == 
"""yql=select * from sd1 where f1 <= 1 and f2 < 2 and f3 >= 3 and f4 > 4 and f5 = 5 and range(f6, 6, 7);""" - } - - def "long numeric operations"() { - given: - def q = Q.select("*") - .from("sd1") - .where("f1").le(1L) - .and("f2").lt(2L) - .and("f3").ge(3L) - .and("f4").gt(4L) - .and("f5").eq(5L) - .and("f6").inRange(6L, 7L) - .semicolon() - .build() - - expect: - q == """yql=select * from sd1 where f1 <= 1L and f2 < 2L and f3 >= 3L and f4 > 4L and f5 = 5L and range(f6, 6L, 7L);""" - } - - def "float numeric operations"() { - given: - def q = Q.select("*") - .from("sd1") - .where("f1").le(1.1) - .and("f2").lt(2.2) - .and("f3").ge(3.3) - .and("f4").gt(4.4) - .and("f5").eq(5.5) - .and("f6").inRange(6.6, 7.7) - .semicolon() - .build() - - expect: - q == """yql=select * from sd1 where f1 <= 1.1 and f2 < 2.2 and f3 >= 3.3 and f4 > 4.4 and f5 = 5.5 and range(f6, 6.6, 7.7);""" - } - - def "double numeric operations"() { - given: - def q = Q.select("*") - .from("sd1") - .where("f1").le(1.1D) - .and("f2").lt(2.2D) - .and("f3").ge(3.3D) - .and("f4").gt(4.4D) - .and("f5").eq(5.5D) - .and("f6").inRange(6.6D, 7.7D) - .semicolon() - .build() - - expect: - q == """yql=select * from sd1 where f1 <= 1.1 and f2 < 2.2 and f3 >= 3.3 and f4 > 4.4 and f5 = 5.5 and range(f6, 6.6, 7.7);""" - } - - def "nested queries"() { - given: - def q = Q.select("*") - .from("sd1") - .where("f1").contains("1") - .andnot(Q.p(Q.p("f2").contains("2").and("f3").contains("3")) - .or(Q.p("f2").contains("4").andnot("f3").contains("5"))) - .semicolon() - .build() - - expect: - q == """yql=select * from sd1 where f1 contains "1" and !((f2 contains "2" and f3 contains "3") or (f2 contains "4" and !(f3 contains "5")));""" - } - - def "userInput (with and with out defaultIndex)"() { - given: - def q = Q.select("*") - .from("sd1") - .where(Q.ui("value")) - .and(Q.ui("index", "value2")) - .semicolon() - .build() - - expect: - q == """yql=select * from sd1 where userInput(@_1) and 
([{"defaultIndex":"index"}]userInput(@_2_index));&_2_index=value2&_1=value""" - } - - def "dot product"() { - given: - def q = Q.select("*") - .from("sd1") - .where(Q.dotPdt("f1", [a: 1, b: 2, c: 3])) - .and("f2").contains("1") - .semicolon() - .build() - - expect: - q == """yql=select * from sd1 where dotProduct(f1, {"a":1,"b":2,"c":3}) and f2 contains "1";""" - } - - def "weighted set"() { - given: - def q = Q.select("*") - .from("sd1") - .where(Q.wtdSet("f1", [a: 1, b: 2, c: 3])) - .and("f2").contains("1") - .semicolon() - .build() - - expect: - q == """yql=select * from sd1 where weightedSet(f1, {"a":1,"b":2,"c":3}) and f2 contains "1";""" - } - - def "non empty"() { - given: - def q = Q.select("*") - .from("sd1") - .where(Q.nonEmpty(Q.p("f1").contains("v1"))) - .and("f2").contains("v2") - .semicolon() - .build() - - expect: - q == """yql=select * from sd1 where nonEmpty(f1 contains "v1") and f2 contains "v2";""" - } - - - def "wand (with and without annotation)"() { - given: - def q = Q.select("*") - .from("sd1") - .where(Q.wand("f1", [a: 1, b: 2, c: 3])) - .and(Q.wand("f2", [[1, 1], [2, 2]])) - .and( - Q.wand("f3", [[1, 1], [2, 2]]) - .annotate(A.a("scoreThreshold", 0.13)) - ) - .semicolon() - .build() - - expect: - q == """yql=select * from sd1 where wand(f1, {"a":1,"b":2,"c":3}) and wand(f2, [[1,1],[2,2]]) and ([{"scoreThreshold":0.13}]wand(f3, [[1,1],[2,2]]));""" - } - - def "weak and (with and without annotation)"() { - given: - def q = Q.select("*") - .from("sd1") - .where(Q.weakand(Q.p("f1").contains("v1").and("f2").contains("v2"))) - .and(Q.weakand(Q.p("f1").contains("v1").and("f2").contains("v2")) - .annotate(A.a("scoreThreshold", 0.13)) - ) - .semicolon() - .build() - - expect: - q == """yql=select * from sd1 where weakAnd(f1 contains "v1", f2 contains "v2") and ([{"scoreThreshold":0.13}]weakAnd(f1 contains "v1", f2 contains "v2"));""" - } - - def "geo location"() { - given: - def q = Q.select("*") - .from("sd1") - 
.where("a").contains("b").and(Q.geoLocation("taiwan", 25.105497, 121.597366, "200km")) - .semicolon() - .build() - - expect: - q == """yql=select * from sd1 where a contains "b" and geoLocation(taiwan, 25.105497, 121.597366, "200km");""" - } - - def "nearest neighbor query"() { - when: - def q = Q.select("*") - .from("sd1") - .where("a").contains("b") - .and(Q.nearestNeighbor("vec1", "vec2") - .annotate(A.a("targetHits", 10, "approximate", false)) - ) - .semicolon() - .build() - - then: - q == """yql=select * from sd1 where a contains "b" and ([{"approximate":false,"targetHits":10}]nearestNeighbor(vec1, vec2));""" - } - - def "invalid nearest neighbor should throws an exception (targetHits annotation is required)"() { - when: - def q = Q.select("*") - .from("sd1") - .where("a").contains("b").and(Q.nearestNeighbor("vec1", "vec2")) - .semicolon() - .build() - - then: - thrown(IllegalArgumentException) - } - - - def "rank with only query"() { - given: - def q = Q.select("*") - .from("sd1") - .where(Q.rank( - Q.p("f1").contains("v1") - ) - ) - .semicolon() - .build() - - expect: - q == """yql=select * from sd1 where rank(f1 contains "v1");""" - } - - def "rank"() { - given: - def q = Q.select("*") - .from("sd1") - .where(Q.rank( - Q.p("f1").contains("v1"), - Q.p("f2").contains("v2"), - Q.p("f3").eq(3)) - ) - .semicolon() - .build() - - expect: - q == """yql=select * from sd1 where rank(f1 contains "v1", f2 contains "v2", f3 = 3);""" - } - - def "rank with rank query array"() { - given: - Query[] ranks = [Q.p("f2").contains("v2"), Q.p("f3").eq(3)].toArray() - def q = Q.select("*") - .from("sd1") - .where(Q.rank( - Q.p("f1").contains("v1"), - ranks) - ) - .semicolon() - .build() - - expect: - q == """yql=select * from sd1 where rank(f1 contains "v1", f2 contains "v2", f3 = 3);""" - } - - def "string/function annotations"() { - given: - def q = Q.select("*") - .from("sd1") - .where("f1").contains(annotation, "v1") - .semicolon() - .build() - - expect: - q == """yql=select 
* from sd1 where f1 contains (${expected}"v1");""" - - where: - annotation | expected - A.filter() | """[{"filter":true}]""" - A.defaultIndex("idx") | """[{"defaultIndex":"idx"}]""" - A.a([a1: [k1: "v1", k2: 2]]) | """[{"a1":{"k1":"v1","k2":2}}]""" - } - - def "sub-expression annotations"() { - given: - def q = Q.select("*") - .from("sd1") - .where("f1").contains("v1").annotate(A.a("ak1", "av1")) - .semicolon() - .build() - - expect: - q == """yql=select * from sd1 where ([{"ak1":"av1"}](f1 contains "v1"));""" - } - - def "sub-expressions annotations (annotate in the middle of query)"() { - given: - def q = Q.select("*") - .from("sd1") - .where(Q.p("f1").contains("v1").annotate(A.a("ak1", "av1")).and("f2").contains("v2")) - .semicolon() - .build() - - expect: - q == """yql=select * from sd1 where ([{"ak1":"av1"}](f1 contains "v1" and f2 contains "v2"));""" - } - - def "sub-expressions annotations (annotate in nested queries)"() { - given: - def q = Q.select("*") - .from("sd1") - .where(Q.p( - Q.p("f1").contains("v1").annotate(A.a("ak1", "av1"))) - .and("f2").contains("v2") - ) - .semicolon() - .build() - - expect: - q == """yql=select * from sd1 where (([{"ak1":"av1"}](f1 contains "v1")) and f2 contains "v2");""" - } - - def "build query which created from Q.b without select and sources"() { - given: - def q = Q.p("f1").contains("v1") - .semicolon() - .build() - - expect: - q == """yql=select * from sources * where f1 contains "v1";""" - } - - def "order by"() { - given: - def q = Q.p("f1").contains("v1") - .orderByAsc("f2") - .orderByAsc(A.a([function: "uca", locale: "en_US", strength: "IDENTICAL"]), "f3") - .orderByDesc("f4") - .orderByDesc(A.a([function: "lowercase"]), "f5") - .semicolon() - .build() - - expect: - q == """yql=select * from sources * where f1 contains "v1" order by f2 asc, [{"function":"uca","locale":"en_US","strength":"IDENTICAL"}]f3 asc, f4 desc, [{"function":"lowercase"}]f5 desc;""" - } - - def "contains sameElement"() { - given: - def q = 
Q.p("f1").containsSameElement(Q.p("stime").le(1).and("etime").gt(2)) - .semicolon() - .build() - - expect: - q == """yql=select * from sources * where f1 contains sameElement(stime <= 1, etime > 2);""" - } - - def "contains phrase/near/onear/equiv"() { - given: - def funcName = "contains${operator.capitalize()}" - def q1 = Q.p("f1")."$funcName"("p1", "p2", "p3") - .semicolon() - .build() - def q2 = Q.p("f1")."$funcName"(["p1", "p2", "p3"]) - .semicolon() - .build() - - expect: - q1 == """yql=select * from sources * where f1 contains ${operator}("p1", "p2", "p3");""" - q2 == """yql=select * from sources * where f1 contains ${operator}("p1", "p2", "p3");""" - - where: - operator | _ - "phrase" | _ - "near" | _ - "onear" | _ - "equiv" | _ - } - - def "contains uri"() { - given: - def q = Q.p("f1").containsUri("https://test.uri") - .semicolon() - .build() - - expect: - q == """yql=select * from sources * where f1 contains uri("https://test.uri");""" - } - - def "contains uri with annotation"() { - given: - def q = Q.p("f1").containsUri(A.a("key", "value"), "https://test.uri") - .semicolon() - .build() - - expect: - q == """yql=select * from sources * where f1 contains ([{"key":"value"}]uri("https://test.uri"));""" - } - - def "nearestNeighbor"() { - given: - def q = Q.p("f1").nearestNeighbor("query_vector") - .semicolon() - .build() - - expect: - q == """yql=select * from sources * where nearestNeighbor(f1, query_vector);""" - } - - def "nearestNeighbor with annotation"() { - given: - def q = Q.p("f1").nearestNeighbor(A.a("targetHits", 10), "query_vector") - .semicolon() - .build() - - expect: - q == """yql=select * from sources * where ([{"targetHits":10}]nearestNeighbor(f1, query_vector));""" - } - - def "use contains instead of contains equiv when input size is 1"() { - def q = Q.p("f1").containsEquiv(["p1"]) - .semicolon() - .build() - - expect: - q == """yql=select * from sources * where f1 contains "p1";""" - } - - def "contains phrase/near/onear/equiv empty list 
should throw illegal argument exception"() { - given: - def funcName = "contains${operator.capitalize()}" - - when: - def q = Q.p("f1")."$funcName"([]) - .semicolon() - .build() - - then: - thrown(IllegalArgumentException) - - where: - operator | _ - "phrase" | _ - "near" | _ - "onear" | _ - "equiv" | _ - } - - - def "contains near/onear with annotation"() { - given: - def funcName = "contains${operator.capitalize()}" - def q = Q.p("f1")."$funcName"(A.a("distance", 5), "p1", "p2", "p3") - .semicolon() - .build() - - expect: - q == """yql=select * from sources * where f1 contains ([{"distance":5}]${operator}("p1", "p2", "p3"));""" - - where: - operator | _ - "near" | _ - "onear" | _ - } - - def "basic group syntax"() { - /* - example from vespa document: - https://docs.vespa.ai/en/grouping.html - all( group(a) max(5) each(output(count()) - all(max(1) each(output(summary()))) - all(group(b) each(output(count()) - all(max(1) each(output(summary()))) - all(group(c) each(output(count()) - all(max(1) each(output(summary())))))))) ); - */ - given: - def q = Q.p("f1").contains("v1") - .group( - G.all(G.group("a"), G.maxRtn(5), G.each(G.output(G.count()), - G.all(G.maxRtn(1), G.each(G.output(G.summary()))), - G.all(G.group("b"), G.each(G.output(G.count()), - G.all(G.maxRtn(1), G.each(G.output(G.summary()))), - G.all(G.group("c"), G.each(G.output(G.count()), - G.all(G.maxRtn(1), G.each(G.output(G.summary()))) - )) - )) - )) - ) - .semicolon() - .build() - - expect: - q == """yql=select * from sources * where f1 contains "v1" | all(group(a) max(5) each(output(count()) all(max(1) each(output(summary()))) all(group(b) each(output(count()) all(max(1) each(output(summary()))) all(group(c) each(output(count()) all(max(1) each(output(summary())))))))));""" - } - - def "set group syntax string directly"() { - /* - example from vespa document: - https://docs.vespa.ai/en/grouping.html - all( group(a) max(5) each(output(count()) - all(max(1) each(output(summary()))) - all(group(b) 
each(output(count()) - all(max(1) each(output(summary()))) - all(group(c) each(output(count()) - all(max(1) each(output(summary())))))))) ); - */ - given: - def q = Q.p("f1").contains("v1") - .group("all(group(a) max(5) each(output(count()) all(max(1) each(output(summary()))) all(group(b) each(output(count()) all(max(1) each(output(summary()))) all(group(c) each(output(count()) all(max(1) each(output(summary())))))))))") - .semicolon() - .build() - - expect: - q == """yql=select * from sources * where f1 contains "v1" | all(group(a) max(5) each(output(count()) all(max(1) each(output(summary()))) all(group(b) each(output(count()) all(max(1) each(output(summary()))) all(group(c) each(output(count()) all(max(1) each(output(summary())))))))));""" - } - - def "arbitrary annotations"() { - given: - def a = A.a("a1", "v1", "a2", 2, "a3", [k: "v", k2: 1], "a4", 4D, "a5", [1, 2, 3]) - expect: - a.toString() == """{"a1":"v1","a2":2,"a3":{"k":"v","k2":1},"a4":4.0,"a5":[1,2,3]}""" - } - - def "test programmability"() { - given: - def map = [a: "1", b: "2", c: "3"] - - when: - Query q = map - .entrySet() - .stream() - .map { entry -> Q.p(entry.key).contains(entry.value) } - .reduce { q1, q2 -> q1.and(q2) } - .get() - - then: - q.semicolon().build() == """yql=select * from sources * where a contains "1" and b contains "2" and c contains "3";""" - } - - def "test programmability 2"() { - given: - def map = [a: "1", b: "2", c: "3"] - def q = Q.p() - - when: - map.each { k, v -> - q.and(Q.p(k).contains(v)) - } - - then: - q.semicolon().build() == """yql=select * from sources * where a contains "1" and b contains "2" and c contains "3";""" - } - - def "empty queries should not print out"() { - given: - def q = Q.p(Q.p(Q.p().andnot(Q.p()).and(Q.p()))).and("a").contains("1").semicolon().build() - - expect: - q == """yql=select * from sources * where a contains "1";""" - } - - def "validate positive search term of strings"() { - given: - def q = 
Q.p(Q.p("k1").contains("v1").and("k2").contains("v2").andnot("k3").contains("v3")) - .andnot(Q.p("nk1").contains("nv1").and("nk2").contains("nv2").andnot("nk3").contains("nv3")) - .and(Q.p("k4").contains("v4") - .andnot(Q.p("k5").contains("v5").andnot("k6").contains("v6")) - ) - - expect: - q.hasPositiveSearchField("k1") - q.hasPositiveSearchField("k2") - q.hasPositiveSearchField("nk3") - q.hasPositiveSearchField("k6") - q.hasPositiveSearchField("k6", "v6") - !q.hasPositiveSearchField("k6", "v5") - - q.hasNegativeSearchField("k3") - q.hasNegativeSearchField("nk1") - q.hasNegativeSearchField("nk2") - q.hasNegativeSearchField("k5") - q.hasNegativeSearchField("k5", "v5") - !q.hasNegativeSearchField("k5", "v4") - } - - def "validate positive search term of user input"() { - given: - def q = Q.p(Q.ui("k1", "v1")).and(Q.ui("k2", "v2")).andnot(Q.ui("k3", "v3")) - .andnot(Q.p(Q.ui("nk1", "nv1")).and(Q.ui("nk2", "nv2")).andnot(Q.ui("nk3", "nv3"))) - .and(Q.p(Q.ui("k4", "v4")) - .andnot(Q.p(Q.ui("k5", "v5")).andnot(Q.ui("k6", "v6"))) - ) - - expect: - q.hasPositiveSearchField("k1") - q.hasPositiveSearchField("k2") - q.hasPositiveSearchField("nk3") - q.hasPositiveSearchField("k6") - q.hasPositiveSearchField("k6", "v6") - !q.hasPositiveSearchField("k6", "v5") - - q.hasNegativeSearchField("k3") - q.hasNegativeSearchField("nk1") - q.hasNegativeSearchField("nk2") - q.hasNegativeSearchField("k5") - q.hasNegativeSearchField("k5", "v5") - !q.hasNegativeSearchField("k5", "v4") - } -} diff --git a/client/src/test/java/ai/vespa/client/dsl/QTest.java b/client/src/test/java/ai/vespa/client/dsl/QTest.java new file mode 100644 index 00000000000..08ab603fa04 --- /dev/null +++ b/client/src/test/java/ai/vespa/client/dsl/QTest.java @@ -0,0 +1,727 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+package ai.vespa.client.dsl; + +import org.junit.jupiter.api.Test; + +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.Map; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + +/** + * @author unknown contributor + * @author bjorncs + */ +class QTest { + + @Test + void select_specific_fields() { + String q = Q.select("f1", "f2") + .from("sd1") + .where("f1").contains("v1") + .semicolon() + .build(); + + assertEquals(q, "yql=select f1, f2 from sd1 where f1 contains \"v1\";"); + } + + @Test + void select_from_specific_sources() { + String q = Q.select("*") + .from("sd1") + .where("f1").contains("v1") + .semicolon() + .build(); + + assertEquals(q, "yql=select * from sd1 where f1 contains \"v1\";"); + } + + @Test + void select_from_multiples_sources() { + String q = Q.select("*") + .from("sd1", "sd2") + .where("f1").contains("v1") + .semicolon() + .build(); + + assertEquals(q, "yql=select * from sources sd1, sd2 where f1 contains \"v1\";"); + } + + @Test + void basic_and_andnot_or_offset_limit_param_order_by_and_contains() { + String q = Q.select("*") + .from("sd1") + .where("f1").contains("v1") + .and("f2").contains("v2") + .or("f3").contains("v3") + .andnot("f4").contains("v4") + .offset(1) + .limit(2) + .timeout(3) + .orderByDesc("f1") + .orderByAsc("f2") + .semicolon() + .param("paramk1", "paramv1") + .build(); + + assertEquals(q, "yql=select * from sd1 where f1 contains \"v1\" and f2 contains \"v2\" or f3 contains \"v3\" and !(f4 contains \"v4\") order by f1 desc, f2 asc limit 2 offset 1 timeout 3;¶mk1=paramv1"); + } + + @Test + void matches() { + String q = Q.select("*") + .from("sd1") + .where("f1").matches("v1") + .and("f2").matches("v2") + .or("f3").matches("v3") + 
.andnot("f4").matches("v4") + .semicolon() + .build(); + + assertEquals(q, "yql=select * from sd1 where f1 matches \"v1\" and f2 matches \"v2\" or f3 matches \"v3\" and !(f4 matches \"v4\");"); + } + + @Test + void numeric_operations() { + String q = Q.select("*") + .from("sd1") + .where("f1").le(1) + .and("f2").lt(2) + .and("f3").ge(3) + .and("f4").gt(4) + .and("f5").eq(5) + .and("f6").inRange(6, 7) + .semicolon() + .build(); + + assertEquals(q, "yql=select * from sd1 where f1 <= 1 and f2 < 2 and f3 >= 3 and f4 > 4 and f5 = 5 and range(f6, 6, 7);"); + } + + @Test + void long_numeric_operations() { + String q = Q.select("*") + .from("sd1") + .where("f1").le(1L) + .and("f2").lt(2L) + .and("f3").ge(3L) + .and("f4").gt(4L) + .and("f5").eq(5L) + .and("f6").inRange(6L, 7L) + .semicolon() + .build(); + + assertEquals(q, "yql=select * from sd1 where f1 <= 1L and f2 < 2L and f3 >= 3L and f4 > 4L and f5 = 5L and range(f6, 6L, 7L);"); + } + + @Test + void float_numeric_operations() { + String q = Q.select("*") + .from("sd1") + .where("f1").le(1.1) + .and("f2").lt(2.2) + .and("f3").ge(3.3) + .and("f4").gt(4.4) + .and("f5").eq(5.5) + .and("f6").inRange(6.6, 7.7) + .semicolon() + .build(); + + assertEquals(q, "yql=select * from sd1 where f1 <= 1.1 and f2 < 2.2 and f3 >= 3.3 and f4 > 4.4 and f5 = 5.5 and range(f6, 6.6, 7.7);"); + } + + @Test + void double_numeric_operations() { + String q = Q.select("*") + .from("sd1") + .where("f1").le(1.1D) + .and("f2").lt(2.2D) + .and("f3").ge(3.3D) + .and("f4").gt(4.4D) + .and("f5").eq(5.5D) + .and("f6").inRange(6.6D, 7.7D) + .semicolon() + .build(); + + assertEquals(q, "yql=select * from sd1 where f1 <= 1.1 and f2 < 2.2 and f3 >= 3.3 and f4 > 4.4 and f5 = 5.5 and range(f6, 6.6, 7.7);"); + } + + @Test + void nested_queries() { + String q = Q.select("*") + .from("sd1") + .where("f1").contains("1") + .andnot(Q.p(Q.p("f2").contains("2").and("f3").contains("3")) + .or(Q.p("f2").contains("4").andnot("f3").contains("5"))) + .semicolon() + 
.build(); + + assertEquals(q, "yql=select * from sd1 where f1 contains \"1\" and !((f2 contains \"2\" and f3 contains \"3\") or (f2 contains \"4\" and !(f3 contains \"5\")));"); + } + + @Test + void userInput_with_and_with_out_defaultIndex() { + String q = Q.select("*") + .from("sd1") + .where(Q.ui("value")) + .and(Q.ui("index", "value2")) + .semicolon() + .build(); + + assertEquals(q, "yql=select * from sd1 where userInput(@_1) and ([{\"defaultIndex\":\"index\"}]userInput(@_2_index));&_2_index=value2&_1=value"); + } + + @Test + void dot_product() { + String q = Q.select("*") + .from("sd1") + .where(Q.dotPdt("f1", stringIntMap("a", 1, "b", 2, "c", 3))) + .and("f2").contains("1") + .semicolon() + .build(); + + assertEquals(q, "yql=select * from sd1 where dotProduct(f1, {\"a\":1,\"b\":2,\"c\":3}) and f2 contains \"1\";"); + } + + @Test + void weighted_set() { + String q = Q.select("*") + .from("sd1") + .where(Q.wtdSet("f1", stringIntMap("a", 1, "b", 2, "c", 3))) + .and("f2").contains("1") + .semicolon() + .build(); + + assertEquals(q, "yql=select * from sd1 where weightedSet(f1, {\"a\":1,\"b\":2,\"c\":3}) and f2 contains \"1\";"); + } + + @Test + void non_empty() { + String q = Q.select("*") + .from("sd1") + .where(Q.nonEmpty(Q.p("f1").contains("v1"))) + .and("f2").contains("v2") + .semicolon() + .build(); + + assertEquals(q, "yql=select * from sd1 where nonEmpty(f1 contains \"v1\") and f2 contains \"v2\";"); + } + + + @Test + void wand_with_and_without_annotation() { + String q = Q.select("*") + .from("sd1") + .where(Q.wand("f1", stringIntMap("a", 1, "b", 2, "c", 3))) + .and(Q.wand("f2", Arrays.asList(Arrays.asList(1, 1), Arrays.asList(2, 2)))) + .and( + Q.wand("f3", Arrays.asList(Arrays.asList(1, 1), Arrays.asList(2, 2))) + .annotate(A.a("scoreThreshold", 0.13)) + ) + .semicolon() + .build(); + + assertEquals(q, "yql=select * from sd1 where wand(f1, {\"a\":1,\"b\":2,\"c\":3}) and wand(f2, [[1,1],[2,2]]) and ([{\"scoreThreshold\":0.13}]wand(f3, [[1,1],[2,2]]));"); + 
} + + @Test + void weak_and_with_and_without_annotation() { + String q = Q.select("*") + .from("sd1") + .where(Q.weakand(Q.p("f1").contains("v1").and("f2").contains("v2"))) + .and(Q.weakand(Q.p("f1").contains("v1").and("f2").contains("v2")) + .annotate(A.a("scoreThreshold", 0.13)) + ) + .semicolon() + .build(); + + assertEquals(q, "yql=select * from sd1 where weakAnd(f1 contains \"v1\", f2 contains \"v2\") and ([{\"scoreThreshold\":0.13}]weakAnd(f1 contains \"v1\", f2 contains \"v2\"));"); + } + + @Test + void geo_location() { + String q = Q.select("*") + .from("sd1") + .where("a").contains("b").and(Q.geoLocation("taiwan", 25.105497, 121.597366, "200km")) + .semicolon() + .build(); + + assertEquals(q, "yql=select * from sd1 where a contains \"b\" and geoLocation(taiwan, 25.105497, 121.597366, \"200km\");"); + } + + @Test + void nearest_neighbor_query() { + String q = Q.select("*") + .from("sd1") + .where("a").contains("b") + .and(Q.nearestNeighbor("vec1", "vec2") + .annotate(A.a("targetHits", 10, "approximate", false)) + ) + .semicolon() + .build(); + assertEquals(q, "yql=select * from sd1 where a contains \"b\" and ([{\"approximate\":false,\"targetHits\":10}]nearestNeighbor(vec1, vec2));"); + } + + @Test + void invalid_nearest_neighbor_should_throws_an_exception_targetHits_annotation_is_required() { + assertThrows(IllegalArgumentException.class, + () -> Q.select("*") + .from("sd1") + .where("a").contains("b").and(Q.nearestNeighbor("vec1", "vec2")) + .semicolon() + .build()); + } + + + @Test + void rank_with_only_query() { + String q = Q.select("*") + .from("sd1") + .where(Q.rank( + Q.p("f1").contains("v1") + ) + ) + .semicolon() + .build(); + + assertEquals(q, "yql=select * from sd1 where rank(f1 contains \"v1\");"); + } + + @Test + void rank() { + String q = Q.select("*") + .from("sd1") + .where(Q.rank( + Q.p("f1").contains("v1"), + Q.p("f2").contains("v2"), + Q.p("f3").eq(3)) + ) + .semicolon() + .build(); + + assertEquals(q, "yql=select * from sd1 where rank(f1 
contains \"v1\", f2 contains \"v2\", f3 = 3);"); + } + + @Test + void rank_with_rank_query_array() { + Query[] ranks = new Query[]{Q.p("f2").contains("v2"), Q.p("f3").eq(3)}; + String q = Q.select("*") + .from("sd1") + .where(Q.rank( + Q.p("f1").contains("v1"), + ranks) + ) + .semicolon() + .build(); + + assertEquals(q, "yql=select * from sd1 where rank(f1 contains \"v1\", f2 contains \"v2\", f3 = 3);"); + } + + @Test + void stringfunction_annotations() { + + { + Annotation annotation = A.filter(); + String expected = "[{\"filter\":true}]"; + String q = Q.select("*") + .from("sd1") + .where("f1").contains(annotation, "v1") + .semicolon() + .build(); + + assertEquals(q, "yql=select * from sd1 where f1 contains (" + expected + "\"v1\");"); + } + { + Annotation annotation = A.defaultIndex("idx"); + String expected = "[{\"defaultIndex\":\"idx\"}]"; + String q = Q.select("*") + .from("sd1") + .where("f1").contains(annotation, "v1") + .semicolon() + .build(); + + assertEquals(q, "yql=select * from sd1 where f1 contains (" + expected + "\"v1\");"); + } + { + Annotation annotation = A.a(stringObjMap("a1", stringObjMap("k1", "v1", "k2", 2))); + String expected = "[{\"a1\":{\"k1\":\"v1\",\"k2\":2}}]"; + String q = Q.select("*") + .from("sd1") + .where("f1").contains(annotation, "v1") + .semicolon() + .build(); + + assertEquals(q, "yql=select * from sd1 where f1 contains (" + expected + "\"v1\");"); + } + + } + + @Test + void sub_expression_annotations() { + String q = Q.select("*") + .from("sd1") + .where("f1").contains("v1").annotate(A.a("ak1", "av1")) + .semicolon() + .build(); + + assertEquals(q, "yql=select * from sd1 where ([{\"ak1\":\"av1\"}](f1 contains \"v1\"));"); + } + + @Test + void sub_expressions_annotations_annotate_in_the_middle_of_query() { + String q = Q.select("*") + .from("sd1") + .where(Q.p("f1").contains("v1").annotate(A.a("ak1", "av1")).and("f2").contains("v2")) + .semicolon() + .build(); + + assertEquals(q, "yql=select * from sd1 where 
([{\"ak1\":\"av1\"}](f1 contains \"v1\" and f2 contains \"v2\"));"); + } + + @Test + void sub_expressions_annotations_annotate_in_nested_queries() { + String q = Q.select("*") + .from("sd1") + .where(Q.p( + Q.p("f1").contains("v1").annotate(A.a("ak1", "av1"))) + .and("f2").contains("v2") + ) + .semicolon() + .build(); + + assertEquals(q, "yql=select * from sd1 where (([{\"ak1\":\"av1\"}](f1 contains \"v1\")) and f2 contains \"v2\");"); + } + + @Test + void build_query_which_created_from_Q_b_without_select_and_sources() { + String q = Q.p("f1").contains("v1") + .semicolon() + .build(); + + assertEquals(q, "yql=select * from sources * where f1 contains \"v1\";"); + } + + @Test + void order_by() { + String q = Q.p("f1").contains("v1") + .orderByAsc("f2") + .orderByAsc(A.a(stringObjMap("function", "uca", "locale", "en_US", "strength", "IDENTICAL")), "f3") + .orderByDesc("f4") + .orderByDesc(A.a(stringObjMap("function", "lowercase")), "f5") + .semicolon() + .build(); + + assertEquals(q, "yql=select * from sources * where f1 contains \"v1\" order by f2 asc, [{\"function\":\"uca\",\"locale\":\"en_US\",\"strength\":\"IDENTICAL\"}]f3 asc, f4 desc, [{\"function\":\"lowercase\"}]f5 desc;"); + } + + @Test + void contains_sameElement() { + String q = Q.p("f1").containsSameElement(Q.p("stime").le(1).and("etime").gt(2)) + .semicolon() + .build(); + + assertEquals(q, "yql=select * from sources * where f1 contains sameElement(stime <= 1, etime > 2);"); + } + + @Test + void contains_phrase_near_onear_equiv() { + { + String q1 = Q.p("f1").containsPhrase("p1", "p2", "p3") + .semicolon() + .build(); + String q2 = Q.p("f1").containsPhrase(Arrays.asList("p1", "p2", "p3")) + .semicolon() + .build(); + assertEquals(q1, "yql=select * from sources * where f1 contains phrase(\"p1\", \"p2\", \"p3\");"); + assertEquals(q2, "yql=select * from sources * where f1 contains phrase(\"p1\", \"p2\", \"p3\");"); + } + { + String q1 = Q.p("f1").containsNear("p1", "p2", "p3") + .semicolon() + .build(); + 
String q2 = Q.p("f1").containsNear(Arrays.asList("p1", "p2", "p3")) + .semicolon() + .build(); + assertEquals(q1, "yql=select * from sources * where f1 contains near(\"p1\", \"p2\", \"p3\");"); + assertEquals(q2, "yql=select * from sources * where f1 contains near(\"p1\", \"p2\", \"p3\");"); + } + { + String q1 = Q.p("f1").containsOnear("p1", "p2", "p3") + .semicolon() + .build(); + String q2 = Q.p("f1").containsOnear(Arrays.asList("p1", "p2", "p3")) + .semicolon() + .build(); + assertEquals(q1, "yql=select * from sources * where f1 contains onear(\"p1\", \"p2\", \"p3\");"); + assertEquals(q2, "yql=select * from sources * where f1 contains onear(\"p1\", \"p2\", \"p3\");"); + } + { + String q1 = Q.p("f1").containsEquiv("p1", "p2", "p3") + .semicolon() + .build(); + String q2 = Q.p("f1").containsEquiv(Arrays.asList("p1", "p2", "p3")) + .semicolon() + .build(); + assertEquals(q1, "yql=select * from sources * where f1 contains equiv(\"p1\", \"p2\", \"p3\");"); + assertEquals(q2, "yql=select * from sources * where f1 contains equiv(\"p1\", \"p2\", \"p3\");"); + } + } + + @Test + void contains_uri() { + String q = Q.p("f1").containsUri("https://test.uri") + .semicolon() + .build(); + + assertEquals(q, "yql=select * from sources * where f1 contains uri(\"https://test.uri\");"); + } + + @Test + void contains_uri_with_annotation() { + String q = Q.p("f1").containsUri(A.a("key", "value"), "https://test.uri") + .semicolon() + .build(); + + assertEquals(q, "yql=select * from sources * where f1 contains ([{\"key\":\"value\"}]uri(\"https://test.uri\"));"); + } + + @Test + void nearestNeighbor() { + String q = Q.p("f1").nearestNeighbor("query_vector") + .semicolon() + .build(); + + assertEquals(q, "yql=select * from sources * where nearestNeighbor(f1, query_vector);"); + } + + @Test + void nearestNeighbor_with_annotation() { + String q = Q.p("f1").nearestNeighbor(A.a("targetHits", 10), "query_vector") + .semicolon() + .build(); + + assertEquals(q, "yql=select * from sources * 
where ([{\"targetHits\":10}]nearestNeighbor(f1, query_vector));"); + } + + @Test + void use_contains_instead_of_contains_equiv_when_input_size_is_1() { + String q = Q.p("f1").containsEquiv(Collections.singletonList("p1")) + .semicolon() + .build(); + + assertEquals(q, "yql=select * from sources * where f1 contains \"p1\";"); + } + + @Test + void contains_phrase_near_onear_equiv_empty_list_should_throw_illegal_argument_exception() { + assertThrows(IllegalArgumentException.class, () -> Q.p("f1").containsPhrase(Collections.emptyList()) + .semicolon() + .build()); + + assertThrows(IllegalArgumentException.class, () -> Q.p("f1").containsNear(Collections.emptyList()) + .semicolon() + .build()); + + assertThrows(IllegalArgumentException.class, () -> Q.p("f1").containsOnear(Collections.emptyList()) + .semicolon() + .build()); + + assertThrows(IllegalArgumentException.class, () -> Q.p("f1").containsEquiv(Collections.emptyList()) + .semicolon() + .build()); + } + + + @Test + void contains_near_onear_with_annotation() { + { + String q = Q.p("f1").containsNear(A.a("distance", 5), "p1", "p2", "p3") + .semicolon() + .build(); + + assertEquals(q, "yql=select * from sources * where f1 contains ([{\"distance\":5}]near(\"p1\", \"p2\", \"p3\"));"); + } + { + String q = Q.p("f1").containsOnear(A.a("distance", 5), "p1", "p2", "p3") + .semicolon() + .build(); + + assertEquals(q, "yql=select * from sources * where f1 contains ([{\"distance\":5}]onear(\"p1\", \"p2\", \"p3\"));"); + } + } + + @Test + void basic_group_syntax() { + /* + example from vespa document: + https://docs.vespa.ai/en/grouping.html + all( group(a) max(5) each(output(count()) + all(max(1) each(output(summary()))) + all(group(b) each(output(count()) + all(max(1) each(output(summary()))) + all(group(c) each(output(count()) + all(max(1) each(output(summary())))))))) ); + */ + String q = Q.p("f1").contains("v1") + .group( + G.all(G.group("a"), G.maxRtn(5), G.each(G.output(G.count()), + G.all(G.maxRtn(1), 
G.each(G.output(G.summary()))), + G.all(G.group("b"), G.each(G.output(G.count()), + G.all(G.maxRtn(1), G.each(G.output(G.summary()))), + G.all(G.group("c"), G.each(G.output(G.count()), + G.all(G.maxRtn(1), G.each(G.output(G.summary()))) + )) + )) + )) + ) + .semicolon() + .build(); + + assertEquals(q, "yql=select * from sources * where f1 contains \"v1\" | all(group(a) max(5) each(output(count()) all(max(1) each(output(summary()))) all(group(b) each(output(count()) all(max(1) each(output(summary()))) all(group(c) each(output(count()) all(max(1) each(output(summary())))))))));"); + } + + @Test + void set_group_syntax_string_directly() { + /* + example from vespa document: + https://docs.vespa.ai/en/grouping.html + all( group(a) max(5) each(output(count()) + all(max(1) each(output(summary()))) + all(group(b) each(output(count()) + all(max(1) each(output(summary()))) + all(group(c) each(output(count()) + all(max(1) each(output(summary())))))))) ); + */ + String q = Q.p("f1").contains("v1") + .group("all(group(a) max(5) each(output(count()) all(max(1) each(output(summary()))) all(group(b) each(output(count()) all(max(1) each(output(summary()))) all(group(c) each(output(count()) all(max(1) each(output(summary())))))))))") + .semicolon() + .build(); + + assertEquals(q, "yql=select * from sources * where f1 contains \"v1\" | all(group(a) max(5) each(output(count()) all(max(1) each(output(summary()))) all(group(b) each(output(count()) all(max(1) each(output(summary()))) all(group(c) each(output(count()) all(max(1) each(output(summary())))))))));"); + } + +@Test + void arbitrary_annotations() { + Annotation a = A.a("a1", "v1", "a2", 2, "a3", stringObjMap("k", "v", "k2", 1), "a4", 4D, "a5", Arrays.asList(1, 2, 3)); + assertEquals(a.toString(), "{\"a1\":\"v1\",\"a2\":2,\"a3\":{\"k\":\"v\",\"k2\":1},\"a4\":4.0,\"a5\":[1,2,3]}"); + } + + @Test + void test_programmability() { + Map<String, String> map = stringStringMap("a", "1", "b", "2", "c", "3"); + + Query q = map + 
.entrySet() + .stream() + .map(entry -> Q.p(entry.getKey()).contains(entry.getValue())) + .reduce(Query::and) + .get(); + + assertEquals(q.semicolon().build(), "yql=select * from sources * where a contains \"1\" and b contains \"2\" and c contains \"3\";"); + } + + @Test + void test_programmability_2() { + Map<String, String> map = stringStringMap("a", "1", "b", "2", "c", "3"); + Query q = Q.p(); + + map.forEach((k, v) -> q.and(Q.p(k).contains(v))); + + assertEquals(q.semicolon().build(), "yql=select * from sources * where a contains \"1\" and b contains \"2\" and c contains \"3\";"); + } + + @Test + void empty_queries_should_not_print_out() { + String q = Q.p(Q.p(Q.p().andnot(Q.p()).and(Q.p()))).and("a").contains("1").semicolon().build(); + + assertEquals(q, "yql=select * from sources * where a contains \"1\";"); + } + + @Test + void validate_positive_search_term_of_strings() { + Query q = Q.p(Q.p("k1").contains("v1").and("k2").contains("v2").andnot("k3").contains("v3")) + .andnot(Q.p("nk1").contains("nv1").and("nk2").contains("nv2").andnot("nk3").contains("nv3")) + .and(Q.p("k4").contains("v4") + .andnot(Q.p("k5").contains("v5").andnot("k6").contains("v6")) + ); + + assertTrue(q.hasPositiveSearchField("k1")); + assertTrue(q.hasPositiveSearchField("k2")); + assertTrue(q.hasPositiveSearchField("nk3")); + assertTrue(q.hasPositiveSearchField("k6")); + assertTrue(q.hasPositiveSearchField("k6", "v6")); + assertFalse(q.hasPositiveSearchField("k6", "v5")); + + assertTrue(q.hasNegativeSearchField("k3")); + assertTrue(q.hasNegativeSearchField("nk1")); + assertTrue(q.hasNegativeSearchField("nk2")); + assertTrue(q.hasNegativeSearchField("k5")); + assertTrue(q.hasNegativeSearchField("k5", "v5")); + assertFalse(q.hasNegativeSearchField("k5", "v4")); + } + + @Test + void validate_positive_search_term_of_user_input() { + Query q = Q.p(Q.ui("k1", "v1")).and(Q.ui("k2", "v2")).andnot(Q.ui("k3", "v3")) + .andnot(Q.p(Q.ui("nk1", "nv1")).and(Q.ui("nk2", "nv2")).andnot(Q.ui("nk3", 
"nv3"))) + .and(Q.p(Q.ui("k4", "v4")) + .andnot(Q.p(Q.ui("k5", "v5")).andnot(Q.ui("k6", "v6"))) + ); + + assertTrue(q.hasPositiveSearchField("k1")); + assertTrue(q.hasPositiveSearchField("k2")); + assertTrue(q.hasPositiveSearchField("nk3")); + assertTrue(q.hasPositiveSearchField("k6")); + assertTrue(q.hasPositiveSearchField("k6", "v6")); + assertFalse(q.hasPositiveSearchField("k6", "v5")); + + assertTrue(q.hasNegativeSearchField("k3")); + assertTrue(q.hasNegativeSearchField("nk1")); + assertTrue(q.hasNegativeSearchField("nk2")); + assertTrue(q.hasNegativeSearchField("k5")); + assertTrue(q.hasNegativeSearchField("k5", "v5")); + assertFalse(q.hasNegativeSearchField("k5", "v4")); + } + + private static Map<String, Integer> stringIntMap(String k1, int v1, String k2, int v2, String k3, int v3) { + HashMap<String, Integer> m = new HashMap<>(); + m.put(k1, v1); + m.put(k2, v2); + m.put(k3, v3); + return m; + } + + private static Map<String, Object> stringObjMap(String k, Object v) { + HashMap<String, Object> m = new HashMap<>(); + m.put(k, v); + return m; + } + + private static Map<String, Object> stringObjMap(String k1, Object v1, String k2, Object v2) { + Map<String, Object> m = new LinkedHashMap<>(); + m.put(k1, v1); + m.put(k2, v2); + return m; + } + + private static Map<String, Object> stringObjMap(String k1, Object v1, String k2, Object v2, String k3, Object v3) { + Map<String, Object> m = new LinkedHashMap<>(); + m.put(k1, v1); + m.put(k2, v2); + m.put(k3, v3); + return m; + } + + private static Map<String, String> stringStringMap(String k1, String v1, String k2, String v2, String k3, String v3) { + Map<String, String> m = new LinkedHashMap<>(); + m.put(k1, v1); + m.put(k2, v2); + m.put(k3, v3); + return m; + } +}
\ No newline at end of file diff --git a/cloud-tenant-base-dependencies-enforcer/pom.xml b/cloud-tenant-base-dependencies-enforcer/pom.xml index 7113787a715..9250feca285 100644 --- a/cloud-tenant-base-dependencies-enforcer/pom.xml +++ b/cloud-tenant-base-dependencies-enforcer/pom.xml @@ -32,8 +32,8 @@ <jaxb.version>2.3.0</jaxb.version> <jetty.version>9.4.44.v20210927</jetty.version> <jetty-alpn.version>1.1.3.v20160715</jetty-alpn.version> - <junit5.version>5.7.0</junit5.version> - <junit5.platform.version>1.7.0</junit5.platform.version> + <junit5.version>5.8.1</junit5.version> + <junit5.platform.version>1.8.1</junit5.platform.version> <onnxruntime.version>1.8.0</onnxruntime.version> <org.lz4.version>1.8.0</org.lz4.version> <org.json.version>20090211</org.json.version> @@ -42,6 +42,7 @@ <hk2.version>2.5.0-b32</hk2.version> <hk2.osgi-resource-locator.version>1.0.1</hk2.osgi-resource-locator.version> + <httpclient5.version>5.1.2</httpclient5.version> <jackson2.version>2.12.1</jackson2.version> <jackson-databind.version>${jackson2.version}</jackson-databind.version> <javassist.version>3.20.0-GA</javassist.version> @@ -202,6 +203,8 @@ <include>com.yahoo.vespa:tenant-cd-api:*:jar:test</include> <include>com.yahoo.vespa:tenant-cd-commons:*:jar:test</include> <include>com.yahoo.vespa:vespa-athenz:*:jar:test</include> + <include>com.yahoo.vespa:vespa-feed-client:*:jar:test</include> + <include>com.yahoo.vespa:vespa-feed-client-api:*:jar:test</include> <include>com.yahoo.vespa:vespa_jersey2:*:pom:test</include> <include>com.yahoo.vespa:vespaclient-core:*:jar:test</include> <include>com.yahoo.vespa:vsm:*:jar:test</include> @@ -238,9 +241,12 @@ <include>net.java.dev.jna:jna:4.5.2:jar:test</include> <include>org.abego.treelayout:org.abego.treelayout.core:1.0.1:jar:test</include> <include>org.antlr:antlr-runtime:3.5.2:jar:test</include> - <include>org.antlr:antlr4-runtime:4.9.3:jar:test</include> + <include>org.antlr:antlr4-runtime:4.5:jar:test</include> 
<include>org.apache.commons:commons-exec:1.3:jar:test</include> <include>org.apache.commons:commons-math3:3.6.1:jar:test</include> + <include>org.apache.httpcomponents.client5:httpclient5:${httpclient5.version}:jar:test</include> + <include>org.apache.httpcomponents.core5:httpcore5:${httpclient5.version}:jar:test</include> + <include>org.apache.httpcomponents.core5:httpcore5-h2:${httpclient5.version}:jar:test</include> <include>org.apache.httpcomponents:httpclient:4.5.12:jar:test</include> <include>org.apache.httpcomponents:httpcore:4.4.13:jar:test</include> <include>org.apache.opennlp:opennlp-tools:1.8.4:jar:test</include> diff --git a/cloud-tenant-cd/src/main/java/ai/vespa/hosted/cd/cloud/impl/VespaTestRuntime.java b/cloud-tenant-cd/src/main/java/ai/vespa/hosted/cd/cloud/impl/VespaTestRuntime.java index db9072e6198..201ddcb3908 100644 --- a/cloud-tenant-cd/src/main/java/ai/vespa/hosted/cd/cloud/impl/VespaTestRuntime.java +++ b/cloud-tenant-cd/src/main/java/ai/vespa/hosted/cd/cloud/impl/VespaTestRuntime.java @@ -10,6 +10,7 @@ import ai.vespa.hosted.api.TestConfig; import ai.vespa.hosted.cd.Deployment; import ai.vespa.hosted.cd.TestRuntime; import ai.vespa.hosted.cd.commons.HttpDeployment; +import ai.vespa.hosted.cd.commons.FeedClientBuilder; import com.yahoo.config.provision.ApplicationId; import com.yahoo.config.provision.Environment; import com.yahoo.config.provision.zone.ZoneId; @@ -39,7 +40,10 @@ public class VespaTestRuntime implements TestRuntime { } private VespaTestRuntime(TestConfig config) { this.config = config; - this.deploymentToTest = new HttpDeployment(config.deployments().get(config.zone()), new DefaultEndpointAuthenticator(config.system())); + DefaultEndpointAuthenticator authenticator = new DefaultEndpointAuthenticator(config.system()); + this.deploymentToTest = new HttpDeployment(config.deployments().get(config.zone()), authenticator); + FeedClientBuilder.setEndpointAuthenticator(authenticator); + 
System.setProperty(ai.vespa.feed.client.FeedClientBuilder.PREFERRED_IMPLEMENTATION_PROPERTY, FeedClientBuilder.class.getName()); } @Override diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/FleetController.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/FleetController.java index 948e416eb53..3137dfff606 100644 --- a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/FleetController.java +++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/FleetController.java @@ -183,10 +183,10 @@ public class FleetController implements NodeStateOrHostInfoChangeHandler, NodeAd options.nodeStateRequestTimeoutEarliestPercentage, options.nodeStateRequestTimeoutLatestPercentage, options.nodeStateRequestRoundTripTimeMaxSeconds); - var database = new DatabaseHandler(context, new ZooKeeperDatabaseFactory(), timer, options.zooKeeperServerAddress, timer); - var lookUp = new SlobrokClient(timer); - var stateGenerator = new StateChangeHandler(timer, log); - var stateBroadcaster = new SystemStateBroadcaster(timer, timer); + var database = new DatabaseHandler(context, new ZooKeeperDatabaseFactory(context), timer, options.zooKeeperServerAddress, timer); + var lookUp = new SlobrokClient(context, timer); + var stateGenerator = new StateChangeHandler(context, timer, log); + var stateBroadcaster = new SystemStateBroadcaster(context, timer, timer); var masterElectionHandler = new MasterElectionHandler(context, options.fleetControllerIndex, options.fleetControllerCount, timer, timer); var controller = new FleetController(context, timer, log, cluster, stateGatherer, communicator, statusPageServer, null, lookUp, database, stateGenerator, diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/FleetControllerContext.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/FleetControllerContext.java index 
cc94dd88e60..d1aadf9d217 100644 --- a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/FleetControllerContext.java +++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/FleetControllerContext.java @@ -16,4 +16,13 @@ public interface FleetControllerContext { default void log(Logger logger, Level level, String message) { log(logger, level, () -> message); } void log(Logger logger, Level level, String message, Throwable t); void log(Logger logger, Level level, Supplier<String> message); + + default void log(Logger logger, Level level, String format, Object first, Object... rest) { + log(logger, level, () -> { + var args = new Object[1 + rest.length]; + args[0] = first; + System.arraycopy(rest, 0, args, 1, rest.length); + return String.format(format, args); + }); + } } diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/StateChangeHandler.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/StateChangeHandler.java index 5035ed1aa88..46fafddfade 100644 --- a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/StateChangeHandler.java +++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/StateChangeHandler.java @@ -28,6 +28,7 @@ public class StateChangeHandler { private static final Logger log = Logger.getLogger(StateChangeHandler.class.getName()); + private final FleetControllerContext context; private final Timer timer; private final EventLogInterface eventLog; private boolean stateMayHaveChanged = false; @@ -40,7 +41,8 @@ public class StateChangeHandler { private int maxSlobrokDisconnectGracePeriod = 1000; private static final boolean disableUnstableNodes = true; - public StateChangeHandler(Timer timer, EventLogInterface eventLog) { + public StateChangeHandler(FleetControllerContext context, Timer timer, EventLogInterface eventLog) { + this.context = context; this.timer = timer; this.eventLog = 
eventLog; maxTransitionTime.put(NodeType.DISTRIBUTOR, 5000); @@ -52,7 +54,7 @@ public class StateChangeHandler { final DatabaseHandler database, final DatabaseHandler.DatabaseContext dbContext) throws InterruptedException { int startTimestampsReset = 0; - log.log(Level.FINE, "handleAllDistributorsInSync invoked for state version %d", currentState.getVersion()); + context.log(log, Level.FINE, "handleAllDistributorsInSync invoked for state version %d", currentState.getVersion()); for (NodeType nodeType : NodeType.getTypes()) { for (ConfiguredNode configuredNode : nodes) { final Node node = new Node(nodeType, configuredNode.index()); diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/SystemStateBroadcaster.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/SystemStateBroadcaster.java index bc8d84c4634..d061f7edbea 100644 --- a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/SystemStateBroadcaster.java +++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/SystemStateBroadcaster.java @@ -18,8 +18,9 @@ import java.util.stream.Collectors; public class SystemStateBroadcaster { - public static Logger log = Logger.getLogger(SystemStateBroadcaster.class.getName()); + private static Logger log = Logger.getLogger(SystemStateBroadcaster.class.getName()); + private final FleetControllerContext context; private final Timer timer; private final Object monitor; private ClusterStateBundle clusterStateBundle; @@ -37,7 +38,8 @@ public class SystemStateBroadcaster { private final SetClusterStateWaiter setClusterStateWaiter = new SetClusterStateWaiter(); private final ActivateClusterStateVersionWaiter activateClusterStateVersionWaiter = new ActivateClusterStateVersionWaiter(); - public SystemStateBroadcaster(Timer timer, Object monitor) { + public SystemStateBroadcaster(FleetControllerContext context, Timer timer, Object monitor) { + this.context = context; 
this.timer = timer; this.monitor = monitor; } @@ -70,7 +72,7 @@ public class SystemStateBroadcaster { long time = timer.getCurrentTimeInMillis(); Long lastReported = lastErrorReported.get(info.getNode()); boolean alreadySeen = (lastReported != null && time - lastReported < minTimeBetweenNodeErrorLogging); - log.log((nodeOk && !alreadySeen) ? Level.WARNING : Level.FINE, message); + context.log(log, nodeOk && !alreadySeen ? Level.WARNING : Level.FINE, message); if (!alreadySeen) { lastErrorReported.put(info.getNode(), time); } @@ -96,12 +98,17 @@ public class SystemStateBroadcaster { // NO_SUCH_METHOD implies node is on a version that does not understand explicit activations // and it has already merrily started using the state version. Treat as if it had been ACKed. if (reply.getReturnCode() != ErrorCode.NO_SUCH_METHOD) { - log.log(Level.FINE, () -> String.format("Activation NACK for node %s with version %d, message %s", - info, version, reply.getReturnMessage())); + context.log(log, + Level.FINE, + () -> String.format("Activation NACK for node %s with version %d, message %s", + info, version, reply.getReturnMessage())); success = false; } else { - log.log(Level.FINE, () -> String.format("Node %s did not understand state activation RPC; " + - "implicitly treating state %d as activated on node", info, version)); + context.log(log, + Level.FINE, + () -> String.format("Node %s did not understand state activation RPC; " + + "implicitly treating state %d as activated on node", + info, version)); } } else if (reply.getActualVersion() != version) { boolean nodeOk = nodeReportsSelfAsAvailable(info); @@ -113,8 +120,10 @@ public class SystemStateBroadcaster { version, info, reply.getActualVersion())); success = false; } else { - log.log(Level.FINE, () -> String.format("Node %s reports successful activation of state " + - "version %d", info, version)); + context.log(log, + Level.FINE, + () -> String.format("Node %s reports successful activation of state version %d", + info, 
version)); } info.setSystemStateVersionActivationAcked(version, success); // TODO we currently don't invoke reportNodeError here.. We assume that node errors will be reported @@ -144,7 +153,7 @@ public class SystemStateBroadcaster { } } else { info.setClusterStateBundleVersionAcknowledged(version, true); - log.log(Level.FINE, () -> String.format("Node %s ACKed system state version %d.", info, version)); + context.log(log, Level.FINE, () -> String.format("Node %s ACKed system state version %d.", info, version)); lastErrorReported.remove(info.getNode()); } } @@ -220,8 +229,10 @@ public class SystemStateBroadcaster { if (!anyDistributorsNeedStateBundle && (currentStateVersion > lastStateVersionBundleAcked)) { markCurrentClusterStateBundleAsReceivedByAllDistributors(); if (clusterStateBundle.deferredActivation()) { - log.log(Level.FINE, () -> String.format("All distributors have ACKed cluster state " + - "version %d, sending activation", currentStateVersion)); + context.log(log, + Level.FINE, + () -> String.format("All distributors have ACKed cluster state " + + "version %d, sending activation", currentStateVersion)); } else { markCurrentClusterStateAsConverged(database, dbContext, fleetController); } @@ -239,8 +250,10 @@ public class SystemStateBroadcaster { if (!anyDistributorsNeedActivation && (currentStateVersion > lastClusterStateVersionConverged)) { markCurrentClusterStateAsConverged(database, dbContext, fleetController); } else { - log.log(Level.FINE, () -> String.format("distributors still need activation in state %d (last converged: %d)", - currentStateVersion, lastClusterStateVersionConverged)); + context.log(log, + Level.FINE, + () -> String.format("distributors still need activation in state %d (last converged: %d)", + currentStateVersion, lastClusterStateVersionConverged)); } } @@ -249,7 +262,7 @@ public class SystemStateBroadcaster { } private void markCurrentClusterStateAsConverged(DatabaseHandler database, DatabaseHandler.DatabaseContext dbContext, 
FleetController fleetController) throws InterruptedException { - log.log(Level.FINE, "All distributors have newest clusterstate, updating start timestamps in zookeeper and clearing them from cluster state"); + context.log(log, Level.FINE, "All distributors have newest clusterstate, updating start timestamps in zookeeper and clearing them from cluster state"); lastClusterStateVersionConverged = clusterStateBundle.getVersion(); lastClusterStateBundleConverged = clusterStateBundle; fleetController.handleAllDistributorsInSync(database, dbContext); @@ -279,7 +292,7 @@ public class SystemStateBroadcaster { ClusterState baselineState = clusterStateBundle.getBaselineClusterState(); if (!currentBundleVersionIsTaggedOfficial()) { - log.log(Level.INFO, String.format("Publishing cluster state version %d", baselineState.getVersion())); + context.log(log, Level.INFO, "Publishing cluster state version " + baselineState.getVersion()); tagCurrentBundleVersionAsOfficial(); } @@ -288,13 +301,17 @@ public class SystemStateBroadcaster { if (nodeNeedsToObserveStartupTimestamps(node)) { // TODO this is the same for all nodes, compute only once ClusterStateBundle modifiedBundle = clusterStateBundle.cloneWithMapper(state -> buildModifiedClusterState(state, dbContext)); - log.log(Level.FINE, () -> String.format("Sending modified cluster state version %d" + - " to node %s: %s", baselineState.getVersion(), node, modifiedBundle)); + context.log(log, + Level.FINE, + () -> "Sending modified cluster state version " + baselineState.getVersion() + + " to node " + node + ": " + modifiedBundle); communicator.setSystemState(modifiedBundle, node, setClusterStateWaiter); } else { - log.log(Level.FINE, () -> String.format("Sending system state version %d to node %s. 
" + - "(went down time %d, node start time %d)", baselineState.getVersion(), node, - node.getWentDownWithStartTime(), node.getStartTimestamp())); + context.log(log, + Level.FINE, + () -> "Sending system state version " + baselineState.getVersion() + + " to node " + node + ". (went down time " + node.getWentDownWithStartTime() + + ", node start time " + node.getStartTimestamp() + ")"); communicator.setSystemState(clusterStateBundle, node, setClusterStateWaiter); } } @@ -313,8 +330,10 @@ public class SystemStateBroadcaster { var recipients = resolveStateActivationSendSet(dbContext); for (NodeInfo node : recipients) { - log.log(Level.FINE, () -> String.format("Sending cluster state activation to node %s for version %d", - node, clusterStateBundle.getVersion())); + context.log(log, + Level.FINE, + () -> "Sending cluster state activation to node " + node + " for version " + + clusterStateBundle.getVersion()); communicator.activateClusterStateVersion(clusterStateBundle.getVersion(), node, activateClusterStateVersionWaiter); } diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/database/DatabaseHandler.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/database/DatabaseHandler.java index a7c909ded95..0c6c773a9bc 100644 --- a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/database/DatabaseHandler.java +++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/database/DatabaseHandler.java @@ -378,7 +378,7 @@ public class DatabaseHandler { public int getLatestSystemStateVersion() { fleetControllerContext.log(logger, Level.FINE, () -> "Retrieving latest system state version."); synchronized (databaseMonitor) { - if (database != null && !database.isClosed()) { + if (database != null) { currentlyStored.lastSystemStateVersion = database.retrieveLatestSystemStateVersion(); } } diff --git 
a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/database/ZooKeeperDatabase.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/database/ZooKeeperDatabase.java index fe716eea288..72c81489351 100644 --- a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/database/ZooKeeperDatabase.java +++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/database/ZooKeeperDatabase.java @@ -7,6 +7,7 @@ import com.yahoo.vdslib.state.State; import com.yahoo.vespa.clustercontroller.core.AnnotatedClusterState; import com.yahoo.vespa.clustercontroller.core.ClusterStateBundle; import com.yahoo.vespa.clustercontroller.core.ContentCluster; +import com.yahoo.vespa.clustercontroller.core.FleetControllerContext; import com.yahoo.vespa.clustercontroller.core.rpc.EnvelopedClusterStateBundleCodec; import com.yahoo.vespa.clustercontroller.core.rpc.SlimeClusterStateBundleCodec; import com.yahoo.vespa.zookeeper.client.ZkClientConfigBuilder; @@ -42,18 +43,14 @@ public class ZooKeeperDatabase extends Database { private final ZooKeeperWatcher watcher = new ZooKeeperWatcher(); private final ZooKeeper session; private boolean sessionOpen = true; + private final FleetControllerContext context; private final int nodeIndex; private final MasterDataGatherer masterDataGatherer; - private boolean reportErrors = true; // Expected ZK znode versions. Note: these are _not_ -1 as that would match anything. // We expect the caller to invoke the load methods prior to calling any store methods. 
private int lastKnownStateBundleZNodeVersion = -2; private int lastKnownStateVersionZNodeVersion = -2; - public void stopErrorReporting() { - reportErrors = false; - } - private class ZooKeeperWatcher implements Watcher { private Event.KeeperState state = null; @@ -62,50 +59,51 @@ public class ZooKeeperDatabase extends Database { public void process(WatchedEvent watchedEvent) { // Shouldn't get events after we expire, but just be sure we stop them here. if (state != null && state.equals(Event.KeeperState.Expired)) { - log.log(Level.WARNING, "Fleetcontroller " + nodeIndex + ": Got event from ZooKeeper session after it expired"); + context.log(log, Level.WARNING, "Got event from ZooKeeper session after it expired"); return; } Event.KeeperState newState = watchedEvent.getState(); if (state == null || !state.equals(newState)) switch (newState) { case Expired: - log.log(Level.INFO, "Fleetcontroller " + nodeIndex + ": Zookeeper session expired"); + context.log(log, Level.INFO, "Zookeeper session expired"); sessionOpen = false; listener.handleZooKeeperSessionDown(); break; case Disconnected: - log.log(Level.INFO, "Fleetcontroller " + nodeIndex + ": Lost connection to zookeeper server"); + context.log(log, Level.INFO, "Lost connection to zookeeper server"); sessionOpen = false; listener.handleZooKeeperSessionDown(); break; case SyncConnected: - log.log(Level.INFO, "Fleetcontroller " + nodeIndex + ": Connection to zookeeper server established. Refetching master data"); + context.log(log, Level.INFO, "Connection to zookeeper server established. 
Refetching master data"); if (masterDataGatherer != null) { masterDataGatherer.restart(); } } switch (watchedEvent.getType()) { case NodeChildrenChanged: // Fleetcontrollers have either connected or disconnected to ZooKeeper - log.log(Level.WARNING, "Fleetcontroller " + nodeIndex + ": Got unexpected ZooKeeper event NodeChildrenChanged"); + context.log(log, Level.WARNING, "Got unexpected ZooKeeper event NodeChildrenChanged"); break; case NodeDataChanged: // A fleetcontroller have changed what node it is voting for - log.log(Level.WARNING, "Fleetcontroller " + nodeIndex + ": Got unexpected ZooKeeper event NodeDataChanged"); + context.log(log, Level.WARNING, "Got unexpected ZooKeeper event NodeDataChanged"); break; case NodeCreated: // How can this happen? Can one leave watches on non-existing nodes? - log.log(Level.WARNING, "Fleetcontroller " + nodeIndex + ": Got unexpected ZooKeeper event NodeCreated"); + context.log(log, Level.WARNING, "Got unexpected ZooKeeper event NodeCreated"); break; case NodeDeleted: // We're not watching any nodes for whether they are deleted or not. - log.log(Level.WARNING, "Fleetcontroller " + nodeIndex + ": Got unexpected ZooKeeper event NodeDeleted"); + context.log(log, Level.WARNING, "Got unexpected ZooKeeper event NodeDeleted"); break; case None: if (state != null && state.equals(watchedEvent.getState())) { - log.log(Level.WARNING, "Fleetcontroller " + nodeIndex + ": Got None type event that didn't even alter session state. What does that indicate?"); + context.log(log, Level.WARNING, "Got None type event that didn't even alter session state. 
What does that indicate?"); } } state = watchedEvent.getState(); } } - public ZooKeeperDatabase(ContentCluster cluster, int nodeIndex, String address, int timeout, Database.DatabaseListener zksl) throws IOException, KeeperException, InterruptedException { + public ZooKeeperDatabase(FleetControllerContext context, ContentCluster cluster, int nodeIndex, String address, int timeout, DatabaseListener zksl) throws IOException, KeeperException, InterruptedException { + this.context = context; this.nodeIndex = nodeIndex; zooKeeperRoot = "/vespa/fleetcontroller/" + cluster.getName() + "/"; session = new ZooKeeper(address, timeout, watcher, new ZkClientConfigBuilder().toConfig()); @@ -113,7 +111,7 @@ public class ZooKeeperDatabase extends Database { try{ this.listener = zksl; setupRoot(); - log.log(Level.FINEST, () -> "Fleetcontroller " + nodeIndex + ": Asking for initial data on master election"); + context.log(log, Level.FINEST, "Asking for initial data on master election"); masterDataGatherer = new MasterDataGatherer(session, zooKeeperRoot, listener, nodeIndex); completedOk = true; } finally { @@ -124,14 +122,14 @@ public class ZooKeeperDatabase extends Database { private void createNode(String prefix, String nodename, byte[] value) throws KeeperException, InterruptedException { try{ if (session.exists(prefix + nodename, false) != null) { - log.log(Level.FINE, () -> "Fleetcontroller " + nodeIndex + ": Zookeeper node '" + prefix + nodename + "' already exists. Not creating it"); + context.log(log, Level.FINE, () -> "Zookeeper node '" + prefix + nodename + "' already exists. 
Not creating it"); return; } session.create(prefix + nodename, value, acl, CreateMode.PERSISTENT); - log.log(Level.FINE, () -> "Fleetcontroller " + nodeIndex + ": Created zookeeper node '" + prefix + nodename + "'"); + context.log(log, Level.FINE, () -> "Created zookeeper node '" + prefix + nodename + "'"); } catch (KeeperException.NodeExistsException e) { - log.log(Level.FINE, () -> "Fleetcontroller " + nodeIndex + ": Node to create existed, " - + "but this is normal as other nodes may create them at the same time."); + context.log(log, Level.FINE, "Node to create existed, but this is normal as other nodes " + + "may create them at the same time."); } } @@ -149,14 +147,13 @@ public class ZooKeeperDatabase extends Database { createNode(zooKeeperRoot, "published_state_bundle", new byte[0]); // TODO dedupe string constants byte[] val = String.valueOf(nodeIndex).getBytes(utf8); deleteNodeIfExists(getMyIndexPath()); - log.log(Level.INFO, "Fleetcontroller " + nodeIndex + - ": Creating ephemeral master vote node with vote to self."); + context.log(log, Level.INFO, "Creating ephemeral master vote node with vote to self."); session.create(getMyIndexPath(), val, acl, CreateMode.EPHEMERAL); } private void deleteNodeIfExists(String path) throws KeeperException, InterruptedException { if (session.exists(path, false) != null) { - log.log(Level.INFO, "Fleetcontroller " + nodeIndex + ": Removing master vote node."); + context.log(log, Level.INFO, "Removing master vote node at " + path); session.delete(path, -1); } } @@ -172,11 +169,11 @@ public class ZooKeeperDatabase extends Database { public void close() { sessionOpen = false; try{ - log.log(Level.FINE, () -> "Fleetcontroller " + nodeIndex + ": Trying to close ZooKeeper session 0x" + context.log(log, Level.FINE, () -> "Trying to close ZooKeeper session 0x" + Long.toHexString(session.getSessionId())); session.close(); } catch (InterruptedException e) { - log.log(Level.WARNING, "Fleetcontroller " + nodeIndex + ": Got interrupt 
exception while closing session: " + e); + context.log(log, Level.WARNING, "Got interrupt exception while closing session: " + e); } } @@ -185,11 +182,10 @@ public class ZooKeeperDatabase extends Database { } private void maybeLogExceptionWarning(Exception e, String message) { - if (sessionOpen && reportErrors) { + if (sessionOpen) { StringWriter sw = new StringWriter(); e.printStackTrace(new PrintWriter(sw)); - log.log(Level.WARNING, String.format("Fleetcontroller %s: %s. Exception: %s\n%s", - nodeIndex, message, e.getMessage(), sw.toString())); + context.log(log, Level.WARNING, message + ". Exception: " + e.getMessage() + "\n" + sw); } } @@ -197,7 +193,7 @@ public class ZooKeeperDatabase extends Database { byte[] val = String.valueOf(wantedMasterIndex).getBytes(utf8); try{ session.setData(getMyIndexPath(), val, -1); - log.log(Level.INFO, "Fleetcontroller " + nodeIndex + ": Stored new vote in ephemeral node. " + nodeIndex + " -> " + wantedMasterIndex); + context.log(log, Level.INFO, "Stored new vote in ephemeral node. 
" + nodeIndex + " -> " + wantedMasterIndex); return true; } catch (InterruptedException e) { throw new RuntimeException(e); @@ -209,7 +205,7 @@ public class ZooKeeperDatabase extends Database { public boolean storeLatestSystemStateVersion(int version) { byte[] data = Integer.toString(version).getBytes(utf8); try{ - log.log(Level.INFO, String.format("Fleetcontroller %d: Storing new cluster state version in ZooKeeper: %d", nodeIndex, version)); + context.log(log, Level.INFO, "Storing new cluster state version in ZooKeeper: " + version); var stat = session.setData(zooKeeperRoot + "latestversion", data, lastKnownStateVersionZNodeVersion); lastKnownStateVersionZNodeVersion = stat.getVersion(); return true; @@ -226,24 +222,23 @@ public class ZooKeeperDatabase extends Database { public Integer retrieveLatestSystemStateVersion() { Stat stat = new Stat(); - try{ - log.log(Level.FINE, () -> String.format("Fleetcontroller %d: Fetching latest cluster state at '%slatestversion'", - nodeIndex, zooKeeperRoot)); - byte[] data = session.getData(zooKeeperRoot + "latestversion", false, stat); - lastKnownStateVersionZNodeVersion = stat.getVersion(); - final Integer versionNumber = Integer.valueOf(new String(data, utf8)); - log.log(Level.INFO, String.format("Fleetcontroller %d: Read cluster state version %d from ZooKeeper " + - "(znode version %d)", nodeIndex, versionNumber, stat.getVersion())); - return versionNumber; - } catch (InterruptedException e) { - throw new RuntimeException(e); - } catch (Exception e) { - // If we return a default, empty version, writes dependent on this bundle should only - // succeed if the previous znode version is 0, i.e. not yet created. 
+ context.log(log, Level.FINE, "Fetching latest cluster state at '%slatestversion'", zooKeeperRoot); + final byte[] data; + try { + data = session.getData(zooKeeperRoot + "latestversion", false, stat); + } catch (KeeperException.NoNodeException e) { + // Initial condition: No latest version has ever been written (or ZK state completely wiped!) lastKnownStateVersionZNodeVersion = 0; - maybeLogExceptionWarning(e, "Failed to retrieve latest system state version used. Returning null"); + maybeLogExceptionWarning(e, "No latest system state found"); return null; + } catch (InterruptedException | KeeperException e) { + throw new RuntimeException("Failed to get " + zooKeeperRoot + "latestversion", e); } + + lastKnownStateVersionZNodeVersion = stat.getVersion(); + final Integer versionNumber = Integer.valueOf(new String(data, utf8)); + context.log(log, Level.INFO, "Read cluster state version %d from ZooKeeper (znode version %d)", versionNumber, stat.getVersion()); + return versionNumber; } public boolean storeWantedStates(Map<Node, NodeState> states) { @@ -262,7 +257,7 @@ public class ZooKeeperDatabase extends Database { } byte[] val = sb.toString().getBytes(utf8); try{ - log.log(Level.FINE, () -> "Fleetcontroller " + nodeIndex + ": Storing wanted states at '" + zooKeeperRoot + "wantedstates'"); + context.log(log, Level.FINE, () -> "Storing wanted states at '" + zooKeeperRoot + "wantedstates'"); session.setData(zooKeeperRoot + "wantedstates", val, -1); return true; } catch (InterruptedException e) { @@ -275,7 +270,7 @@ public class ZooKeeperDatabase extends Database { public Map<Node, NodeState> retrieveWantedStates() { try{ - log.log(Level.FINE, () -> "Fleetcontroller " + nodeIndex + ": Fetching wanted states at '" + zooKeeperRoot + "wantedstates'"); + context.log(log, Level.FINE, () -> "Fetching wanted states at '" + zooKeeperRoot + "wantedstates'"); Stat stat = new Stat(); byte[] data = session.getData(zooKeeperRoot + "wantedstates", false, stat); Map<Node, NodeState> 
wanted = new TreeMap<>(); @@ -290,7 +285,7 @@ public class ZooKeeperDatabase extends Database { NodeState nodeState = NodeState.deserialize(node.getType(), token.substring(colon + 1)); wanted.put(node, nodeState); } catch (Exception e) { - log.log(Level.WARNING, "Fleetcontroller " + nodeIndex + ": Ignoring invalid wantedstate line in zookeeper '" + token + "'."); + context.log(log, Level.WARNING, "Ignoring invalid wantedstate line in zookeeper '" + token + "'."); } } } @@ -313,7 +308,7 @@ public class ZooKeeperDatabase extends Database { } byte val[] = sb.toString().getBytes(utf8); try{ - log.log(Level.FINE, () -> "Fleetcontroller " + nodeIndex + ": Storing start timestamps at '" + zooKeeperRoot + "starttimestamps"); + context.log(log, Level.FINE, () -> "Storing start timestamps at '" + zooKeeperRoot + "starttimestamps"); session.setData(zooKeeperRoot + "starttimestamps", val, -1); return true; } catch (InterruptedException e) { @@ -327,7 +322,7 @@ public class ZooKeeperDatabase extends Database { @Override public Map<Node, Long> retrieveStartTimestamps() { try{ - log.log(Level.FINE, () -> "Fleetcontroller " + nodeIndex + ": Fetching start timestamps at '" + zooKeeperRoot + "starttimestamps'"); + context.log(log, Level.FINE, () -> "Fetching start timestamps at '" + zooKeeperRoot + "starttimestamps'"); Stat stat = new Stat(); byte[] data = session.getData(zooKeeperRoot + "starttimestamps", false, stat); Map<Node, Long> wanted = new TreeMap<Node, Long>(); @@ -342,7 +337,7 @@ public class ZooKeeperDatabase extends Database { Long timestamp = Long.valueOf(token.substring(colon + 1)); wanted.put(n, timestamp); } catch (Exception e) { - log.log(Level.WARNING, "Fleetcontroller " + nodeIndex + ": Ignoring invalid starttimestamp line in zookeeper '" + token + "'."); + context.log(log, Level.WARNING, "Ignoring invalid starttimestamp line in zookeeper '" + token + "'."); } } } @@ -360,9 +355,11 @@ public class ZooKeeperDatabase extends Database { 
EnvelopedClusterStateBundleCodec envelopedBundleCodec = new SlimeClusterStateBundleCodec(); byte[] encodedBundle = envelopedBundleCodec.encodeWithEnvelope(stateBundle); try{ - log.log(Level.FINE, () -> String.format("Fleetcontroller %d: Storing published state bundle %s at " + - "'%spublished_state_bundle' with expected znode version %d", - nodeIndex, stateBundle, zooKeeperRoot, lastKnownStateBundleZNodeVersion)); + context.log(log, + Level.FINE, + () -> String.format("Storing published state bundle %s at " + + "'%spublished_state_bundle' with expected znode version %d", + stateBundle, zooKeeperRoot, lastKnownStateBundleZNodeVersion)); var stat = session.setData(zooKeeperRoot + "published_state_bundle", encodedBundle, lastKnownStateBundleZNodeVersion); lastKnownStateBundleZNodeVersion = stat.getVersion(); } catch (InterruptedException e) { diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/database/ZooKeeperDatabaseFactory.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/database/ZooKeeperDatabaseFactory.java index 0f739eec1d0..71f39135609 100644 --- a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/database/ZooKeeperDatabaseFactory.java +++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/database/ZooKeeperDatabaseFactory.java @@ -1,12 +1,20 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.vespa.clustercontroller.core.database; +import com.yahoo.vespa.clustercontroller.core.FleetControllerContext; + public class ZooKeeperDatabaseFactory implements DatabaseFactory { + private final FleetControllerContext context; + + public ZooKeeperDatabaseFactory(FleetControllerContext context) { + this.context = context; + } + @Override public Database create(Params params) throws Exception { - return new ZooKeeperDatabase(params.cluster, params.nodeIndex, params.dbAddress, - params.dbSessionTimeout, params.listener); + return new ZooKeeperDatabase(context, params.cluster, params.nodeIndex, params.dbAddress, + params.dbSessionTimeout, params.listener); } } diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/rpc/SlobrokClient.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/rpc/SlobrokClient.java index c4894e41747..7487f9546b7 100644 --- a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/rpc/SlobrokClient.java +++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/rpc/SlobrokClient.java @@ -2,17 +2,17 @@ package com.yahoo.vespa.clustercontroller.core.rpc; -import com.yahoo.jrt.slobrok.api.SlobrokList; -import com.yahoo.jrt.slobrok.api.Mirror; import com.yahoo.jrt.Supervisor; import com.yahoo.jrt.Transport; -import com.yahoo.vdslib.state.NodeType; +import com.yahoo.jrt.slobrok.api.Mirror; +import com.yahoo.jrt.slobrok.api.SlobrokList; import com.yahoo.vdslib.state.Node; -import java.util.logging.Level; +import com.yahoo.vdslib.state.NodeType; +import com.yahoo.vespa.clustercontroller.core.ContentCluster; +import com.yahoo.vespa.clustercontroller.core.FleetControllerContext; import com.yahoo.vespa.clustercontroller.core.NodeInfo; import com.yahoo.vespa.clustercontroller.core.NodeLookup; import com.yahoo.vespa.clustercontroller.core.Timer; -import com.yahoo.vespa.clustercontroller.core.ContentCluster; import 
com.yahoo.vespa.clustercontroller.core.listeners.NodeAddedOrRemovedListener; import java.util.Iterator; @@ -21,19 +21,22 @@ import java.util.List; import java.util.Map; import java.util.StringTokenizer; import java.util.TreeMap; +import java.util.logging.Level; import java.util.logging.Logger; public class SlobrokClient implements NodeLookup { public static final Logger log = Logger.getLogger(SlobrokClient.class.getName()); + private final FleetControllerContext context; private final Timer timer; private String[] connectionSpecs; private Mirror mirror; private Supervisor supervisor; private boolean freshMirror = false; - public SlobrokClient(Timer timer) { + public SlobrokClient(FleetControllerContext context, Timer timer) { + this.context = context; this.timer = timer; } @@ -81,9 +84,7 @@ public class SlobrokClient implements NodeLookup { if (freshMirror) { freshMirror = false; } else if (cluster.getSlobrokGenerationCount() == mirrorVersion) { - if (log.isLoggable(Level.FINEST)) { - log.log(Level.FINEST, "Slobrok still at generation count " + cluster.getSlobrokGenerationCount() + ". Not updating."); - } + context.log(log, Level.FINEST, () -> "Slobrok still at generation count " + cluster.getSlobrokGenerationCount() + ". Not updating."); return false; } @@ -150,16 +151,18 @@ public class SlobrokClient implements NodeLookup { cluster.setSlobrokGenerationCount(mirrorVersion); for (NodeInfo nodeInfo : cluster.getNodeInfo()) { if (slobrokNodes.containsKey(nodeInfo.getNode()) && nodeInfo.isRpcAddressOutdated()) { - log.log(Level.WARNING, "Node " + nodeInfo - + " was tagged NOT in slobrok even though it is. It was in the following lists:" - + (newNodes.contains(nodeInfo.getNode()) ? " newNodes" : "") - + (missingNodeInfos.contains(nodeInfo) ? " missingNodes" : "") - + (alteredRpcAddressNodes.contains(nodeInfo.getNode()) ? " alteredNodes" : "") - + (returningNodeInfos.contains(nodeInfo) ? 
" returningNodes" : "")); + context.log(log, + Level.WARNING, + "Node " + nodeInfo + + " was tagged NOT in slobrok even though it is. It was in the following lists:" + + (newNodes.contains(nodeInfo.getNode()) ? " newNodes" : "") + + (missingNodeInfos.contains(nodeInfo) ? " missingNodes" : "") + + (alteredRpcAddressNodes.contains(nodeInfo.getNode()) ? " alteredNodes" : "") + + (returningNodeInfos.contains(nodeInfo) ? " returningNodes" : "")); nodeInfo.markRpcAddressLive(); } } - log.log(Level.FINEST, "Slobrok information updated to generation " + cluster.getSlobrokGenerationCount()); + context.log(log, Level.FINEST, () -> "Slobrok information updated to generation " + cluster.getSlobrokGenerationCount()); return true; } @@ -204,7 +207,7 @@ public class SlobrokClient implements NodeLookup { private Map<Node, SlobrokData> getSlobrokData(String pattern) { Map<Node, SlobrokData> result = new TreeMap<>(); List<Mirror.Entry> entries = mirror.lookup(pattern); - log.log(Level.FINEST, "Looking for slobrok entries with pattern '" + pattern + "'. Found " + entries.size() + " entries."); + context.log(log, Level.FINEST, () -> "Looking for slobrok entries with pattern '" + pattern + "'. 
Found " + entries.size() + " entries."); for (Mirror.Entry entry : entries) { StringTokenizer st = new StringTokenizer(entry.getName(), "/"); String addressType = st.nextToken(); diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/ClusterFeedBlockTest.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/ClusterFeedBlockTest.java index a52370a0654..f0b91102e8f 100644 --- a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/ClusterFeedBlockTest.java +++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/ClusterFeedBlockTest.java @@ -54,9 +54,9 @@ public class ClusterFeedBlockTest extends FleetControllerTest { var eventLog = new EventLog(timer, metricUpdater); var cluster = new ContentCluster(options.clusterName, options.nodes, options.storageDistribution); var stateGatherer = new NodeStateGatherer(timer, timer, eventLog); - var database = new DatabaseHandler(context, new ZooKeeperDatabaseFactory(), timer, options.zooKeeperServerAddress, timer); - var stateGenerator = new StateChangeHandler(timer, eventLog); - var stateBroadcaster = new SystemStateBroadcaster(timer, timer); + var database = new DatabaseHandler(context, new ZooKeeperDatabaseFactory(context), timer, options.zooKeeperServerAddress, timer); + var stateGenerator = new StateChangeHandler(context, timer, eventLog); + var stateBroadcaster = new SystemStateBroadcaster(context, timer, timer); var masterElectionHandler = new MasterElectionHandler(context, options.fleetControllerIndex, options.fleetControllerCount, timer, timer); ctrl = new FleetController(context, timer, eventLog, cluster, stateGatherer, communicator, null, null, communicator, database, stateGenerator, stateBroadcaster, masterElectionHandler, metricUpdater, options); diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/ClusterFixture.java 
b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/ClusterFixture.java index 75c31898408..4ce32484098 100644 --- a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/ClusterFixture.java +++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/ClusterFixture.java @@ -33,14 +33,11 @@ public class ClusterFixture { this.distribution = distribution; this.timer = new FakeTimer(); this.eventLog = mock(EventLogInterface.class); - this.nodeStateChangeHandler = createNodeStateChangeHandlerForCluster(); + var context = new FleetControllerContextImpl(new FleetControllerId(cluster.getName(), 0)); + this.nodeStateChangeHandler = new StateChangeHandler(context, timer, eventLog); this.params.cluster(this.cluster); } - private StateChangeHandler createNodeStateChangeHandlerForCluster() { - return new StateChangeHandler(timer, eventLog); - } - public ClusterFixture bringEntireClusterUp() { cluster.clusterInfo().getConfiguredNodes().forEach((idx, node) -> { reportStorageNodeState(idx, State.UP); diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/FleetControllerContextImplTest.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/FleetControllerContextImplTest.java new file mode 100644 index 00000000000..450975076bb --- /dev/null +++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/FleetControllerContextImplTest.java @@ -0,0 +1,43 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+package com.yahoo.vespa.clustercontroller.core; + +import org.junit.Test; + +import java.util.ArrayList; +import java.util.List; +import java.util.logging.Level; +import java.util.logging.LogRecord; +import java.util.logging.Logger; + +import static org.junit.Assert.assertEquals; + +/** + * @author hakonhall + */ +public class FleetControllerContextImplTest { + private final MockLogger logger = new MockLogger(); + public final FleetControllerId id = new FleetControllerId("clustername", 1); + private final FleetControllerContextImpl context = new FleetControllerContextImpl(id); + + @Test + public void verify() { + context.log(logger, Level.INFO, "A %s message", "log"); + + assertEquals(1, logger.records.size()); + assertEquals(Level.INFO, logger.records.get(0).getLevel()); + assertEquals("Cluster 'clustername': A log message", logger.records.get(0).getMessage()); + } + + private static class MockLogger extends Logger { + public List<LogRecord> records = new ArrayList<>(); + + public MockLogger() { + super(MockLogger.class.getName(), null); + } + + @Override + public void log(LogRecord record) { + records.add(record); + } + } +} diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/FleetControllerTest.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/FleetControllerTest.java index d115f9f0060..c56b3bbdc69 100644 --- a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/FleetControllerTest.java +++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/FleetControllerTest.java @@ -174,18 +174,18 @@ public abstract class FleetControllerTest implements Waiter { options.nodeStateRequestTimeoutEarliestPercentage, options.nodeStateRequestTimeoutLatestPercentage, options.nodeStateRequestRoundTripTimeMaxSeconds); - var lookUp = new SlobrokClient(timer); + var lookUp = new SlobrokClient(context, timer); lookUp.setSlobrokConnectionSpecs(new String[0]); var 
rpcServer = new RpcServer(timer, timer, options.clusterName, options.fleetControllerIndex, options.slobrokBackOffPolicy); - var database = new DatabaseHandler(context, new ZooKeeperDatabaseFactory(), timer, options.zooKeeperServerAddress, timer); + var database = new DatabaseHandler(context, new ZooKeeperDatabaseFactory(context), timer, options.zooKeeperServerAddress, timer); // Setting this <1000 ms causes ECONNREFUSED on socket trying to connect to ZK server, in ZooKeeper, // after creating a new ZooKeeper (session). This causes ~10s extra time to connect after connection loss. // Reasons unknown. Larger values like the default 10_000 causes that much additional running time for some tests. database.setMinimumWaitBetweenFailedConnectionAttempts(2_000); - var stateGenerator = new StateChangeHandler(timer, log); - var stateBroadcaster = new SystemStateBroadcaster(timer, timer); + var stateGenerator = new StateChangeHandler(context, timer, log); + var stateBroadcaster = new SystemStateBroadcaster(context, timer, timer); var masterElectionHandler = new MasterElectionHandler(context, options.fleetControllerIndex, options.fleetControllerCount, timer, timer); var controller = new FleetController(context, timer, log, cluster, stateGatherer, communicator, status, rpcServer, lookUp, database, stateGenerator, stateBroadcaster, masterElectionHandler, metricUpdater, options); if (startThread) { diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/StateChangeHandlerTest.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/StateChangeHandlerTest.java index b370c29537d..95c097c5920 100644 --- a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/StateChangeHandlerTest.java +++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/StateChangeHandlerTest.java @@ -81,7 +81,8 @@ public class StateChangeHandlerTest { this.config = config; for (int i=0; i<config.nodeCount; 
++i) configuredNodes.add(new ConfiguredNode(i, false)); cluster = new ContentCluster("testcluster", configuredNodes, distribution); - nodeStateChangeHandler = new StateChangeHandler(clock, eventLog); + var context = new FleetControllerContextImpl(new FleetControllerId(cluster.getName(), 0)); + nodeStateChangeHandler = new StateChangeHandler(context, clock, eventLog); params.minStorageNodesUp(1).minDistributorNodesUp(1) .minRatioOfStorageNodesUp(0.0).minRatioOfDistributorNodesUp(0.0) .maxPrematureCrashes(config.maxPrematureCrashes) diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/StateChangeTest.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/StateChangeTest.java index b601412ecc4..a5bb65e11d0 100644 --- a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/StateChangeTest.java +++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/StateChangeTest.java @@ -55,9 +55,9 @@ public class StateChangeTest extends FleetControllerTest { eventLog = new EventLog(timer, metricUpdater); var cluster = new ContentCluster(options.clusterName, options.nodes, options.storageDistribution); var stateGatherer = new NodeStateGatherer(timer, timer, eventLog); - var database = new DatabaseHandler(context, new ZooKeeperDatabaseFactory(), timer, options.zooKeeperServerAddress, timer); - var stateGenerator = new StateChangeHandler(timer, eventLog); - var stateBroadcaster = new SystemStateBroadcaster(timer, timer); + var database = new DatabaseHandler(context, new ZooKeeperDatabaseFactory(context), timer, options.zooKeeperServerAddress, timer); + var stateGenerator = new StateChangeHandler(context, timer, eventLog); + var stateBroadcaster = new SystemStateBroadcaster(context, timer, timer); var masterElectionHandler = new MasterElectionHandler(context, options.fleetControllerIndex, options.fleetControllerCount, timer, timer); ctrl = new FleetController(context, timer, 
eventLog, cluster, stateGatherer, communicator, null, null, communicator, database, stateGenerator, stateBroadcaster, masterElectionHandler, metricUpdater, options); diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/SystemStateBroadcasterTest.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/SystemStateBroadcasterTest.java index 84b479cfc29..45593375c0b 100644 --- a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/SystemStateBroadcasterTest.java +++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/SystemStateBroadcasterTest.java @@ -26,7 +26,8 @@ public class SystemStateBroadcasterTest { private static class Fixture { FakeTimer timer = new FakeTimer(); final Object monitor = new Object(); - SystemStateBroadcaster broadcaster = new SystemStateBroadcaster(timer, monitor); + FleetControllerContext context = mock(FleetControllerContext.class); + SystemStateBroadcaster broadcaster = new SystemStateBroadcaster(context, timer, monitor); Communicator mockCommunicator = mock(Communicator.class); DatabaseHandler mockDatabaseHandler = mock(DatabaseHandler.class); FleetController mockFleetController = mock(FleetController.class); diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/TestFleetControllerContext.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/TestFleetControllerContext.java index b9d8474affb..6fe8f92ac97 100644 --- a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/TestFleetControllerContext.java +++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/TestFleetControllerContext.java @@ -9,6 +9,10 @@ public class TestFleetControllerContext extends FleetControllerContextImpl { super(options); } + public TestFleetControllerContext(FleetControllerId id) { + super(id); + } + @Override protected String withLogPrefix(String 
message) { // Include fleet controller index in prefix in tests, since many may be running diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/ZooKeeperDatabaseTest.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/ZooKeeperDatabaseTest.java index d2407541680..a71665fb364 100644 --- a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/ZooKeeperDatabaseTest.java +++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/ZooKeeperDatabaseTest.java @@ -35,8 +35,10 @@ public class ZooKeeperDatabaseTest { void createDatabase() throws Exception { closeDatabaseIfOpen(); - zkDatabase = new ZooKeeperDatabase(clusterFixture.cluster(), nodeIndex, zkServer.getAddress(), - (int)sessionTimeout.toMillis(), mockListener); + var id = new FleetControllerId(clusterFixture.cluster.getName(), nodeIndex); + var context = new TestFleetControllerContext(id); + zkDatabase = new ZooKeeperDatabase(context, clusterFixture.cluster(), nodeIndex, zkServer.getAddress(), + (int)sessionTimeout.toMillis(), mockListener); } ZooKeeperDatabase db() { return zkDatabase; } diff --git a/config-model-api/abi-spec.json b/config-model-api/abi-spec.json index 15e53492860..8ad9f66ee6a 100644 --- a/config-model-api/abi-spec.json +++ b/config-model-api/abi-spec.json @@ -280,6 +280,19 @@ ], "fields": [] }, + "com.yahoo.config.application.api.DeploymentSpec$DeprecatedElement": { + "superClass": "java.lang.Object", + "interfaces": [], + "attributes": [ + "public" + ], + "methods": [ + "public void <init>(java.lang.String, java.util.List, java.lang.String)", + "public java.lang.String humanReadableString()", + "public java.lang.String toString()" + ], + "fields": [] + }, "com.yahoo.config.application.api.DeploymentSpec$ParallelSteps": { "superClass": "com.yahoo.config.application.api.DeploymentSpec$Steps", "interfaces": [], @@ -375,7 +388,7 @@ "public" ], "methods": [ - "public void 
<init>(java.util.List, java.util.Optional, java.util.Optional, java.util.Optional, java.util.List, java.lang.String)", + "public void <init>(java.util.List, java.util.Optional, java.util.Optional, java.util.Optional, java.util.List, java.lang.String, java.util.List)", "public java.util.Optional majorVersion()", "public java.util.List steps()", "public java.util.Optional athenzDomain()", @@ -387,6 +400,7 @@ "public java.util.List instanceNames()", "public java.util.List instances()", "public java.util.List endpoints()", + "public java.util.List deprecatedElements()", "public static com.yahoo.config.application.api.DeploymentSpec fromXml(java.io.Reader)", "public static com.yahoo.config.application.api.DeploymentSpec fromXml(java.lang.String)", "public static com.yahoo.config.application.api.DeploymentSpec fromXml(java.lang.String, boolean)", @@ -597,7 +611,8 @@ "public static final enum com.yahoo.config.application.api.ValidationId accessControl", "public static final enum com.yahoo.config.application.api.ValidationId globalEndpointChange", "public static final enum com.yahoo.config.application.api.ValidationId redundancyIncrease", - "public static final enum com.yahoo.config.application.api.ValidationId redundancyOne" + "public static final enum com.yahoo.config.application.api.ValidationId redundancyOne", + "public static final enum com.yahoo.config.application.api.ValidationId pagedSettingRemoval" ] }, "com.yahoo.config.application.api.ValidationOverrides$Allow": { diff --git a/config-model-api/src/main/java/com/yahoo/config/application/api/DeploymentSpec.java b/config-model-api/src/main/java/com/yahoo/config/application/api/DeploymentSpec.java index 97ece3a675e..88363db6e49 100644 --- a/config-model-api/src/main/java/com/yahoo/config/application/api/DeploymentSpec.java +++ b/config-model-api/src/main/java/com/yahoo/config/application/api/DeploymentSpec.java @@ -39,7 +39,8 @@ public class DeploymentSpec { Optional.empty(), Optional.empty(), List.of(), - 
"<deployment version='1.0'/>"); + "<deployment version='1.0'/>", + List.of()); private final List<Step> steps; @@ -48,6 +49,7 @@ public class DeploymentSpec { private final Optional<AthenzDomain> athenzDomain; private final Optional<AthenzService> athenzService; private final List<Endpoint> endpoints; + private final List<DeprecatedElement> deprecatedElements; private final String xmlForm; @@ -56,13 +58,15 @@ public class DeploymentSpec { Optional<AthenzDomain> athenzDomain, Optional<AthenzService> athenzService, List<Endpoint> endpoints, - String xmlForm) { + String xmlForm, + List<DeprecatedElement> deprecatedElements) { this.steps = List.copyOf(Objects.requireNonNull(steps)); this.majorVersion = Objects.requireNonNull(majorVersion); this.athenzDomain = Objects.requireNonNull(athenzDomain); this.athenzService = Objects.requireNonNull(athenzService); this.xmlForm = Objects.requireNonNull(xmlForm); this.endpoints = List.copyOf(Objects.requireNonNull(endpoints)); + this.deprecatedElements = List.copyOf(Objects.requireNonNull(deprecatedElements)); validateTotalDelay(steps); validateUpgradePoliciesOfIncreasingConservativeness(steps); validateAthenz(); @@ -201,6 +205,11 @@ public class DeploymentSpec { return endpoints; } + /** Returns the deprecated elements used when creating this */ + public List<DeprecatedElement> deprecatedElements() { + return deprecatedElements; + } + private static List<DeploymentInstanceSpec> instances(List<DeploymentSpec.Step> steps) { return steps.stream() .flatMap(DeploymentSpec::flatten) @@ -576,5 +585,37 @@ public class DeploymentSpec { } + /** + * Represents a deprecated XML element in {@link com.yahoo.config.application.api.DeploymentSpec}, or the deprecated + * attribute(s) of an element. 
+ */ + public static class DeprecatedElement { + + private final String tagName; + private final List<String> attributes; + private final String message; + + public DeprecatedElement(String tagName, List<String> attributes, String message) { + this.tagName = Objects.requireNonNull(tagName); + this.attributes = Objects.requireNonNull(attributes); + this.message = Objects.requireNonNull(message); + if (message.isBlank()) throw new IllegalArgumentException("message must be non-empty"); + } + + public String humanReadableString() { + if (attributes.isEmpty()) { + return "Element '" + tagName + "' is deprecated. " + message; + } + return "Element '" + tagName + "' contains deprecated attribute" + (attributes.size() > 1 ? "s" : "") + ": " + + attributes.stream().map(attr -> "'" + attr + "'").collect(Collectors.joining(", ")) + + ". " + message; + } + + @Override + public String toString() { + return humanReadableString(); + } + + } } diff --git a/config-model-api/src/main/java/com/yahoo/config/application/api/ValidationId.java b/config-model-api/src/main/java/com/yahoo/config/application/api/ValidationId.java index b8f6caa70d3..71e0b0926b9 100644 --- a/config-model-api/src/main/java/com/yahoo/config/application/api/ValidationId.java +++ b/config-model-api/src/main/java/com/yahoo/config/application/api/ValidationId.java @@ -25,7 +25,8 @@ public enum ValidationId { accessControl("access-control"), // Internal use, used in zones where there should be no access-control globalEndpointChange("global-endpoint-change"), // Changing global endpoints redundancyIncrease("redundancy-increase"), // Increasing redundancy - may easily cause feed blocked - redundancyOne("redundancy-one"); // redundancy=1 requires a validation override on first deployment + redundancyOne("redundancy-one"), // redundancy=1 requires a validation override on first deployment + pagedSettingRemoval("paged-setting-removal"); // May cause content nodes to run out of memory private final String id; diff --git 
a/config-model-api/src/main/java/com/yahoo/config/application/api/xml/DeploymentSpecXmlReader.java b/config-model-api/src/main/java/com/yahoo/config/application/api/xml/DeploymentSpecXmlReader.java index 6f77dce8fc5..aa985cd48bd 100644 --- a/config-model-api/src/main/java/com/yahoo/config/application/api/xml/DeploymentSpecXmlReader.java +++ b/config-model-api/src/main/java/com/yahoo/config/application/api/xml/DeploymentSpecXmlReader.java @@ -6,6 +6,7 @@ import com.yahoo.config.application.api.DeploymentSpec; import com.yahoo.config.application.api.DeploymentSpec.DeclaredTest; import com.yahoo.config.application.api.DeploymentSpec.DeclaredZone; import com.yahoo.config.application.api.DeploymentSpec.Delay; +import com.yahoo.config.application.api.DeploymentSpec.DeprecatedElement; import com.yahoo.config.application.api.DeploymentSpec.ParallelSteps; import com.yahoo.config.application.api.DeploymentSpec.Step; import com.yahoo.config.application.api.DeploymentSpec.Steps; @@ -66,6 +67,7 @@ public class DeploymentSpecXmlReader { private static final String testerFlavorAttribute = "tester-flavor"; private final boolean validate; + private final List<DeprecatedElement> deprecatedElements = new ArrayList<>(); /** Creates a validating reader */ public DeploymentSpecXmlReader() { @@ -92,6 +94,7 @@ public class DeploymentSpecXmlReader { /** Reads a deployment spec from XML */ public DeploymentSpec read(String xmlForm) { + deprecatedElements.clear(); Element root = XML.getDocument(xmlForm).getDocumentElement(); if ( ! 
root.getTagName().equals(deploymentTag)) illegal("The root tag must be <deployment>"); @@ -126,7 +129,8 @@ public class DeploymentSpecXmlReader { stringAttribute(athenzDomainAttribute, root).map(AthenzDomain::from), stringAttribute(athenzServiceAttribute, root).map(AthenzService::from), applicationEndpoints, - xmlForm); + xmlForm, + deprecatedElements); } /** @@ -404,6 +408,7 @@ public class DeploymentSpecXmlReader { private Optional<String> readGlobalServiceId(Element environmentTag) { String globalServiceId = environmentTag.getAttribute("global-service-id"); if (globalServiceId == null || globalServiceId.isEmpty()) return Optional.empty(); + deprecate(environmentTag, List.of("global-service-id"), "See https://cloud.vespa.ai/en/reference/routing#deprecated-syntax"); return Optional.of(globalServiceId); } @@ -477,12 +482,18 @@ public class DeploymentSpecXmlReader { private boolean readActive(Element regionTag) { String activeValue = regionTag.getAttribute("active"); + if ("".equals(activeValue)) return true; // Default to active + deprecate(regionTag, List.of("active"), "See https://cloud.vespa.ai/en/reference/routing#deprecated-syntax"); if ("true".equals(activeValue)) return true; if ("false".equals(activeValue)) return false; - throw new IllegalArgumentException("Region tags must have an 'active' attribute set to 'true' or 'false' " + + throw new IllegalArgumentException("Value of 'active' attribute in region tag must be 'true' or 'false' " + "to control whether this region should receive traffic from the global endpoint of this application"); } + private void deprecate(Element element, List<String> attributes, String message) { + deprecatedElements.add(new DeprecatedElement(element.getTagName(), attributes, message)); + } + private static boolean isEmptySpec(Element root) { if ( ! 
XML.getChildren(root).isEmpty()) return false; return root.getAttributes().getLength() == 0 diff --git a/config-model-api/src/main/java/com/yahoo/config/model/api/ApplicationClusterEndpoint.java b/config-model-api/src/main/java/com/yahoo/config/model/api/ApplicationClusterEndpoint.java index 0154e5d3b13..08ec615e4c0 100644 --- a/config-model-api/src/main/java/com/yahoo/config/model/api/ApplicationClusterEndpoint.java +++ b/config-model-api/src/main/java/com/yahoo/config/model/api/ApplicationClusterEndpoint.java @@ -4,6 +4,7 @@ package com.yahoo.config.model.api; import com.yahoo.config.provision.ApplicationId; import com.yahoo.config.provision.ClusterSpec; +import com.yahoo.config.provision.SystemName; import java.util.List; import java.util.Objects; @@ -17,9 +18,21 @@ import java.util.stream.Stream; * @author mortent */ public class ApplicationClusterEndpoint { + @Override + public String toString() { + return "ApplicationClusterEndpoint{" + + "dnsName=" + dnsName + + ", scope=" + scope + + ", routingMethod=" + routingMethod + + ", weight=" + weight + + ", hostNames=" + hostNames + + ", clusterId='" + clusterId + '\'' + + '}'; + } + public enum Scope {application, global, zone} - public enum RoutingMethod {shared, sharedLayer4} + public enum RoutingMethod {shared, sharedLayer4, exclusive} private final DnsName dnsName; private final Scope scope; @@ -99,6 +112,11 @@ public class ApplicationClusterEndpoint { return this; } + public Builder routingMethod(RoutingMethod routingMethod) { + this.routingMethod = routingMethod; + return this; + } + public Builder weight(int weigth) { this.weigth = weigth; return this; @@ -132,16 +150,25 @@ public class ApplicationClusterEndpoint { return name; } - // TODO: remove + // TODO: remove when 7.508 is latest version public static DnsName sharedNameFrom(ClusterSpec.Id cluster, ApplicationId applicationId, String suffix) { - String name = dnsParts(cluster, applicationId) + return sharedNameFrom(SystemName.main, cluster, 
applicationId, suffix); + } + + public static DnsName sharedNameFrom(SystemName systemName, ClusterSpec.Id cluster, ApplicationId applicationId, String suffix) { + String name = dnsParts(systemName, cluster, applicationId) .filter(Objects::nonNull) // remove null values that were "default" .collect(Collectors.joining("--")); return new DnsName(sanitize(name) + suffix); // Need to sanitize name since it is considered one label } + // TODO remove this method when 7.508 is latest version public static DnsName sharedL4NameFrom(ClusterSpec.Id cluster, ApplicationId applicationId, String suffix) { - String name = dnsParts(cluster, applicationId) + return sharedL4NameFrom(SystemName.main, cluster, applicationId, suffix); + } + + public static DnsName sharedL4NameFrom(SystemName systemName, ClusterSpec.Id cluster, ApplicationId applicationId, String suffix) { + String name = dnsParts(systemName, cluster, applicationId) .filter(Objects::nonNull) // remove null values that were "default" .map(DnsName::sanitize) .collect(Collectors.joining(".")); @@ -152,9 +179,10 @@ public class ApplicationClusterEndpoint { return new DnsName(name); } - private static Stream<String> dnsParts(ClusterSpec.Id cluster, ApplicationId applicationId) { + private static Stream<String> dnsParts(SystemName systemName, ClusterSpec.Id cluster, ApplicationId applicationId) { return Stream.of( nullIfDefault(cluster.value()), + systemPart(systemName), nullIfDefault(applicationId.instance().value()), applicationId.application().value(), applicationId.tenant().value() @@ -180,5 +208,16 @@ public class ApplicationClusterEndpoint { private static String nullIfDefault(String string) { return Optional.of(string).filter(s -> !s.equals("default")).orElse(null); } + + private static String systemPart(SystemName systemName) { + return "cd".equals(systemName.value()) ? 
systemName.value() : null; + } + + @Override + public String toString() { + return "DnsName{" + + "name='" + name + '\'' + + '}'; + } } } diff --git a/config-model-api/src/main/java/com/yahoo/config/model/api/ContainerEndpoint.java b/config-model-api/src/main/java/com/yahoo/config/model/api/ContainerEndpoint.java index a114f9d40ef..78da750fb5b 100644 --- a/config-model-api/src/main/java/com/yahoo/config/model/api/ContainerEndpoint.java +++ b/config-model-api/src/main/java/com/yahoo/config/model/api/ContainerEndpoint.java @@ -3,6 +3,9 @@ package com.yahoo.config.model.api; import java.util.List; import java.util.Objects; +import java.util.Optional; +import java.util.OptionalInt; +import java.util.OptionalLong; /** * ContainerEndpoint tracks the service names that a Container Cluster should be @@ -16,11 +19,23 @@ public class ContainerEndpoint { private final String clusterId; private final ApplicationClusterEndpoint.Scope scope; private final List<String> names; + private final OptionalInt weight; + private final ApplicationClusterEndpoint.RoutingMethod routingMethod; public ContainerEndpoint(String clusterId, ApplicationClusterEndpoint.Scope scope, List<String> names) { + this(clusterId, scope, names, OptionalInt.empty()); + } + + public ContainerEndpoint(String clusterId, ApplicationClusterEndpoint.Scope scope, List<String> names, OptionalInt weight) { + this(clusterId, scope, names, weight, ApplicationClusterEndpoint.RoutingMethod.sharedLayer4); + } + + public ContainerEndpoint(String clusterId, ApplicationClusterEndpoint.Scope scope, List<String> names, OptionalInt weight, ApplicationClusterEndpoint.RoutingMethod routingMethod) { this.clusterId = Objects.requireNonNull(clusterId); this.scope = Objects.requireNonNull(scope); this.names = List.copyOf(Objects.requireNonNull(names)); + this.weight = weight; + this.routingMethod = routingMethod; } public String clusterId() { @@ -35,6 +50,14 @@ public class ContainerEndpoint { return scope; } + public OptionalInt 
weight() { + return weight; + } + + public ApplicationClusterEndpoint.RoutingMethod routingMethod() { + return routingMethod; + } + @Override public boolean equals(Object o) { if (this == o) return true; @@ -42,17 +65,18 @@ public class ContainerEndpoint { ContainerEndpoint that = (ContainerEndpoint) o; return Objects.equals(clusterId, that.clusterId) && Objects.equals(scope, that.scope) && - Objects.equals(names, that.names); + Objects.equals(names, that.names) && + Objects.equals(weight, that.weight) && + Objects.equals(routingMethod, that.routingMethod); } @Override public int hashCode() { - return Objects.hash(clusterId, names, scope); + return Objects.hash(clusterId, names, scope, weight, routingMethod); } @Override public String toString() { - return String.format("container endpoint %s -> %s [scope=%s]", clusterId, names, scope); + return String.format("container endpoint %s -> %s [scope=%s, weight=%s, routingMethod=%s]", clusterId, names, scope, weight, routingMethod); } - } diff --git a/config-model-api/src/main/java/com/yahoo/config/model/api/FileDistribution.java b/config-model-api/src/main/java/com/yahoo/config/model/api/FileDistribution.java index 78ffc8667fe..dd4706461ad 100644 --- a/config-model-api/src/main/java/com/yahoo/config/model/api/FileDistribution.java +++ b/config-model-api/src/main/java/com/yahoo/config/model/api/FileDistribution.java @@ -22,6 +22,7 @@ public interface FileDistribution { */ void startDownload(String hostName, int port, Set<FileReference> fileReferences); - File getFileReferencesDir(); + // TODO: Remove when 7.508 is latest version in use + default File getFileReferencesDir() {return null; } } diff --git a/config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java b/config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java index 42041f3a0b0..3df93f7d08d 100644 --- a/config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java +++ 
b/config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java @@ -108,6 +108,8 @@ public interface ModelContext { @ModelFeatureFlag(owners = {"geirst", "vekterli"}) default boolean asyncApplyBucketDiff() { return false; } @ModelFeatureFlag(owners = {"arnej"}) default boolean ignoreThreadStackSizes() { return false; } @ModelFeatureFlag(owners = {"vekterli", "geirst"}) default boolean unorderedMergeChaining() { return false; } + @ModelFeatureFlag(owners = {"arnej"}) default boolean useV8GeoPositions() { return false; } + @ModelFeatureFlag(owners = {"arnej", "baldersheim"}) default boolean useV8DocManagerCfg() { return false; } } /** Warning: As elsewhere in this package, do not make backwards incompatible changes that will break old config models! */ diff --git a/config-model-api/src/test/java/com/yahoo/config/application/api/DeploymentSpecTest.java b/config-model-api/src/test/java/com/yahoo/config/application/api/DeploymentSpecTest.java index 74e79d5e8cf..43ccc34284f 100644 --- a/config-model-api/src/test/java/com/yahoo/config/application/api/DeploymentSpecTest.java +++ b/config-model-api/src/test/java/com/yahoo/config/application/api/DeploymentSpecTest.java @@ -1162,8 +1162,8 @@ public class DeploymentSpecTest { var spec = DeploymentSpec.fromXml("<deployment>" + " <instance id='default'>" + " <prod>" + - " <region active=\"true\">us-east</region>" + - " <region active=\"true\">us-west</region>" + + " <region>us-east</region>" + + " <region>us-west</region>" + " </prod>" + " <endpoints>" + " <endpoint id=\"foo\" container-id=\"bar\">" + diff --git a/config-model/.gitignore b/config-model/.gitignore index b0f358e8113..4cf50da0853 100644 --- a/config-model/.gitignore +++ b/config-model/.gitignore @@ -4,3 +4,4 @@ /target /src/test/integration/*/copy/ /src/test/integration/*/models.generated/ +*.cfg.actual diff --git a/config-model/src/main/java/com/yahoo/config/model/ApplicationConfigProducerRoot.java 
b/config-model/src/main/java/com/yahoo/config/model/ApplicationConfigProducerRoot.java index e86689c544f..27286a7dbbe 100644 --- a/config-model/src/main/java/com/yahoo/config/model/ApplicationConfigProducerRoot.java +++ b/config-model/src/main/java/com/yahoo/config/model/ApplicationConfigProducerRoot.java @@ -11,10 +11,11 @@ import com.yahoo.cloud.config.SlobroksConfig; import com.yahoo.cloud.config.ZookeepersConfig; import com.yahoo.cloud.config.log.LogdConfig; import com.yahoo.component.Version; +import com.yahoo.config.model.api.ModelContext; import com.yahoo.config.model.deploy.DeployState; import com.yahoo.config.model.producer.AbstractConfigProducer; import com.yahoo.config.provision.ApplicationId; -import com.yahoo.document.DocumenttypesConfig; +import com.yahoo.document.config.DocumenttypesConfig; import com.yahoo.document.config.DocumentmanagerConfig; import com.yahoo.documentapi.messagebus.protocol.DocumentrouteselectorpolicyConfig; import com.yahoo.documentapi.messagebus.protocol.DocumentProtocolPoliciesConfig; @@ -76,6 +77,14 @@ public class ApplicationConfigProducerRoot extends AbstractConfigProducer<Abstra this.applicationId = applicationId; } + private boolean useV8GeoPositions = false; + private boolean useV8DocManagerCfg = false; + + public void useFeatureFlags(ModelContext.FeatureFlags featureFlags) { + this.useV8GeoPositions = featureFlags.useV8GeoPositions(); + this.useV8DocManagerCfg = featureFlags.useV8DocManagerCfg(); + } + /** * @return an unmodifiable copy of the set of configIds in this VespaModel. 
*/ @@ -151,12 +160,17 @@ public class ApplicationConfigProducerRoot extends AbstractConfigProducer<Abstra @Override public void getConfig(DocumentmanagerConfig.Builder builder) { - new DocumentManager().produce(documentModel, builder); + new DocumentManager() + .useV8GeoPositions(this.useV8GeoPositions) + .useV8DocManagerCfg(this.useV8DocManagerCfg) + .produce(documentModel, builder); } @Override public void getConfig(DocumenttypesConfig.Builder builder) { - new DocumentTypes().produce(documentModel, builder); + new DocumentTypes() + .useV8GeoPositions(this.useV8GeoPositions) + .produce(documentModel, builder); } @Override diff --git a/config-model/src/main/java/com/yahoo/config/model/CommonConfigsProducer.java b/config-model/src/main/java/com/yahoo/config/model/CommonConfigsProducer.java index 094b11dcbc7..f8632a6b187 100644 --- a/config-model/src/main/java/com/yahoo/config/model/CommonConfigsProducer.java +++ b/config-model/src/main/java/com/yahoo/config/model/CommonConfigsProducer.java @@ -10,7 +10,7 @@ import com.yahoo.cloud.config.SlobroksConfig; import com.yahoo.cloud.config.ClusterListConfig; import com.yahoo.cloud.config.ZookeepersConfig; import com.yahoo.cloud.config.ModelConfig; -import com.yahoo.document.DocumenttypesConfig; +import com.yahoo.document.config.DocumenttypesConfig; import com.yahoo.document.config.DocumentmanagerConfig; import com.yahoo.documentapi.messagebus.protocol.DocumentrouteselectorpolicyConfig; import com.yahoo.messagebus.MessagebusConfig; diff --git a/config-model/src/main/java/com/yahoo/config/model/producer/AbstractConfigProducer.java b/config-model/src/main/java/com/yahoo/config/model/producer/AbstractConfigProducer.java index 7c9c6292999..47a5fb24a43 100644 --- a/config-model/src/main/java/com/yahoo/config/model/producer/AbstractConfigProducer.java +++ b/config-model/src/main/java/com/yahoo/config/model/producer/AbstractConfigProducer.java @@ -1,7 +1,7 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. 
See LICENSE in the project root. package com.yahoo.config.model.producer; -import com.google.common.annotations.Beta; +import com.yahoo.api.annotations.Beta; import com.yahoo.config.ConfigInstance; import com.yahoo.config.model.ApplicationConfigProducerRoot; import com.yahoo.config.model.deploy.DeployState; diff --git a/config-model/src/main/java/com/yahoo/config/model/test/TestDriver.java b/config-model/src/main/java/com/yahoo/config/model/test/TestDriver.java index 6fe56337a0d..fd98d21dcd5 100644 --- a/config-model/src/main/java/com/yahoo/config/model/test/TestDriver.java +++ b/config-model/src/main/java/com/yahoo/config/model/test/TestDriver.java @@ -1,7 +1,7 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.config.model.test; -import com.google.common.annotations.Beta; +import com.yahoo.api.annotations.Beta; import com.yahoo.component.Version; import com.yahoo.config.model.MapConfigModelRegistry; import com.yahoo.config.application.api.ApplicationPackage; diff --git a/config-model/src/main/java/com/yahoo/config/model/test/TestRoot.java b/config-model/src/main/java/com/yahoo/config/model/test/TestRoot.java index 06e483c102b..c1fd8e4646d 100644 --- a/config-model/src/main/java/com/yahoo/config/model/test/TestRoot.java +++ b/config-model/src/main/java/com/yahoo/config/model/test/TestRoot.java @@ -1,7 +1,7 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.config.model.test; -import com.google.common.annotations.Beta; +import com.yahoo.api.annotations.Beta; import com.yahoo.config.ConfigInstance; import com.yahoo.config.model.ConfigModel; import com.yahoo.vespa.model.HostResource; diff --git a/config-model/src/main/java/com/yahoo/documentmodel/DataTypeRepo.java b/config-model/src/main/java/com/yahoo/documentmodel/DataTypeRepo.java index 118714ca2b1..8848759b415 100644 --- a/config-model/src/main/java/com/yahoo/documentmodel/DataTypeRepo.java +++ b/config-model/src/main/java/com/yahoo/documentmodel/DataTypeRepo.java @@ -48,9 +48,14 @@ public class DataTypeRepo implements DataTypeCollection { { throw new IllegalStateException("Data type '" + type.getName() + "' is not registered."); } - typeByName.remove(type.getName()); + var oldByName = typeByName.remove(type.getName()); + var oldById = typeById.remove(type.getId()); + if (oldByName != oldById) { + throw new IllegalStateException("Data type '" + type.getName() + + "' inconsistent replace, by name: " + oldByName + + " but by id: " + oldById); + } typeByName.put(type.getName(), type); - typeById.remove(type.getId()); typeById.put(type.getId(), type); return this; } diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/DocumentModelBuilder.java b/config-model/src/main/java/com/yahoo/searchdefinition/DocumentModelBuilder.java index 8809cdeacc8..55f24123940 100644 --- a/config-model/src/main/java/com/yahoo/searchdefinition/DocumentModelBuilder.java +++ b/config-model/src/main/java/com/yahoo/searchdefinition/DocumentModelBuilder.java @@ -26,6 +26,7 @@ import com.yahoo.vespa.documentmodel.FieldView; import com.yahoo.vespa.documentmodel.SearchDef; import com.yahoo.vespa.documentmodel.SearchField; +import java.util.AbstractMap; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; @@ -184,57 +185,97 @@ public class DocumentModelBuilder { } } } + + // This is how you make a "Pair" class in java.... 
+ private static class TypeReplacement extends AbstractMap.SimpleEntry<DataType,DataType> { + DataType oldType() { return getKey(); } + DataType newType() { return getValue(); } + public TypeReplacement(DataType oldType, DataType newType) { + super(oldType, newType); + } + } + + private static String descT(DataType type) { + if (type == null) { return "<null>"; } + return "'" + type.getName() + "' [" + type.getId() + "] {"+type.getClass() + "}"; + } + private void addDocumentTypes(List<SDDocumentType> docList) { LinkedList<NewDocumentType> lst = new LinkedList<>(); for (SDDocumentType doc : docList) { lst.add(convert(doc)); model.getDocumentManager().add(lst.getLast()); } + Set<TypeReplacement> replacements = new HashSet<>(); for(NewDocumentType doc : lst) { - resolveTemporaries(doc.getAllTypes(), lst); + resolveTemporaries(doc.getAllTypes(), lst, replacements); + } + for(NewDocumentType doc : lst) { + for (var entry : replacements) { + var old = entry.oldType(); + if (doc.getDataType(old.getId()) == old) { + doc.replace(entry.newType()); + } + } } } - private static void resolveTemporaries(DataTypeCollection dtc, Collection<NewDocumentType> docs) { + + private static void resolveTemporaries(DataTypeCollection dtc, + Collection<NewDocumentType> docs, + Set<TypeReplacement> replacements) + { for (DataType type : dtc.getTypes()) { - resolveTemporariesRecurse(type, dtc, docs); + resolveTemporariesRecurse(type, dtc, docs, replacements); } } @SuppressWarnings("deprecation") private static DataType resolveTemporariesRecurse(DataType type, DataTypeCollection repo, - Collection<NewDocumentType> docs) { + Collection<NewDocumentType> docs, + Set<TypeReplacement> replacements) + { + DataType original = type; if (type instanceof TemporaryStructuredDataType) { - DataType struct = repo.getDataType(type.getId()); - if (struct != null) - type = struct; - else - type = getDocumentType(docs, type.getId()); - } - else if (type instanceof StructDataType) { + DataType other = 
repo.getDataType(type.getId()); + if (other == null || other == type) { + other = getDocumentType(docs, type.getId()); + } + if (other != null) { + type = other; + } + } else if (type instanceof DocumentType || type instanceof NewDocumentType) { + DataType other = getDocumentType(docs, type.getId()); + if (other != null) { + type = other; + } + } else if (type instanceof StructDataType) { StructDataType dt = (StructDataType) type; for (com.yahoo.document.Field field : dt.getFields()) { if (field.getDataType() != type) { // XXX deprecated: - field.setDataType(resolveTemporariesRecurse(field.getDataType(), repo, docs)); + field.setDataType(resolveTemporariesRecurse(field.getDataType(), repo, docs, replacements)); } } } else if (type instanceof MapDataType) { MapDataType t = (MapDataType) type; - t.setKeyType(resolveTemporariesRecurse(t.getKeyType(), repo, docs)); - t.setValueType(resolveTemporariesRecurse(t.getValueType(), repo, docs)); + t.setKeyType(resolveTemporariesRecurse(t.getKeyType(), repo, docs, replacements)); + t.setValueType(resolveTemporariesRecurse(t.getValueType(), repo, docs, replacements)); } else if (type instanceof CollectionDataType) { CollectionDataType t = (CollectionDataType) type; - t.setNestedType(resolveTemporariesRecurse(t.getNestedType(), repo, docs)); + t.setNestedType(resolveTemporariesRecurse(t.getNestedType(), repo, docs, replacements)); } else if (type instanceof ReferenceDataType) { ReferenceDataType t = (ReferenceDataType) type; if (t.getTargetType() instanceof TemporaryStructuredDataType) { - DataType targetType = resolveTemporariesRecurse(t.getTargetType(), repo, docs); + DataType targetType = resolveTemporariesRecurse(t.getTargetType(), repo, docs, replacements); t.setTargetType((StructuredDataType) targetType); } } + if (type != original) { + replacements.add(new TypeReplacement(original, type)); + } return type; } @@ -349,6 +390,13 @@ public class DocumentModelBuilder { throw new IllegalArgumentException("Data type '" + 
sdoc.getName() + "' is not a struct => tostring='" + sdoc.toString() + "'."); } } + for (SDDocumentType type : sdoc.getTypes()) { + for (SDDocumentType proxy : type.getInheritedTypes()) { + var inherited = dt.getDataTypeRecursive(proxy.getName()); + var converted = (StructDataType) dt.getDataType(type.getName()); + converted.inherit((StructDataType) inherited); + } + } for (AnnotationType annotation : sdoc.getAnnotations().values()) { dt.add(annotation); } diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/MapEvaluationTypeContext.java b/config-model/src/main/java/com/yahoo/searchdefinition/MapEvaluationTypeContext.java index e2af71ebbf3..fef7ff56763 100644 --- a/config-model/src/main/java/com/yahoo/searchdefinition/MapEvaluationTypeContext.java +++ b/config-model/src/main/java/com/yahoo/searchdefinition/MapEvaluationTypeContext.java @@ -17,7 +17,6 @@ import com.yahoo.tensor.TensorType; import com.yahoo.tensor.evaluation.TypeContext; import java.util.ArrayDeque; -import java.util.Collection; import java.util.Collections; import java.util.Deque; import java.util.HashMap; @@ -65,7 +64,7 @@ public class MapEvaluationTypeContext extends FunctionReferenceContext implement globallyResolvedTypes = new HashMap<>(); } - private MapEvaluationTypeContext(ImmutableMap<String, ExpressionFunction> functions, + private MapEvaluationTypeContext(Map<String, ExpressionFunction> functions, Map<String, String> bindings, Optional<MapEvaluationTypeContext> parent, Map<Reference, TensorType> featureTypes, @@ -250,7 +249,7 @@ public class MapEvaluationTypeContext extends FunctionReferenceContext implement private Optional<ExpressionFunction> functionInvocation(Reference reference) { if (reference.output() != null) return Optional.empty(); - ExpressionFunction function = functions().get(reference.name()); + ExpressionFunction function = getFunctions().get(reference.name()); if (function == null) return Optional.empty(); if (function.arguments().size() != 
reference.arguments().size()) return Optional.empty(); return Optional.of(function); @@ -348,7 +347,7 @@ public class MapEvaluationTypeContext extends FunctionReferenceContext implement @Override public MapEvaluationTypeContext withBindings(Map<String, String> bindings) { - return new MapEvaluationTypeContext(functions(), + return new MapEvaluationTypeContext(getFunctions(), bindings, Optional.of(this), featureTypes, diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/RankProfileRegistry.java b/config-model/src/main/java/com/yahoo/searchdefinition/RankProfileRegistry.java index ec446e27670..08ae3d838ec 100644 --- a/config-model/src/main/java/com/yahoo/searchdefinition/RankProfileRegistry.java +++ b/config-model/src/main/java/com/yahoo/searchdefinition/RankProfileRegistry.java @@ -6,7 +6,7 @@ import com.yahoo.searchdefinition.document.SDDocumentType; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; -import java.util.Collections; +import java.util.HashMap; import java.util.HashSet; import java.util.LinkedHashMap; import java.util.List; @@ -22,18 +22,15 @@ import java.util.Set; * * @author Ulf Lilleengen */ +// TODO: These should be stored in each schema as everything else public class RankProfileRegistry { private final Map<String, Map<String, RankProfile>> rankProfiles = new LinkedHashMap<>(); - private static final String MAGIC_GLOBAL_RANKPROFILES = "[MAGIC_GLOBAL_RANKPROFILES]"; + private static final String globalRankProfilesKey = "[global]"; /* These rank profiles can be overridden: 'default' rank profile, as that is documented to work. And 'unranked'. 
*/ static final Set<String> overridableRankProfileNames = new HashSet<>(Arrays.asList("default", "unranked")); - public RankProfileRegistry() { - - } - public static RankProfileRegistry createRankProfileRegistryWithBuiltinRankProfiles(Schema schema) { RankProfileRegistry rankProfileRegistry = new RankProfileRegistry(); rankProfileRegistry.add(new DefaultRankProfile(schema, rankProfileRegistry, schema.rankingConstants())); @@ -42,14 +39,10 @@ public class RankProfileRegistry { } private String extractName(ImmutableSchema search) { - return search != null ? search.getName() : MAGIC_GLOBAL_RANKPROFILES; + return search != null ? search.getName() : globalRankProfilesKey; } - /** - * Adds a rank profile to this registry - * - * @param rankProfile the rank profile to add - */ + /** Adds a rank profile to this registry */ public void add(RankProfile rankProfile) { String searchName = extractName(rankProfile.getSearch()); if ( ! rankProfiles.containsKey(searchName)) { @@ -91,7 +84,7 @@ public class RankProfileRegistry { } public RankProfile getGlobal(String name) { - Map<String, RankProfile> profiles = rankProfiles.get(MAGIC_GLOBAL_RANKPROFILES); + Map<String, RankProfile> profiles = rankProfiles.get(globalRankProfilesKey); if (profiles == null) return null; return profiles.get(name); } @@ -103,12 +96,13 @@ public class RankProfileRegistry { RankProfile parentProfile = resolve(parent, name); if (parentProfile != null) return parentProfile; } - return get(MAGIC_GLOBAL_RANKPROFILES, name); + return get(globalRankProfilesKey, name); } /** * Rank profiles that are collected across clusters. - * @return A set of global {@link RankProfile} instances. + * + * @return a set of global {@link RankProfile} instances */ public Collection<RankProfile> all() { List<RankProfile> all = new ArrayList<>(); @@ -119,26 +113,28 @@ public class RankProfileRegistry { } /** - * Returns the rank profiles of a given search definition. 
+ * Retrieve all rank profiles for a schema * - * @param search the searchdefinition to get rank profiles for + * @param schema the schema to fetch rank profiles for, or null for the global ones * @return a collection of {@link RankProfile} instances */ - public Collection<RankProfile> rankProfilesOf(String search) { - Map<String, RankProfile> mapping = rankProfiles.get(search); - if (mapping == null) { - return Collections.emptyList(); + public Collection<RankProfile> rankProfilesOf(ImmutableSchema schema) { + String key = schema == null ? globalRankProfilesKey : schema.getName(); + + if ( ! rankProfiles.containsKey(key)) return List.of(); + + var profiles = new LinkedHashMap<>(rankProfiles.get(key)); + // Add all profiles in inherited schemas, unless they are already present (overridden) + while (schema != null && schema.inherited().isPresent()) { + schema = schema.inherited().get(); + var inheritedProfiles = rankProfiles.get(schema.getName()); + if (inheritedProfiles != null) { + for (Map.Entry<String, RankProfile> inheritedProfile : inheritedProfiles.entrySet()) { + profiles.putIfAbsent(inheritedProfile.getKey(), inheritedProfile.getValue()); + } + } } - return mapping.values(); - } - - /** - * Retrieve all rank profiles for a search definition - * @param search search definition to fetch rank profiles for, or null for the global ones - * @return Collection of RankProfiles - */ - public Collection<RankProfile> rankProfilesOf(ImmutableSchema search) { - return rankProfilesOf(extractName(search)); + return profiles.values(); } } diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/Schema.java b/config-model/src/main/java/com/yahoo/searchdefinition/Schema.java index c7a7ecd1d08..ddad67324ba 100644 --- a/config-model/src/main/java/com/yahoo/searchdefinition/Schema.java +++ b/config-model/src/main/java/com/yahoo/searchdefinition/Schema.java @@ -344,11 +344,12 @@ public class Schema implements ImmutableSchema { } /** - * Returns a field defined in one 
of the documents of this search definition. This does <b>not</b> include the extra - * fields defined outside of a document (those accessible through the getExtraField() method). + * Returns a field defined in one of the documents of this search definition. + * This does not include the extra fields defined outside the document + * (those accessible through the getExtraField() method). * - * @param name The name of the field to return. - * @return The named field, or null if not found. + * @param name the name of the field to return + * @return the named field, or null if not found */ public SDField getDocumentField(String name) { return (SDField) documentType.getField(name); @@ -458,7 +459,7 @@ public class Schema implements ImmutableSchema { /** * Consolidates a set of index settings for the same index into one * - * @param indices The list of indexes to consolidate + * @param indices the list of indexes to consolidate * @return the consolidated index */ private Index consolidateIndices(List<Index> indices) { @@ -477,13 +478,10 @@ public class Schema implements ImmutableSchema { if (consolidated.getRankType() == null) { consolidated.setRankType(current.getRankType()); } else { - if (current.getRankType() != null && - !consolidated.getRankType().equals(current.getRankType())) - { + if (current.getRankType() != null && consolidated.getRankType() != current.getRankType()) deployLogger.logApplicationPackage(Level.WARNING, "Conflicting rank type settings for " + first.getName() + " in " + this + ", using " + consolidated.getRankType()); - } } for (Iterator<String> j = current.aliasIterator(); j.hasNext();) { @@ -505,11 +503,8 @@ public class Schema implements ImmutableSchema { } } - for (ImmutableSDField field : allConcreteFields()) { - for (Index index : field.getIndices().values()) { - allIndices.add(index); - } - } + for (ImmutableSDField field : allConcreteFields()) + allIndices.addAll(field.getIndices().values()); return Collections.unmodifiableList(allIndices); 
} @@ -618,17 +613,7 @@ public class Schema implements ImmutableSchema { return summaryFields; } - @Override - public int hashCode() { - return name.hashCode(); - } - - /** - * Returns the first occurrence of an attribute having this name, or null if none - * - * @param name Name of attribute - * @return The Attribute with given name. - */ + /** Returns the first occurrence of an attribute having this name, or null if none */ public Attribute getAttribute(String name) { for (ImmutableSDField field : allConcreteFields()) { Attribute attribute = field.getAttributes().get(name); @@ -650,33 +635,29 @@ public class Schema implements ImmutableSchema { } @Override + public int hashCode() { + return name.hashCode(); + } + + @Override public String toString() { return "schema '" + getName() + "'"; } public boolean isAccessingDiskSummary(SummaryField field) { - if (!field.getTransform().isInMemory()) { - return true; - } - if (field.getSources().size() == 0) { - return isAccessingDiskSummary(getName()); - } + if (!field.getTransform().isInMemory()) return true; + if (field.getSources().size() == 0) return isAccessingDiskSummary(getName()); for (SummaryField.Source source : field.getSources()) { - if (isAccessingDiskSummary(source.getName())) { + if (isAccessingDiskSummary(source.getName())) return true; - } } return false; } private boolean isAccessingDiskSummary(String source) { SDField field = getConcreteField(source); - if (field == null) { - return false; - } - if (field.doesSummarying() && !field.doesAttributing()) { - return true; - } + if (field == null) return false; + if (field.doesSummarying() && !field.doesAttributing()) return true; return false; } diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/SchemaBuilder.java b/config-model/src/main/java/com/yahoo/searchdefinition/SchemaBuilder.java index 2ff4d2d44d0..098426865fb 100644 --- a/config-model/src/main/java/com/yahoo/searchdefinition/SchemaBuilder.java +++ 
b/config-model/src/main/java/com/yahoo/searchdefinition/SchemaBuilder.java @@ -218,6 +218,8 @@ public class SchemaBuilder { public void build(boolean validate) { if (isBuilt) throw new IllegalStateException("Application already built"); + new TemporarySDTypeResolver(application.schemas().values(), deployLogger).process(); + if (validate) application.validate(deployLogger); diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/TemporarySDTypeResolver.java b/config-model/src/main/java/com/yahoo/searchdefinition/TemporarySDTypeResolver.java new file mode 100644 index 00000000000..2eaf0d5e5ba --- /dev/null +++ b/config-model/src/main/java/com/yahoo/searchdefinition/TemporarySDTypeResolver.java @@ -0,0 +1,79 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +package com.yahoo.searchdefinition; + +import com.yahoo.config.application.api.DeployLogger; +import com.yahoo.searchdefinition.document.SDDocumentType; +import com.yahoo.searchdefinition.document.TemporarySDDocumentType; + +import java.util.Collection; +import java.util.LinkedList; +import java.util.List; +import java.util.logging.Level; + +/** + * @author arnej + */ +public class TemporarySDTypeResolver { + + private final DeployLogger deployLogger; + private final Collection<Schema> toProcess; + private final List<SDDocumentType> docTypes = new LinkedList<>(); + + public TemporarySDTypeResolver(Collection<Schema> schemas, DeployLogger deployLogger) { + this.deployLogger = deployLogger; + this.toProcess = schemas; + } + + private SDDocumentType findDocType(String name) { + assert(name != null); + for (var doc : docTypes) { + if (doc.getName().equals(name)) { + return doc; + } + } + deployLogger.logApplicationPackage(Level.WARNING, "No document type in application matching name: "+name); + return null; + } + + public void process() { + docTypes.add(SDDocumentType.VESPA_DOCUMENT); + for (Schema schema : toProcess) { + if (schema.hasDocument()) { 
+ docTypes.add(schema.getDocument()); + } + } + // first, fix inheritance + for (SDDocumentType doc : docTypes) { + for (SDDocumentType inherited : doc.getInheritedTypes()) { + if (inherited instanceof TemporarySDDocumentType) { + var actual = findDocType(inherited.getName()); + if (actual != null) { + doc.inherit(actual); + } else { + deployLogger.logApplicationPackage(Level.WARNING, "Unresolved inherit '"+inherited.getName() +"' for document "+doc.getName()); + } + } + } + } + // next, check owned types (structs only?) + for (SDDocumentType doc : docTypes) { + for (SDDocumentType owned : doc.getTypes()) { + if (owned instanceof TemporarySDDocumentType) { + deployLogger.logApplicationPackage(Level.WARNING, "Schema '"+doc.getName()+"' owned type '"+owned.getName()+"' is temporary, should not happen"); + continue; + } + for (SDDocumentType inherited : owned.getInheritedTypes()) { + if (inherited instanceof TemporarySDDocumentType) { + var actual = doc.getType(inherited.getName()); + if (actual != null) { + owned.inherit(actual); + } else { + deployLogger.logApplicationPackage(Level.WARNING, "Unresolved inherit '"+inherited.getName() +"' for type '"+owned.getName()+"' in document "+doc.getName()); + } + } + } + } + } + } + +} diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/derived/AttributeFields.java b/config-model/src/main/java/com/yahoo/searchdefinition/derived/AttributeFields.java index 2890f1cc019..04ef85856cd 100644 --- a/config-model/src/main/java/com/yahoo/searchdefinition/derived/AttributeFields.java +++ b/config-model/src/main/java/com/yahoo/searchdefinition/derived/AttributeFields.java @@ -3,11 +3,11 @@ package com.yahoo.searchdefinition.derived; import com.yahoo.config.subscription.ConfigInstanceUtil; import com.yahoo.document.DataType; -import com.yahoo.document.PositionDataType; import com.yahoo.searchdefinition.Schema; import com.yahoo.searchdefinition.document.Attribute; import com.yahoo.searchdefinition.document.Case; import 
com.yahoo.searchdefinition.document.Dictionary; +import com.yahoo.searchdefinition.document.GeoPos; import com.yahoo.searchdefinition.document.ImmutableSDField; import com.yahoo.searchdefinition.document.Ranking; import com.yahoo.searchdefinition.document.Sorting; @@ -69,8 +69,7 @@ public class AttributeFields extends Derived implements AttributesConfig.Produce private static boolean unsupportedFieldType(ImmutableSDField field) { return (field.usesStructOrMap() && !isSupportedComplexField(field) && - !field.getDataType().equals(PositionDataType.INSTANCE) && - !field.getDataType().equals(DataType.getArray(PositionDataType.INSTANCE))); + !GeoPos.isAnyPos(field)); } /** Returns an attribute by name, or null if it doesn't exist */ diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/derived/DerivedConfiguration.java b/config-model/src/main/java/com/yahoo/searchdefinition/derived/DerivedConfiguration.java index 6165cdd2dc1..b72e8df0694 100644 --- a/config-model/src/main/java/com/yahoo/searchdefinition/derived/DerivedConfiguration.java +++ b/config-model/src/main/java/com/yahoo/searchdefinition/derived/DerivedConfiguration.java @@ -8,7 +8,7 @@ import com.yahoo.config.model.api.ModelContext; import com.yahoo.config.model.application.provider.BaseDeployLogger; import com.yahoo.config.application.api.DeployLogger; import com.yahoo.config.model.deploy.TestProperties; -import com.yahoo.document.DocumenttypesConfig; +import com.yahoo.document.config.DocumenttypesConfig; import com.yahoo.document.config.DocumentmanagerConfig; import com.yahoo.io.IOUtils; import com.yahoo.protect.Validator; @@ -91,7 +91,7 @@ public class DerivedConfiguration implements AttributesConfig.Producer { } if ( ! 
schema.isDocumentsOnly()) { attributeFields = new AttributeFields(schema); - summaries = new Summaries(schema, deployLogger); + summaries = new Summaries(schema, deployLogger, deployProperties.featureFlags()); summaryMap = new SummaryMap(schema); juniperrc = new Juniperrc(schema); rankProfileList = new RankProfileList(schema, schema.rankingConstants(), schema.rankExpressionFiles(), diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/derived/Deriver.java b/config-model/src/main/java/com/yahoo/searchdefinition/derived/Deriver.java index 10dca70ab6c..14e303522e0 100644 --- a/config-model/src/main/java/com/yahoo/searchdefinition/derived/Deriver.java +++ b/config-model/src/main/java/com/yahoo/searchdefinition/derived/Deriver.java @@ -1,6 +1,6 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.searchdefinition.derived; -import com.yahoo.document.DocumenttypesConfig; +import com.yahoo.document.config.DocumenttypesConfig; import com.yahoo.document.config.DocumentmanagerConfig; import com.yahoo.searchdefinition.SchemaBuilder; import com.yahoo.searchdefinition.parser.ParseException; @@ -38,6 +38,12 @@ public class Deriver { return new DocumentManager().produce(getSearchBuilder(sds).getModel(), new DocumentmanagerConfig.Builder()); } + public static DocumentmanagerConfig.Builder getDocumentManagerConfig(List<String> sds, boolean useV8DocManagerCfg) { + return new DocumentManager() + .useV8DocManagerCfg(useV8DocManagerCfg) + .produce(getSearchBuilder(sds).getModel(), new DocumentmanagerConfig.Builder()); + } + public static DocumenttypesConfig.Builder getDocumentTypesConfig(String sd) { return getDocumentTypesConfig(Collections.singletonList(sd)); } diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/derived/ImportedFields.java b/config-model/src/main/java/com/yahoo/searchdefinition/derived/ImportedFields.java index a63b88f9445..3b8c0a9cff2 100644 --- 
a/config-model/src/main/java/com/yahoo/searchdefinition/derived/ImportedFields.java +++ b/config-model/src/main/java/com/yahoo/searchdefinition/derived/ImportedFields.java @@ -2,9 +2,9 @@ package com.yahoo.searchdefinition.derived; import com.yahoo.document.DataType; -import com.yahoo.document.PositionDataType; import com.yahoo.searchdefinition.Schema; import com.yahoo.searchdefinition.document.Attribute; +import com.yahoo.searchdefinition.document.GeoPos; import com.yahoo.searchdefinition.document.ImmutableSDField; import com.yahoo.searchdefinition.document.ImportedComplexField; import com.yahoo.searchdefinition.document.ImportedField; @@ -60,9 +60,8 @@ public class ImportedFields extends Derived implements ImportedFieldsConfig.Prod private static void considerComplexField(ImportedFieldsConfig.Builder builder, ImportedComplexField field) { ImmutableSDField targetField = field.targetField(); - if (targetField.getDataType().equals(PositionDataType.INSTANCE) || - targetField.getDataType().equals(DataType.getArray(PositionDataType.INSTANCE))) { - + if (GeoPos.isAnyPos(targetField)) { + // no action needed } else if (isArrayOfSimpleStruct(targetField)) { considerNestedFields(builder, field); } else if (isMapOfSimpleStruct(targetField)) { diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/derived/IndexInfo.java b/config-model/src/main/java/com/yahoo/searchdefinition/derived/IndexInfo.java index 879ad570c26..495c3da5d3a 100644 --- a/config-model/src/main/java/com/yahoo/searchdefinition/derived/IndexInfo.java +++ b/config-model/src/main/java/com/yahoo/searchdefinition/derived/IndexInfo.java @@ -6,7 +6,6 @@ import com.yahoo.document.DataType; import com.yahoo.document.Field; import com.yahoo.document.MapDataType; import com.yahoo.document.NumericDataType; -import com.yahoo.document.PositionDataType; import com.yahoo.document.PrimitiveDataType; import com.yahoo.document.StructuredDataType; import com.yahoo.searchdefinition.Index; @@ -15,6 +14,7 @@ import 
com.yahoo.searchdefinition.document.Attribute; import com.yahoo.searchdefinition.document.BooleanIndexDefinition; import com.yahoo.searchdefinition.document.Case; import com.yahoo.searchdefinition.document.FieldSet; +import com.yahoo.searchdefinition.document.GeoPos; import com.yahoo.searchdefinition.document.ImmutableSDField; import com.yahoo.searchdefinition.document.Matching; import com.yahoo.searchdefinition.document.Stemming; @@ -91,12 +91,8 @@ public class IndexInfo extends Derived implements IndexInfoConfig.Producer { } } - private static boolean isPositionArrayField(ImmutableSDField field) { - return field.getDataType().equals(DataType.getArray(PositionDataType.INSTANCE)); - } - private static boolean isPositionField(ImmutableSDField field) { - return field.getDataType().equals(PositionDataType.INSTANCE) || isPositionArrayField(field); + return GeoPos.isAnyPos(field); } @Override diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/derived/IndexingScript.java b/config-model/src/main/java/com/yahoo/searchdefinition/derived/IndexingScript.java index cabe8d001bd..23409729dbb 100644 --- a/config-model/src/main/java/com/yahoo/searchdefinition/derived/IndexingScript.java +++ b/config-model/src/main/java/com/yahoo/searchdefinition/derived/IndexingScript.java @@ -2,8 +2,8 @@ package com.yahoo.searchdefinition.derived; import com.yahoo.document.DataType; -import com.yahoo.document.PositionDataType; import com.yahoo.searchdefinition.Schema; +import com.yahoo.searchdefinition.document.GeoPos; import com.yahoo.searchdefinition.document.ImmutableSDField; import com.yahoo.vespa.configdefinition.IlscriptsConfig; import com.yahoo.vespa.configdefinition.IlscriptsConfig.Ilscript.Builder; @@ -58,9 +58,7 @@ public final class IndexingScript extends Derived implements IlscriptsConfig.Pro if (field.hasFullIndexingDocprocRights()) docFields.add(field.getName()); - if (field.usesStructOrMap() && - ! field.getDataType().equals(PositionDataType.INSTANCE) && - ! 
field.getDataType().equals(DataType.getArray(PositionDataType.INSTANCE))) { + if (field.usesStructOrMap() && ! GeoPos.isAnyPos(field)) { return; // unsupported } diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/derived/Summaries.java b/config-model/src/main/java/com/yahoo/searchdefinition/derived/Summaries.java index 5fdc51e8f5f..1455fbc92e1 100644 --- a/config-model/src/main/java/com/yahoo/searchdefinition/derived/Summaries.java +++ b/config-model/src/main/java/com/yahoo/searchdefinition/derived/Summaries.java @@ -2,6 +2,7 @@ package com.yahoo.searchdefinition.derived; import com.yahoo.config.application.api.DeployLogger; +import com.yahoo.config.model.api.ModelContext; import com.yahoo.searchdefinition.Schema; import com.yahoo.vespa.documentmodel.DocumentSummary; import com.yahoo.vespa.config.search.SummaryConfig; @@ -14,9 +15,11 @@ import java.util.List; */ public class Summaries extends Derived implements SummaryConfig.Producer { + private final boolean useV8GeoPositions; private List<SummaryClass> summaries=new java.util.ArrayList<>(1); - public Summaries(Schema schema, DeployLogger deployLogger) { + public Summaries(Schema schema, DeployLogger deployLogger, ModelContext.FeatureFlags featureFlags) { + this.useV8GeoPositions = featureFlags.useV8GeoPositions(); // Make sure the default is first summaries.add(new SummaryClass(schema, schema.getSummary("default"), deployLogger)); for (DocumentSummary summary : schema.getSummaries().values()) { @@ -31,6 +34,7 @@ public class Summaries extends Derived implements SummaryConfig.Producer { @Override public void getConfig(SummaryConfig.Builder builder) { builder.defaultsummaryid(summaries.isEmpty() ? 
-1 : summaries.get(0).hashCode()); + builder.usev8geopositions(useV8GeoPositions); for (SummaryClass summaryClass : summaries) { builder.classes(summaryClass.getSummaryClassConfig()); } diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/derived/VsmSummary.java b/config-model/src/main/java/com/yahoo/searchdefinition/derived/VsmSummary.java index 4ce486e13ba..03b9e795317 100644 --- a/config-model/src/main/java/com/yahoo/searchdefinition/derived/VsmSummary.java +++ b/config-model/src/main/java/com/yahoo/searchdefinition/derived/VsmSummary.java @@ -1,8 +1,8 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.searchdefinition.derived; -import com.yahoo.document.PositionDataType; import com.yahoo.searchdefinition.Schema; +import com.yahoo.searchdefinition.document.GeoPos; import com.yahoo.searchdefinition.document.SDDocumentType; import com.yahoo.searchdefinition.document.SDField; import com.yahoo.vespa.documentmodel.DocumentSummary; @@ -37,7 +37,7 @@ public class VsmSummary extends Derived implements VsmsummaryConfig.Producer { if (doMapField(schema, summaryField)) { SDField sdField = schema.getConcreteField(summaryField.getName()); - if (sdField != null && PositionDataType.INSTANCE.equals(sdField.getDataType())) { + if (sdField != null && GeoPos.isPos(sdField)) { summaryMap.put(summaryField, Collections.singletonList(summaryField.getName())); } else { summaryMap.put(summaryField, from); diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/document/Attribute.java b/config-model/src/main/java/com/yahoo/searchdefinition/document/Attribute.java index cc08e84ef9d..5ac6dd46102 100644 --- a/config-model/src/main/java/com/yahoo/searchdefinition/document/Attribute.java +++ b/config-model/src/main/java/com/yahoo/searchdefinition/document/Attribute.java @@ -114,6 +114,7 @@ public final class Attribute implements Cloneable, Serializable { public String getName() { return myName; 
} public String getExportAttributeTypeName() { return exportAttributeTypeName; } + @Override public String toString() { return "type: " + myName; } @@ -134,9 +135,11 @@ public final class Attribute implements Cloneable, Serializable { public String getName() { return name; } + @Override public String toString() { return "collectiontype: " + name; } + } /** Creates an attribute with default settings */ @@ -332,6 +335,7 @@ public final class Attribute implements Cloneable, Serializable { } } + @SuppressWarnings("deprecation") private DataType createReferenceDataType() { if (!referenceDocumentType.isPresent()) { throw new IllegalStateException("Referenced document type is not set!"); @@ -405,6 +409,7 @@ public final class Attribute implements Cloneable, Serializable { } } + @Override public String toString() { return "attribute '" + name + "' (" + type + ")"; } diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/document/ComplexAttributeFieldUtils.java b/config-model/src/main/java/com/yahoo/searchdefinition/document/ComplexAttributeFieldUtils.java index 24a40154494..feac6b9618e 100644 --- a/config-model/src/main/java/com/yahoo/searchdefinition/document/ComplexAttributeFieldUtils.java +++ b/config-model/src/main/java/com/yahoo/searchdefinition/document/ComplexAttributeFieldUtils.java @@ -59,8 +59,7 @@ public class ComplexAttributeFieldUtils { } private static boolean isStructWithPrimitiveStructFieldAttributes(DataType type, ImmutableSDField field) { - if (type instanceof StructDataType && - !(type.equals(PositionDataType.INSTANCE))) { + if (type instanceof StructDataType && ! 
GeoPos.isPos(type)) { for (ImmutableSDField structField : field.getStructFields()) { Attribute attribute = structField.getAttributes().get(structField.getName()); if (attribute != null) { diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/document/GeoPos.java b/config-model/src/main/java/com/yahoo/searchdefinition/document/GeoPos.java new file mode 100644 index 00000000000..956d63a1cdf --- /dev/null +++ b/config-model/src/main/java/com/yahoo/searchdefinition/document/GeoPos.java @@ -0,0 +1,26 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +package com.yahoo.searchdefinition.document; + +import com.yahoo.document.DataType; +import com.yahoo.document.PositionDataType; + +/** + * Common utilities for recognizing fields with the built-in "position" datatype, + * possibly in array form. + * @author arnej + */ +public class GeoPos { + static public boolean isPos(DataType type) { + return PositionDataType.INSTANCE.equals(type); + } + static public boolean isPosArray(DataType type) { + return DataType.getArray(PositionDataType.INSTANCE).equals(type); + } + static public boolean isAnyPos(DataType type) { + return isPos(type) || isPosArray(type); + } + + static public boolean isPos(ImmutableSDField field) { return isPos(field.getDataType()); } + static public boolean isPosArray(ImmutableSDField field) { return isPosArray(field.getDataType()); } + static public boolean isAnyPos(ImmutableSDField field) { return isAnyPos(field.getDataType()); } +} diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/processing/AdjustPositionSummaryFields.java b/config-model/src/main/java/com/yahoo/searchdefinition/processing/AdjustPositionSummaryFields.java index 983942f87c3..766b6ed3fec 100644 --- a/config-model/src/main/java/com/yahoo/searchdefinition/processing/AdjustPositionSummaryFields.java +++ b/config-model/src/main/java/com/yahoo/searchdefinition/processing/AdjustPositionSummaryFields.java @@ 
-8,6 +8,7 @@ import com.yahoo.document.PositionDataType; import com.yahoo.searchdefinition.RankProfileRegistry; import com.yahoo.searchdefinition.Schema; import com.yahoo.searchdefinition.document.Attribute; +import com.yahoo.searchdefinition.document.GeoPos; import com.yahoo.searchdefinition.document.ImmutableSDField; import com.yahoo.vespa.documentmodel.DocumentSummary; import com.yahoo.vespa.documentmodel.SummaryField; @@ -34,7 +35,7 @@ public class AdjustPositionSummaryFields extends Processor { private void scanSummary(DocumentSummary summary) { for (SummaryField summaryField : summary.getSummaryFields().values()) { - if ( ! isPositionDataType(summaryField.getDataType())) continue; + if ( ! GeoPos.isAnyPos(summaryField.getDataType())) continue; String originalSource = summaryField.getSingleSource(); if (originalSource.indexOf('.') == -1) { // Eliminate summary fields with pos.x or pos.y as source @@ -112,10 +113,6 @@ public class AdjustPositionSummaryFields extends Processor { return name.length() > suffix.length() && name.substring(name.length() - suffix.length()).equals(suffix); } - private static boolean isPositionDataType(DataType dataType) { - return dataType.equals(PositionDataType.INSTANCE) || dataType.equals(DataType.getArray(PositionDataType.INSTANCE)); - } - private static DataType makeZCurveDataType(DataType dataType) { return dataType instanceof ArrayDataType ? 
DataType.getArray(DataType.LONG) : DataType.LONG; } diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/processing/CreatePositionZCurve.java b/config-model/src/main/java/com/yahoo/searchdefinition/processing/CreatePositionZCurve.java index 699abb1e792..f5c1d8d8197 100644 --- a/config-model/src/main/java/com/yahoo/searchdefinition/processing/CreatePositionZCurve.java +++ b/config-model/src/main/java/com/yahoo/searchdefinition/processing/CreatePositionZCurve.java @@ -8,6 +8,7 @@ import com.yahoo.document.DataType; import com.yahoo.document.PositionDataType; import com.yahoo.searchdefinition.Schema; import com.yahoo.searchdefinition.document.Attribute; +import com.yahoo.searchdefinition.document.GeoPos; import com.yahoo.searchdefinition.document.SDField; import com.yahoo.vespa.documentmodel.SummaryField; import com.yahoo.vespa.documentmodel.SummaryTransform; @@ -142,10 +143,7 @@ public class CreatePositionZCurve extends Processor { } private static boolean isSupportedPositionType(DataType dataType) { - if (dataType instanceof ArrayDataType) { - dataType = ((ArrayDataType)dataType).getNestedType(); - } - return dataType.equals(PositionDataType.INSTANCE); + return GeoPos.isAnyPos(dataType); } private static class RemoveSummary extends ExpressionConverter { diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/processing/DiversitySettingsValidator.java b/config-model/src/main/java/com/yahoo/searchdefinition/processing/DiversitySettingsValidator.java index 40de25dbc76..5643bb660f1 100644 --- a/config-model/src/main/java/com/yahoo/searchdefinition/processing/DiversitySettingsValidator.java +++ b/config-model/src/main/java/com/yahoo/searchdefinition/processing/DiversitySettingsValidator.java @@ -22,7 +22,7 @@ public class DiversitySettingsValidator extends Processor { if ( ! 
validate) return; if (documentsOnly) return; - for (RankProfile rankProfile : rankProfileRegistry.rankProfilesOf(schema.getName())) { + for (RankProfile rankProfile : rankProfileRegistry.rankProfilesOf(schema)) { if (rankProfile.getMatchPhaseSettings() != null && rankProfile.getMatchPhaseSettings().getDiversity() != null) { validate(rankProfile, rankProfile.getMatchPhaseSettings().getDiversity()); } diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/processing/ImportedFieldsResolver.java b/config-model/src/main/java/com/yahoo/searchdefinition/processing/ImportedFieldsResolver.java index f8a28061897..e836caac10d 100644 --- a/config-model/src/main/java/com/yahoo/searchdefinition/processing/ImportedFieldsResolver.java +++ b/config-model/src/main/java/com/yahoo/searchdefinition/processing/ImportedFieldsResolver.java @@ -9,6 +9,7 @@ import com.yahoo.searchdefinition.DocumentReferences; import com.yahoo.searchdefinition.RankProfileRegistry; import com.yahoo.searchdefinition.Schema; import com.yahoo.searchdefinition.document.Attribute; +import com.yahoo.searchdefinition.document.GeoPos; import com.yahoo.searchdefinition.document.ImmutableSDField; import com.yahoo.searchdefinition.document.ImportedComplexField; import com.yahoo.searchdefinition.document.ImportedField; @@ -49,8 +50,7 @@ public class ImportedFieldsResolver extends Processor { private void resolveImportedField(TemporaryImportedField importedField, boolean validate) { DocumentReference reference = validateDocumentReference(importedField); ImmutableSDField targetField = getTargetField(importedField, reference); - if (targetField.getDataType().equals(PositionDataType.INSTANCE) || - targetField.getDataType().equals(DataType.getArray(PositionDataType.INSTANCE))) { + if (GeoPos.isAnyPos(targetField)) { resolveImportedPositionField(importedField, reference, targetField, validate); } else if (isArrayOfSimpleStruct(targetField)) { resolveImportedArrayOfStructField(importedField, reference, targetField, 
validate); diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/processing/IndexingValidation.java b/config-model/src/main/java/com/yahoo/searchdefinition/processing/IndexingValidation.java index eb9b561da73..242f5dab308 100644 --- a/config-model/src/main/java/com/yahoo/searchdefinition/processing/IndexingValidation.java +++ b/config-model/src/main/java/com/yahoo/searchdefinition/processing/IndexingValidation.java @@ -5,11 +5,11 @@ import com.yahoo.config.application.api.DeployLogger; import com.yahoo.document.ArrayDataType; import com.yahoo.document.DataType; import com.yahoo.document.MapDataType; -import com.yahoo.document.PositionDataType; import com.yahoo.document.WeightedSetDataType; import com.yahoo.searchdefinition.RankProfileRegistry; import com.yahoo.searchdefinition.Schema; import com.yahoo.searchdefinition.document.Attribute; +import com.yahoo.searchdefinition.document.GeoPos; import com.yahoo.searchdefinition.document.SDField; import com.yahoo.vespa.documentmodel.SummaryField; import com.yahoo.vespa.indexinglanguage.ExpressionConverter; @@ -153,7 +153,7 @@ public class IndexingValidation extends Processor { createCompatType(mapType.getValueType())); } else if (origType instanceof WeightedSetDataType) { return DataType.getWeightedSet(createCompatType(((WeightedSetDataType)origType).getNestedType())); - } else if (origType == PositionDataType.INSTANCE) { + } else if (GeoPos.isPos(origType)) { return DataType.LONG; } else { return origType; diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/processing/PagedAttributeValidator.java b/config-model/src/main/java/com/yahoo/searchdefinition/processing/PagedAttributeValidator.java index d108a620fd9..2a4f4f18759 100644 --- a/config-model/src/main/java/com/yahoo/searchdefinition/processing/PagedAttributeValidator.java +++ b/config-model/src/main/java/com/yahoo/searchdefinition/processing/PagedAttributeValidator.java @@ -16,7 +16,6 @@ import 
com.yahoo.vespa.model.container.search.QueryProfiles; */ public class PagedAttributeValidator extends Processor { - public PagedAttributeValidator(Schema schema, DeployLogger deployLogger, RankProfileRegistry rankProfileRegistry, @@ -40,8 +39,8 @@ public class PagedAttributeValidator extends Processor { private void validatePagedSetting(Field field, Attribute attribute) { var tensorType = attribute.tensorType(); - if (!tensorType.isPresent() || - !isDenseTensorType(tensorType.get())) { + if (tensorType.isEmpty() + || !isDenseTensorType(tensorType.get())) { fail(schema, field, "The 'paged' attribute setting is only supported for dense tensor types"); } } @@ -49,4 +48,5 @@ public class PagedAttributeValidator extends Processor { private boolean isDenseTensorType(TensorType type) { return type.dimensions().stream().allMatch(d -> d.isIndexed()); } + } diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/processing/Processing.java b/config-model/src/main/java/com/yahoo/searchdefinition/processing/Processing.java index a484476e978..1d4b39dfcc5 100644 --- a/config-model/src/main/java/com/yahoo/searchdefinition/processing/Processing.java +++ b/config-model/src/main/java/com/yahoo/searchdefinition/processing/Processing.java @@ -62,6 +62,7 @@ public class Processing { MultifieldIndexHarmonizer::new, FilterFieldNames::new, MatchConsistency::new, + ValidateStructTypeInheritance::new, ValidateFieldTypes::new, SummaryDiskAccessValidator::new, DisallowComplexMapAndWsetKeyTypes::new, diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/processing/ValidateStructTypeInheritance.java b/config-model/src/main/java/com/yahoo/searchdefinition/processing/ValidateStructTypeInheritance.java new file mode 100644 index 00000000000..d99832e3df6 --- /dev/null +++ b/config-model/src/main/java/com/yahoo/searchdefinition/processing/ValidateStructTypeInheritance.java @@ -0,0 +1,76 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. 
See LICENSE in the project root. +package com.yahoo.searchdefinition.processing; + +import com.yahoo.searchdefinition.Schema; +import com.yahoo.config.application.api.DeployLogger; +import com.yahoo.searchdefinition.RankProfileRegistry; +import com.yahoo.vespa.model.container.search.QueryProfiles; + +import com.yahoo.document.DataType; +import com.yahoo.document.Field; +import com.yahoo.document.StructDataType; +import com.yahoo.searchdefinition.document.SDDocumentType; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Map; +import java.util.HashSet; +import java.util.Set; + +/** + * @author arnej + */ +public class ValidateStructTypeInheritance extends Processor { + + public ValidateStructTypeInheritance(Schema schema, DeployLogger deployLogger, RankProfileRegistry rankProfileRegistry, QueryProfiles queryProfiles) { + super(schema, deployLogger, rankProfileRegistry, queryProfiles); + } + + @Override + public void process(boolean validate, boolean documentsOnly) { + if (!validate) return; + verifyNoRedeclarations(schema.getDocument()); + } + + void fail(Field field, String message) { + throw newProcessException(schema, field, message); + } + + void verifyNoRedeclarations(SDDocumentType docType) { + for (SDDocumentType type : docType.allTypes().values()) { + if (type.isStruct()) { + var inheritedTypes = new ArrayList<SDDocumentType>(type.getInheritedTypes()); + for (int i = 0; i < inheritedTypes.size(); i++) { + SDDocumentType inherit = inheritedTypes.get(i); + for (var extra : inherit.getInheritedTypes()) { + if (! inheritedTypes.contains(extra)) { + inheritedTypes.add(extra); + } + } + } + if (inheritedTypes.isEmpty()) continue; + var seenFieldNames = new HashSet<>(); + for (var field : type.getDocumentType().contentStruct().getFieldsThisTypeOnly()) { + if (seenFieldNames.contains(field.getName())) { + // cannot happen? 
+ fail(field, "struct "+type.getName()+" has multiple fields with same name: "+field.getName()); + } + seenFieldNames.add(field.getName()); + } + for (SDDocumentType inherit : inheritedTypes) { + if (inherit.isStruct()) { + for (var field : inherit.getDocumentType().contentStruct().getFieldsThisTypeOnly()) { + if (seenFieldNames.contains(field.getName())) { + fail(field, "struct "+type.getName()+" cannot inherit from "+inherit.getName()+" and redeclare field "+field.getName()); + } + seenFieldNames.add(field.getName()); + } + } else { + fail(new Field("no field"), "struct cannot inherit from non-struct "+inherit.getName()+" class "+inherit.getClass()); + } + } + } + } + } + +} diff --git a/config-model/src/main/java/com/yahoo/vespa/configmodel/producers/DocumentManager.java b/config-model/src/main/java/com/yahoo/vespa/configmodel/producers/DocumentManager.java index 8605389e1c2..9b4b3eba3a7 100644 --- a/config-model/src/main/java/com/yahoo/vespa/configmodel/producers/DocumentManager.java +++ b/config-model/src/main/java/com/yahoo/vespa/configmodel/producers/DocumentManager.java @@ -11,23 +11,55 @@ import com.yahoo.documentmodel.NewDocumentType; import com.yahoo.documentmodel.VespaDocumentType; import com.yahoo.searchdefinition.document.FieldSet; import com.yahoo.vespa.documentmodel.DocumentModel; +import java.util.ArrayList; import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; import java.util.HashSet; +import java.util.IdentityHashMap; +import java.util.List; +import java.util.Map; import java.util.Set; /** - * @author baldersheim + * @author baldersheim + * @author arnej */ public class DocumentManager { + private boolean useV8GeoPositions = false; + private boolean useV8DocManagerCfg = false; + + public DocumentManager useV8GeoPositions(boolean value) { + this.useV8GeoPositions = value; + return this; + } + public DocumentManager useV8DocManagerCfg(boolean value) { + this.useV8DocManagerCfg = value; + return this; + } + public 
DocumentmanagerConfig.Builder produce(DocumentModel model, - DocumentmanagerConfig.Builder documentConfigBuilder) { + DocumentmanagerConfig.Builder documentConfigBuilder) + { + if (useV8DocManagerCfg) { + return produceDocTypes(model, documentConfigBuilder); + } else { + return produceDataTypes(model, documentConfigBuilder); + } + } + + public DocumentmanagerConfig.Builder produceDataTypes(DocumentModel model, + DocumentmanagerConfig.Builder documentConfigBuilder) + { documentConfigBuilder.enablecompression(false); + documentConfigBuilder.usev8geopositions(this.useV8GeoPositions); Set<DataType> handled = new HashSet<>(); for(NewDocumentType documentType : model.getDocumentManager().getTypes()) { buildConfig(documentType, documentConfigBuilder, handled); buildConfig(documentType.getAnnotations(), documentConfigBuilder); - if ( documentType != VespaDocumentType.INSTANCE) { + if (documentType != VespaDocumentType.INSTANCE && ! handled.contains(documentType)) { + handled.add(documentType); DocumentmanagerConfig.Datatype.Builder dataTypeBuilder = new DocumentmanagerConfig.Datatype.Builder(); documentConfigBuilder.datatype(dataTypeBuilder); buildConfig(documentType, dataTypeBuilder); @@ -36,11 +68,18 @@ public class DocumentManager { return documentConfigBuilder; } + @SuppressWarnings("deprecation") private void buildConfig(DataTypeCollection type, DocumentmanagerConfig.Builder documentConfigBuilder, Set<DataType> built) { - for (DataType dataType : type.getTypes()) { + List<DataType> todo = new ArrayList<>(type.getTypes()); + Collections.sort(todo, (a, b) -> (a.getName().equals(b.getName()) + ? 
a.getId() - b.getId() + : a.getName().compareTo(b.getName()))); + for (DataType dataType : todo) { if (built.contains(dataType)) continue; built.add(dataType); - if (dataType instanceof TemporaryStructuredDataType) continue; + if (dataType instanceof TemporaryStructuredDataType) { + throw new IllegalArgumentException("Can not create config for temporary data type: " + dataType.getName()); + } if ((dataType.getId() < 0) || (dataType.getId()> DataType.lastPredefinedDataTypeId())) { Datatype.Builder dataTypeBuilder = new Datatype.Builder(); documentConfigBuilder.datatype(dataTypeBuilder); @@ -80,24 +119,16 @@ public class DocumentManager { } else if (type instanceof WeightedSetDataType) { WeightedSetDataType dt = (WeightedSetDataType) type; builder.weightedsettype(new Datatype.Weightedsettype.Builder(). - datatype(dt.getNestedType().getId()). - createifnonexistant(dt.createIfNonExistent()). - removeifzero(dt.removeIfZero())); + datatype(dt.getNestedType().getId()). + createifnonexistant(dt.createIfNonExistent()). + removeifzero(dt.removeIfZero())); } else if (type instanceof MapDataType) { MapDataType mtype = (MapDataType) type; builder.maptype(new Datatype.Maptype.Builder(). - keytype(mtype.getKeyType().getId()). - valtype(mtype.getValueType().getId())); + keytype(mtype.getKeyType().getId()). + valtype(mtype.getValueType().getId())); } else if (type instanceof DocumentType) { - DocumentType dt = (DocumentType) type; - Datatype.Documenttype.Builder doc = new Datatype.Documenttype.Builder(); - builder.documenttype(doc); - doc. - name(dt.getName()). 
- headerstruct(dt.contentStruct().getId()); - for (DocumentType inherited : dt.getInheritedTypes()) { - doc.inherits(new Datatype.Documenttype.Inherits.Builder().name(inherited.getName())); - } + throw new IllegalArgumentException("Can not create config for unadorned document type: " + type.getName()); } else if (type instanceof NewDocumentType) { NewDocumentType dt = (NewDocumentType) type; Datatype.Documenttype.Builder doc = new Datatype.Documenttype.Builder(); @@ -111,7 +142,7 @@ public class DocumentManager { buildConfig(dt.getFieldSets(), doc); buildImportedFieldsConfig(dt.getImportedFieldNames(), doc); } else if (type instanceof TemporaryStructuredDataType) { - //Ignored + throw new IllegalArgumentException("Can not create config for temporary data type: " + type.getName()); } else if (type instanceof StructDataType) { StructDataType structType = (StructDataType) type; Datatype.Structtype.Builder structBuilder = new Datatype.Structtype.Builder(); @@ -143,7 +174,7 @@ public class DocumentManager { ReferenceDataType refType = (ReferenceDataType) type; builder.referencetype(new Datatype.Referencetype.Builder().target_type_id(refType.getTargetType().getId())); } else { - throw new IllegalArgumentException("Can not create config for data type '" + type.getName()); + throw new IllegalArgumentException("Can not create config for data type " + type + " of class " + type.getClass()); } } @@ -165,4 +196,271 @@ public class DocumentManager { } } + + // Alternate (new) way to build config: + + public DocumentmanagerConfig.Builder produceDocTypes(DocumentModel model, DocumentmanagerConfig.Builder builder) { + builder.usev8geopositions(this.useV8GeoPositions); + Map<NewDocumentType.Name, NewDocumentType> produced = new HashMap<>(); + var indexMap = new IdxMap(); + for (NewDocumentType documentType : model.getDocumentManager().getTypes()) { + docTypeInheritOrder(documentType, builder, produced, indexMap); + } + indexMap.verifyAllDone(); + return builder; + } + + private 
void docTypeInheritOrder(NewDocumentType documentType, + DocumentmanagerConfig.Builder builder, + Map<NewDocumentType.Name, NewDocumentType> produced, + IdxMap indexMap) + { + if (! produced.containsKey(documentType.getFullName())) { + for (NewDocumentType inherited : documentType.getInherited()) { + docTypeInheritOrder(inherited, builder, produced, indexMap); + } + docTypeBuild(documentType, builder, indexMap); + produced.put(documentType.getFullName(), documentType); + } + } + + static private class IdxMap { + private Map<Integer, Boolean> doneMap = new HashMap<>(); + private Map<Object, Integer> map = new IdentityHashMap<>(); + void add(Object someType) { + assert(someType != null); + // the adding of "10000" here is mostly to make it more + // unique to grep for when debugging + int nextIdx = 10000 + map.size(); + map.computeIfAbsent(someType, k -> nextIdx); + } + int idxOf(Object someType) { + if (someType instanceof DocumentType) { + var dt = (DocumentType) someType; + if (dt.getId() == 8) { + return idxOf(VespaDocumentType.INSTANCE); + } + } + add(someType); + return map.get(someType); + } + boolean isDone(Object someType) { + return doneMap.computeIfAbsent(idxOf(someType), k -> false); + } + void setDone(Object someType) { + assert(! isDone(someType)); + doneMap.put(idxOf(someType), true); + } + void verifyAllDone() { + for (var entry : map.entrySet()) { + Object needed = entry.getKey(); + if (! isDone(needed)) { + throw new IllegalArgumentException("Could not generate config for all needed types, missing: " + + needed + " of class " + needed.getClass()); + } + } + } + } + + private void docTypeBuild(NewDocumentType documentType, DocumentmanagerConfig.Builder builder, IdxMap indexMap) { + DocumentmanagerConfig.Doctype.Builder db = new DocumentmanagerConfig.Doctype.Builder(); + db. + idx(indexMap.idxOf(documentType)). + name(documentType.getName()). 
+ contentstruct(indexMap.idxOf(documentType.getHeader())); + docTypeBuildFieldSets(documentType.getFieldSets(), db); + docTypeBuildImportedFields(documentType.getImportedFieldNames(), db); + for (NewDocumentType inherited : documentType.getInherited()) { + db.inherits(b -> b.idx(indexMap.idxOf(inherited))); + } + docTypeBuildAnyType(documentType.getHeader(), db, indexMap); + for (DataType dt : documentType.getAllTypes().getTypes()) { + docTypeBuildAnyType(dt, db, indexMap); + } + for (AnnotationType annotation : documentType.getAnnotations()) { + docTypeBuildAnnotationType(annotation, db, indexMap); + } + builder.doctype(db); + indexMap.setDone(documentType); + } + + private void docTypeBuildFieldSets(Set<FieldSet> fieldSets, DocumentmanagerConfig.Doctype.Builder db) { + for (FieldSet fs : fieldSets) { + docTypeBuildOneFieldSet(fs, db); + } + } + + private void docTypeBuildOneFieldSet(FieldSet fs, DocumentmanagerConfig.Doctype.Builder db) { + db.fieldsets(fs.getName(), new DocumentmanagerConfig.Doctype.Fieldsets.Builder().fields(fs.getFieldNames())); + } + + private void docTypeBuildAnnotationType(AnnotationType annotation, DocumentmanagerConfig.Doctype.Builder builder, IdxMap indexMap) { + if (indexMap.isDone(annotation)) { + return; + } + indexMap.setDone(annotation); + var annBuilder = new DocumentmanagerConfig.Doctype.Annotationtype.Builder(); + annBuilder + .idx(indexMap.idxOf(annotation)) + .name(annotation.getName()) + .internalid(annotation.getId()); + DataType nested = annotation.getDataType(); + if (nested != null) { + annBuilder.datatype(indexMap.idxOf(nested)); + docTypeBuildAnyType(nested, builder, indexMap); + } + for (AnnotationType inherited : annotation.getInheritedTypes()) { + annBuilder.inherits(inhBuilder -> inhBuilder.idx(indexMap.idxOf(inherited))); + + } + builder.annotationtype(annBuilder); + } + + @SuppressWarnings("deprecation") + private void docTypeBuildAnyType(DataType type, DocumentmanagerConfig.Doctype.Builder documentBuilder, IdxMap 
indexMap) { + if (indexMap.isDone(type)) { + return; + } + if (type instanceof NewDocumentType) { + // should be in the top-level list and handled there + return; + } + if ((type instanceof DocumentType) && (type.getId() == 8)) { + // special handling + return; + } + indexMap.setDone(type); + if (type instanceof TemporaryStructuredDataType) { + throw new IllegalArgumentException("Can not create config for temporary data type: " + type.getName()); + } if (type instanceof StructDataType) { + docTypeBuildOneType((StructDataType) type, documentBuilder, indexMap); + } else if (type instanceof ArrayDataType) { + docTypeBuildOneType((ArrayDataType) type, documentBuilder, indexMap); + } else if (type instanceof WeightedSetDataType) { + docTypeBuildOneType((WeightedSetDataType) type, documentBuilder, indexMap); + } else if (type instanceof MapDataType) { + docTypeBuildOneType((MapDataType) type, documentBuilder, indexMap); + } else if (type instanceof AnnotationReferenceDataType) { + docTypeBuildOneType((AnnotationReferenceDataType) type, documentBuilder, indexMap); + } else if (type instanceof TensorDataType) { + docTypeBuildOneType((TensorDataType) type, documentBuilder, indexMap); + } else if (type instanceof ReferenceDataType) { + docTypeBuildOneType((ReferenceDataType) type, documentBuilder, indexMap); + } else if (type instanceof PrimitiveDataType) { + docTypeBuildOneType((PrimitiveDataType) type, documentBuilder, indexMap); + } else if (type instanceof DocumentType) { + throw new IllegalArgumentException("Can not create config for unadorned document type: " + type.getName() + " id "+type.getId()); + } else { + throw new IllegalArgumentException("Can not create config for data type " + type + " of class " + type.getClass()); + } + } + + private void docTypeBuildImportedFields(Collection<String> fieldNames, DocumentmanagerConfig.Doctype.Builder builder) { + for (String fieldName : fieldNames) { + builder.importedfield(ib -> ib.name(fieldName)); + } + } + + private void 
docTypeBuildOneType(StructDataType type, + DocumentmanagerConfig.Doctype.Builder builder, + IdxMap indexMap) + { + var structBuilder = new DocumentmanagerConfig.Doctype.Structtype.Builder(); + structBuilder + .idx(indexMap.idxOf(type)) + .name(type.getName()); + for (DataType inherited : type.getInheritedTypes()) { + structBuilder.inherits(inheritBuilder -> inheritBuilder + .type(indexMap.idxOf(inherited))); + docTypeBuildAnyType(inherited, builder, indexMap); + } + for (com.yahoo.document.Field field : type.getFieldsThisTypeOnly()) { + DataType fieldType = field.getDataType(); + structBuilder.field(fieldBuilder -> fieldBuilder + .name(field.getName()) + .internalid(field.getId()) + .type(indexMap.idxOf(fieldType))); + docTypeBuildAnyType(fieldType, builder, indexMap); + } + builder.structtype(structBuilder); + } + + private void docTypeBuildOneType(PrimitiveDataType type, + DocumentmanagerConfig.Doctype.Builder builder, + IdxMap indexMap) + { + builder.primitivetype(primBuilder -> primBuilder + .idx(indexMap.idxOf(type)) + .name(type.getName())); + } + + private void docTypeBuildOneType(TensorDataType type, + DocumentmanagerConfig.Doctype.Builder builder, + IdxMap indexMap) + { + var tt = type.getTensorType(); + String detailed = (tt != null) ? 
tt.toString() : "tensor"; + builder.tensortype(tensorBuilder -> tensorBuilder + .idx(indexMap.idxOf(type)) + .detailedtype(detailed)); + + } + + private void docTypeBuildOneType(ArrayDataType type, + DocumentmanagerConfig.Doctype.Builder builder, + IdxMap indexMap) + { + DataType nested = type.getNestedType(); + builder.arraytype(arrayBuilder -> arrayBuilder + .idx(indexMap.idxOf(type)) + .elementtype(indexMap.idxOf(nested))); + docTypeBuildAnyType(nested, builder, indexMap); + } + + private void docTypeBuildOneType(WeightedSetDataType type, + DocumentmanagerConfig.Doctype.Builder builder, + IdxMap indexMap) + { + DataType nested = type.getNestedType(); + builder.wsettype(wsetBuilder -> wsetBuilder + .idx(indexMap.idxOf(type)) + .elementtype(indexMap.idxOf(nested)) + .createifnonexistent(type.createIfNonExistent()) + .removeifzero(type.removeIfZero())); + docTypeBuildAnyType(nested, builder, indexMap); + } + + private void docTypeBuildOneType(MapDataType type, + DocumentmanagerConfig.Doctype.Builder builder, + IdxMap indexMap) + { + DataType keytype = type.getKeyType(); + DataType valtype = type.getValueType(); + builder.maptype(mapBuilder -> mapBuilder + .idx(indexMap.idxOf(type)) + .keytype(indexMap.idxOf(keytype)) + .valuetype(indexMap.idxOf(valtype))); + docTypeBuildAnyType(keytype, builder, indexMap); + docTypeBuildAnyType(valtype, builder, indexMap); + } + + private void docTypeBuildOneType(AnnotationReferenceDataType type, + DocumentmanagerConfig.Doctype.Builder builder, + IdxMap indexMap) + { + builder.annotationref(arefBuilder -> arefBuilder + .idx(indexMap.idxOf(type)) + .annotationtype(indexMap.idxOf(type.getAnnotationType()))); + } + + private void docTypeBuildOneType(ReferenceDataType type, + DocumentmanagerConfig.Doctype.Builder builder, + IdxMap indexMap) + { + builder.documentref(docrefBuilder -> docrefBuilder + .idx(indexMap.idxOf(type)) + .targettype(indexMap.idxOf(type.getTargetType()))); + + } + } diff --git 
a/config-model/src/main/java/com/yahoo/vespa/configmodel/producers/DocumentTypes.java b/config-model/src/main/java/com/yahoo/vespa/configmodel/producers/DocumentTypes.java index 3b0b63f277e..ac1b92e287f 100644 --- a/config-model/src/main/java/com/yahoo/vespa/configmodel/producers/DocumentTypes.java +++ b/config-model/src/main/java/com/yahoo/vespa/configmodel/producers/DocumentTypes.java @@ -2,6 +2,7 @@ package com.yahoo.vespa.configmodel.producers; import com.yahoo.document.*; +import com.yahoo.document.config.DocumenttypesConfig; import com.yahoo.document.annotation.AnnotationReferenceDataType; import com.yahoo.document.annotation.AnnotationType; import com.yahoo.documentmodel.DataTypeCollection; @@ -15,8 +16,15 @@ import java.util.*; * @author baldersheim */ public class DocumentTypes { + private boolean useV8GeoPositions = false; + + public DocumentTypes useV8GeoPositions(boolean value) { + this.useV8GeoPositions = value; + return this; + } public DocumenttypesConfig.Builder produce(DocumentModel model, DocumenttypesConfig.Builder builder) { + builder.usev8geopositions(this.useV8GeoPositions); Map<NewDocumentType.Name, NewDocumentType> produced = new HashMap<>(); for (NewDocumentType documentType : model.getDocumentManager().getTypes()) { produceInheritOrder(documentType, builder, produced); @@ -61,7 +69,7 @@ public class DocumentTypes { builder.documenttype(db); } - private void buildConfig(Set<FieldSet> fieldSets, com.yahoo.document.DocumenttypesConfig.Documenttype.Builder db) { + private void buildConfig(Set<FieldSet> fieldSets, DocumenttypesConfig.Documenttype.Builder db) { for (FieldSet fs : fieldSets) { buildConfig(fs, db); } diff --git a/config-model/src/main/java/com/yahoo/vespa/model/VespaModel.java b/config-model/src/main/java/com/yahoo/vespa/model/VespaModel.java index f584b4cd207..13b0f6216b2 100644 --- a/config-model/src/main/java/com/yahoo/vespa/model/VespaModel.java +++ b/config-model/src/main/java/com/yahoo/vespa/model/VespaModel.java @@ -194,6 
+194,7 @@ public final class VespaModel extends AbstractConfigProducerRoot implements Seri HostSystem hostSystem = root.hostSystem(); if (complete) { // create a completed, frozen model + root.useFeatureFlags(deployState.getProperties().featureFlags()); configModelRepo.readConfigModels(deployState, this, builder, root, new VespaConfigModelRegistry(configModelRegistry)); addServiceClusters(deployState, builder); setupRouting(deployState); diff --git a/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/VespaMetricSet.java b/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/VespaMetricSet.java index a65e6fe16c0..12f0f717a19 100644 --- a/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/VespaMetricSet.java +++ b/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/VespaMetricSet.java @@ -165,6 +165,7 @@ public class VespaMetricSet { metrics.add(new Metric("httpapi_succeeded.rate")); metrics.add(new Metric("httpapi_failed.rate")); metrics.add(new Metric("httpapi_parse_error.rate")); + addMetric(metrics, "httpapi_condition_not_met", List.of("rate")); metrics.add(new Metric("mem.heap.total.average")); metrics.add(new Metric("mem.heap.free.average")); @@ -194,8 +195,8 @@ public class VespaMetricSet { metrics.add(new Metric("http.status.3xx.rate")); metrics.add(new Metric("http.status.4xx.rate")); metrics.add(new Metric("http.status.5xx.rate")); - metrics.add(new Metric("http.status.401.rate")); - metrics.add(new Metric("http.status.403.rate")); + metrics.add(new Metric("http.status.401.rate")); // TODO: Remove in Vespa 8 + metrics.add(new Metric("http.status.403.rate")); // TODO: Remove in Vespa 8 metrics.add(new Metric("jdisc.http.request.uri_length.max")); metrics.add(new Metric("jdisc.http.request.uri_length.sum")); @@ -428,6 +429,7 @@ public class VespaMetricSet { addSearchNodeExecutorMetrics(metrics, "content.proton.executor.docsum"); addSearchNodeExecutorMetrics(metrics, "content.proton.executor.shared"); 
addSearchNodeExecutorMetrics(metrics, "content.proton.executor.warmup"); + addSearchNodeExecutorMetrics(metrics, "content.proton.executor.field_writer"); // jobs metrics.add(new Metric("content.proton.documentdb.job.total.average")); @@ -582,6 +584,15 @@ public class VespaMetricSet { metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.docs_matched.count")); metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.limited_queries.rate")); + // feeding + metrics.add(new Metric("content.proton.documentdb.feeding.commit.operations.max")); + metrics.add(new Metric("content.proton.documentdb.feeding.commit.operations.sum")); + metrics.add(new Metric("content.proton.documentdb.feeding.commit.operations.count")); + metrics.add(new Metric("content.proton.documentdb.feeding.commit.operations.rate")); + metrics.add(new Metric("content.proton.documentdb.feeding.commit.latency.max")); + metrics.add(new Metric("content.proton.documentdb.feeding.commit.latency.sum")); + metrics.add(new Metric("content.proton.documentdb.feeding.commit.latency.count")); + return metrics; } @@ -617,6 +628,12 @@ public class VespaMetricSet { metrics.add(new Metric("vds.filestor.alldisks.averagequeuewait.sum.sum")); metrics.add(new Metric("vds.filestor.alldisks.averagequeuewait.sum.count")); metrics.add(new Metric("vds.filestor.alldisks.averagequeuewait.sum.average")); // TODO: Remove in Vespa 8 + metrics.add(new Metric("vds.filestor.alldisks.active_operations.size.max")); + metrics.add(new Metric("vds.filestor.alldisks.active_operations.size.sum")); + metrics.add(new Metric("vds.filestor.alldisks.active_operations.size.count")); + metrics.add(new Metric("vds.filestor.alldisks.active_operations.latency.max")); + metrics.add(new Metric("vds.filestor.alldisks.active_operations.latency.sum")); + metrics.add(new Metric("vds.filestor.alldisks.active_operations.latency.count")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.mergemetadatareadlatency.max")); 
metrics.add(new Metric("vds.filestor.alldisks.allthreads.mergemetadatareadlatency.sum")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.mergemetadatareadlatency.count")); diff --git a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/AwsAccessControlValidator.java b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/AwsAccessControlValidator.java index e5c8a27debf..a07e07169d1 100644 --- a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/AwsAccessControlValidator.java +++ b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/AwsAccessControlValidator.java @@ -31,7 +31,7 @@ public class AwsAccessControlValidator extends Validator { || ! http.getAccessControl().get().writeEnabled || ! http.getAccessControl().get().readEnabled) - if (hasHandlerThatNeedsProtection(cluster) || ! cluster.getAllServlets().isEmpty()) + if (hasHandlerThatNeedsProtection(cluster)) offendingClusters.add(cluster.getName()); } if (! 
offendingClusters.isEmpty()) diff --git a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/ComplexAttributeFieldsValidator.java b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/ComplexAttributeFieldsValidator.java index 43bf8133c74..e2b08a621d1 100644 --- a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/ComplexAttributeFieldsValidator.java +++ b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/ComplexAttributeFieldsValidator.java @@ -3,9 +3,9 @@ package com.yahoo.vespa.model.application.validation; import com.yahoo.config.model.deploy.DeployState; import com.yahoo.document.DataType; -import com.yahoo.document.PositionDataType; import com.yahoo.searchdefinition.Schema; import com.yahoo.searchdefinition.document.ComplexAttributeFieldUtils; +import com.yahoo.searchdefinition.document.GeoPos; import com.yahoo.searchdefinition.document.ImmutableSDField; import com.yahoo.vespa.model.VespaModel; import com.yahoo.vespa.model.search.AbstractSearchCluster; @@ -62,8 +62,7 @@ public class ComplexAttributeFieldsValidator extends Validator { private static boolean isSupportedComplexField(ImmutableSDField field) { return (ComplexAttributeFieldUtils.isSupportedComplexField(field) || - field.getDataType().equals(PositionDataType.INSTANCE) || - field.getDataType().equals(DataType.getArray(PositionDataType.INSTANCE))); + GeoPos.isAnyPos(field)); } private static String toString(ImmutableSDField field) { diff --git a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/QuotaValidator.java b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/QuotaValidator.java index 10d97cbb58c..d1dc2b84c8a 100644 --- a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/QuotaValidator.java +++ b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/QuotaValidator.java @@ -39,7 +39,7 @@ public class QuotaValidator extends 
Validator { var maxSpend = model.allClusters().stream() .filter(id -> !adminClusterIds(model).contains(id)) .map(id -> model.provisioned().all().getOrDefault(id, zeroCapacity)) - .mapToDouble(c -> c.maxResources().cost()) + .mapToDouble(c -> c.maxResources().cost()) // TODO: This may be unspecified -> 0 .sum(); var actualSpend = model.allocatedHosts().getHosts().stream() diff --git a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/ClusterSizeReductionValidator.java b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/ClusterSizeReductionValidator.java index 25a570e44a2..14fb903a547 100644 --- a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/ClusterSizeReductionValidator.java +++ b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/ClusterSizeReductionValidator.java @@ -41,7 +41,7 @@ public class ClusterSizeReductionValidator implements ChangeValidator { int currentSize = current.minResources().nodes(); int nextSize = next.minResources().nodes(); // don't allow more than 50% reduction, but always allow to reduce size with 1 - if ( nextSize < ((double)currentSize) * 0.5 && nextSize != currentSize - 1) + if ( nextSize < currentSize * 0.5 && nextSize != currentSize - 1) overrides.invalid(ValidationId.clusterSizeReduction, "Size reduction in '" + clusterId.value() + "' is too large: " + "New min size must be at least 50% of the current min size. 
" + diff --git a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/IndexedSearchClusterChangeValidator.java b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/IndexedSearchClusterChangeValidator.java index a43c5b71903..8c333a099d0 100644 --- a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/IndexedSearchClusterChangeValidator.java +++ b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/IndexedSearchClusterChangeValidator.java @@ -27,24 +27,29 @@ import java.util.stream.Collectors; public class IndexedSearchClusterChangeValidator implements ChangeValidator { @Override - public List<ConfigChangeAction> validate(VespaModel current, VespaModel next, ValidationOverrides overrides, Instant now) { + public List<ConfigChangeAction> validate(VespaModel current, VespaModel next, + ValidationOverrides overrides, Instant now) { List<ConfigChangeAction> result = new ArrayList<>(); for (Map.Entry<String, ContentCluster> currentEntry : current.getContentClusters().entrySet()) { ContentCluster nextCluster = next.getContentClusters().get(currentEntry.getKey()); if (nextCluster != null && nextCluster.getSearch().hasIndexedCluster()) { - result.addAll(validateContentCluster(currentEntry.getValue(), nextCluster)); + result.addAll(validateContentCluster(currentEntry.getValue(), nextCluster, overrides, now)); } } return result; } private static List<ConfigChangeAction> validateContentCluster(ContentCluster currentCluster, - ContentCluster nextCluster) { - return validateDocumentDatabases(currentCluster, nextCluster); + ContentCluster nextCluster, + ValidationOverrides overrides, + Instant now) { + return validateDocumentDatabases(currentCluster, nextCluster, overrides, now); } private static List<ConfigChangeAction> validateDocumentDatabases(ContentCluster currentCluster, - ContentCluster nextCluster) { + ContentCluster nextCluster, + ValidationOverrides overrides, + 
Instant now) { List<ConfigChangeAction> result = new ArrayList<>(); for (DocumentDatabase currentDb : getDocumentDbs(currentCluster.getSearch())) { String docTypeName = currentDb.getName(); @@ -52,7 +57,7 @@ public class IndexedSearchClusterChangeValidator implements ChangeValidator { filter(db -> db.getName().equals(docTypeName)).findFirst(); if (nextDb.isPresent()) { result.addAll(validateDocumentDatabase(currentCluster, nextCluster, docTypeName, - currentDb, nextDb.get())); + currentDb, nextDb.get(), overrides, now)); } } return result; @@ -62,11 +67,19 @@ public class IndexedSearchClusterChangeValidator implements ChangeValidator { ContentCluster nextCluster, String docTypeName, DocumentDatabase currentDb, - DocumentDatabase nextDb) { + DocumentDatabase nextDb, + ValidationOverrides overrides, + Instant now) { NewDocumentType currentDocType = currentCluster.getDocumentDefinitions().get(docTypeName); NewDocumentType nextDocType = nextCluster.getDocumentDefinitions().get(docTypeName); List<VespaConfigChangeAction> result = - new DocumentDatabaseChangeValidator(currentCluster.id(), currentDb, currentDocType, nextDb, nextDocType).validate(); + new DocumentDatabaseChangeValidator(currentCluster.id(), + currentDb, + currentDocType, + nextDb, + nextDocType, + overrides, + now).validate(); return modifyActions(result, getSearchNodeServices(nextCluster.getSearch().getIndexed()), docTypeName); } diff --git a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/search/AttributeChangeValidator.java b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/search/AttributeChangeValidator.java index 431bca3fb5a..1957c52e841 100644 --- a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/search/AttributeChangeValidator.java +++ b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/search/AttributeChangeValidator.java @@ -1,6 +1,8 @@ // Copyright Yahoo. 
Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.vespa.model.application.validation.change.search; +import com.yahoo.config.application.api.ValidationId; +import com.yahoo.config.application.api.ValidationOverrides; import com.yahoo.config.provision.ClusterSpec; import com.yahoo.documentmodel.NewDocumentType; import com.yahoo.searchdefinition.derived.AttributeFields; @@ -12,6 +14,7 @@ import com.yahoo.searchdefinition.document.HnswIndexParams; import com.yahoo.vespa.model.application.validation.change.VespaConfigChangeAction; import com.yahoo.vespa.model.application.validation.change.VespaRestartAction; +import java.time.Instant; import java.util.ArrayList; import java.util.List; import java.util.Objects; @@ -33,6 +36,8 @@ public class AttributeChangeValidator { private final AttributeFields nextFields; private final IndexSchema nextIndexSchema; private final NewDocumentType nextDocType; + private final ValidationOverrides overrides; + private final Instant now; public AttributeChangeValidator(ClusterSpec.Id id, AttributeFields currentFields, @@ -40,7 +45,9 @@ public class AttributeChangeValidator { NewDocumentType currentDocType, AttributeFields nextFields, IndexSchema nextIndexSchema, - NewDocumentType nextDocType) { + NewDocumentType nextDocType, + ValidationOverrides overrides, + Instant now) { this.id = id; this.currentFields = currentFields; this.currentIndexSchema = currentIndexSchema; @@ -48,6 +55,8 @@ public class AttributeChangeValidator { this.nextFields = nextFields; this.nextIndexSchema = nextIndexSchema; this.nextDocType = nextDocType; + this.overrides = overrides; + this.now = now; } public List<VespaConfigChangeAction> validate() { @@ -97,23 +106,23 @@ public class AttributeChangeValidator { private List<VespaConfigChangeAction> validateAttributeSettings() { List<VespaConfigChangeAction> result = new ArrayList<>(); - for (Attribute nextAttr : nextFields.attributes()) { - Attribute currAttr = 
currentFields.getAttribute(nextAttr.getName()); - if (currAttr != null) { - validateAttributeSetting(id, currAttr, nextAttr, Attribute::isFastSearch, "fast-search", result); - validateAttributeSetting(id, currAttr, nextAttr, Attribute::isFastAccess, "fast-access", result); - validateAttributeSetting(id, currAttr, nextAttr, AttributeChangeValidator::extractDictionaryType, "dictionary: btree/hash", result); - validateAttributeSetting(id, currAttr, nextAttr, AttributeChangeValidator::extractDictionaryCase, "dictionary: cased/uncased", result); - validateAttributeSetting(id, currAttr, nextAttr, Attribute::isHuge, "huge", result); - validateAttributeSetting(id, currAttr, nextAttr, Attribute::isPaged, "paged", result); - validateAttributeSetting(id, currAttr, nextAttr, Attribute::densePostingListThreshold, "dense-posting-list-threshold", result); - validateAttributeSetting(id, currAttr, nextAttr, Attribute::isEnabledOnlyBitVector, "rank: filter", result); - validateAttributeSetting(id, currAttr, nextAttr, Attribute::distanceMetric, "distance-metric", result); - - validateAttributeSetting(id, currAttr, nextAttr, AttributeChangeValidator::hasHnswIndex, "indexing: index", result); - if (hasHnswIndex(currAttr) && hasHnswIndex(nextAttr)) { - validateAttributeHnswIndexSetting(id, currAttr, nextAttr, HnswIndexParams::maxLinksPerNode, "max-links-per-node", result); - validateAttributeHnswIndexSetting(id, currAttr, nextAttr, HnswIndexParams::neighborsToExploreAtInsert, "neighbors-to-explore-at-insert", result); + for (Attribute next : nextFields.attributes()) { + Attribute current = currentFields.getAttribute(next.getName()); + if (current != null) { + validateAttributeSetting(id, current, next, Attribute::isFastSearch, "fast-search", result); + validateAttributeSetting(id, current, next, Attribute::isFastAccess, "fast-access", result); + validateAttributeSetting(id, current, next, AttributeChangeValidator::extractDictionaryType, "dictionary: btree/hash", result); + 
validateAttributeSetting(id, current, next, AttributeChangeValidator::extractDictionaryCase, "dictionary: cased/uncased", result); + validateAttributeSetting(id, current, next, Attribute::isHuge, "huge", result); + validateAttributeSetting(id, current, next, Attribute::isPaged, "paged", result); + validatePagedAttributeRemoval(current, next); + validateAttributeSetting(id, current, next, Attribute::densePostingListThreshold, "dense-posting-list-threshold", result); + validateAttributeSetting(id, current, next, Attribute::isEnabledOnlyBitVector, "rank: filter", result); + validateAttributeSetting(id, current, next, Attribute::distanceMetric, "distance-metric", result); + validateAttributeSetting(id, current, next, AttributeChangeValidator::hasHnswIndex, "indexing: index", result); + if (hasHnswIndex(current) && hasHnswIndex(next)) { + validateAttributeHnswIndexSetting(id, current, next, HnswIndexParams::maxLinksPerNode, "max-links-per-node", result); + validateAttributeHnswIndexSetting(id, current, next, HnswIndexParams::neighborsToExploreAtInsert, "neighbors-to-explore-at-insert", result); } } } @@ -132,14 +141,14 @@ public class AttributeChangeValidator { } private static <T> void validateAttributeSetting(ClusterSpec.Id id, - Attribute currentAttr, Attribute nextAttr, + Attribute current, Attribute next, Function<Attribute, T> settingValueProvider, String setting, List<VespaConfigChangeAction> result) { - T currentValue = settingValueProvider.apply(currentAttr); - T nextValue = settingValueProvider.apply(nextAttr); + T currentValue = settingValueProvider.apply(current); + T nextValue = settingValueProvider.apply(next); if ( ! 
Objects.equals(currentValue, nextValue)) { String message = String.format("change property '%s' from '%s' to '%s'", setting, currentValue, nextValue); - result.add(new VespaRestartAction(id, new ChangeMessageBuilder(nextAttr.getName()).addChange(message).build())); + result.add(new VespaRestartAction(id, new ChangeMessageBuilder(next.getName()).addChange(message).build())); } } @@ -156,4 +165,13 @@ public class AttributeChangeValidator { } } + private void validatePagedAttributeRemoval(Attribute current, Attribute next) { + if (current.isPaged() && !next.isPaged()) { + overrides.invalid(ValidationId.pagedSettingRemoval, + current + "' has setting 'paged' removed. " + + "This may cause content nodes to run out of memory as the entire attribute is loaded into memory", + now); + } + } + } diff --git a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/search/ChangeMessageBuilder.java b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/search/ChangeMessageBuilder.java index ba0e0717c07..3481d2ce219 100644 --- a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/search/ChangeMessageBuilder.java +++ b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/search/ChangeMessageBuilder.java @@ -5,10 +5,9 @@ import java.util.ArrayList; import java.util.List; /** - * Class used to build a message describing the changes in a given field. + * Builder of a message describing the changes in a given field. 
* * @author geirst - * @since 2014-12-09 */ public class ChangeMessageBuilder { @@ -20,10 +19,7 @@ public class ChangeMessageBuilder { } public String build() { - StringBuilder retval = new StringBuilder(); - retval.append("Field '" + fieldName + "' changed: "); - retval.append(String.join(", ", changes)); - return retval.toString(); + return "Field '" + fieldName + "' changed: " + String.join(", ", changes); } public ChangeMessageBuilder addChange(String component, String from, String to) { diff --git a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/search/DocumentDatabaseChangeValidator.java b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/search/DocumentDatabaseChangeValidator.java index be2f49085b2..bff337adfb6 100644 --- a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/search/DocumentDatabaseChangeValidator.java +++ b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/search/DocumentDatabaseChangeValidator.java @@ -23,17 +23,23 @@ public class DocumentDatabaseChangeValidator { private final NewDocumentType currentDocType; private final DocumentDatabase nextDatabase; private final NewDocumentType nextDocType; + private final ValidationOverrides overrides; + private final Instant now; public DocumentDatabaseChangeValidator(ClusterSpec.Id id, DocumentDatabase currentDatabase, NewDocumentType currentDocType, DocumentDatabase nextDatabase, - NewDocumentType nextDocType) { + NewDocumentType nextDocType, + ValidationOverrides overrides, + Instant now) { this.id = id; this.currentDatabase = currentDatabase; this.currentDocType = currentDocType; this.nextDatabase = nextDatabase; this.nextDocType = nextDocType; + this.overrides = overrides; + this.now = now; } public List<VespaConfigChangeAction> validate() { @@ -50,7 +56,8 @@ public class DocumentDatabaseChangeValidator { currentDatabase.getDerivedConfiguration().getAttributeFields(), 
currentDatabase.getDerivedConfiguration().getIndexSchema(), currentDocType, nextDatabase.getDerivedConfiguration().getAttributeFields(), - nextDatabase.getDerivedConfiguration().getIndexSchema(), nextDocType) + nextDatabase.getDerivedConfiguration().getIndexSchema(), nextDocType, + overrides, now) .validate(); } diff --git a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/search/IndexingScriptChangeMessageBuilder.java b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/search/IndexingScriptChangeMessageBuilder.java index ce8347b66c1..5e258fde821 100644 --- a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/search/IndexingScriptChangeMessageBuilder.java +++ b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/search/IndexingScriptChangeMessageBuilder.java @@ -14,7 +14,6 @@ import com.yahoo.vespa.documentmodel.SummaryTransform; * This message should be more descriptive for the end-user than just seeing the changed indexing script. 
* * @author geirst - * @since 2014-12-09 */ public class IndexingScriptChangeMessageBuilder { @@ -57,7 +56,7 @@ public class IndexingScriptChangeMessageBuilder { private void checkStemming(ChangeMessageBuilder builder) { Stemming currentStemming = currentField.getStemming(currentSchema); Stemming nextStemming = nextField.getStemming(nextSchema); - if (!currentStemming.equals(nextStemming)) { + if (currentStemming != nextStemming) { builder.addChange("stemming", currentStemming.getName(), nextStemming.getName()); } } @@ -65,7 +64,7 @@ public class IndexingScriptChangeMessageBuilder { private void checkNormalizing(ChangeMessageBuilder builder) { NormalizeLevel.Level currentLevel = currentField.getNormalizing().getLevel(); NormalizeLevel.Level nextLevel = nextField.getNormalizing().getLevel(); - if (!currentLevel.equals(nextLevel)) { + if (currentLevel != nextLevel) { builder.addChange("normalizing", currentLevel.toString(), nextLevel.toString()); } } @@ -77,7 +76,7 @@ public class IndexingScriptChangeMessageBuilder { if (currentSummaryField != null) { SummaryTransform currentTransform = currentSummaryField.getTransform(); SummaryTransform nextTransform = nextSummaryField.getTransform(); - if (!currentSummaryField.getTransform().equals(nextSummaryField.getTransform())) { + if (currentSummaryField.getTransform() != nextSummaryField.getTransform()) { builder.addChange("summary field '" + fieldName + "' transform", currentTransform.getName(), nextTransform.getName()); } @@ -88,7 +87,7 @@ public class IndexingScriptChangeMessageBuilder { private static String toString(Matching matching) { Matching.Type type = matching.getType(); String retval = type.getName(); - if (type.equals(Matching.Type.GRAM)) { + if (type == Matching.Type.GRAM) { retval += " (size " + matching.getGramSize() + ")"; } return retval; diff --git a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/search/IndexingScriptChangeValidator.java 
b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/search/IndexingScriptChangeValidator.java index e64a3d44bba..f6f6b6abdee 100644 --- a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/search/IndexingScriptChangeValidator.java +++ b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/search/IndexingScriptChangeValidator.java @@ -64,8 +64,7 @@ public class IndexingScriptChangeValidator { return Optional.empty(); } - static boolean equalScripts(ScriptExpression currentScript, - ScriptExpression nextScript) { + static boolean equalScripts(ScriptExpression currentScript, ScriptExpression nextScript) { // Output expressions are specifying in which context a field value is used (attribute, index, summary), // and do not affect how the field value is generated in the indexing doc proc. // The output expressions are therefore removed before doing the comparison. diff --git a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/search/StructFieldAttributeChangeValidator.java b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/search/StructFieldAttributeChangeValidator.java index acb404a051b..a10d2c36de1 100644 --- a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/search/StructFieldAttributeChangeValidator.java +++ b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/search/StructFieldAttributeChangeValidator.java @@ -1,7 +1,6 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.vespa.model.application.validation.change.search; -import com.yahoo.config.application.api.ValidationOverrides; import com.yahoo.config.provision.ClusterSpec; import com.yahoo.document.ArrayDataType; import com.yahoo.document.DataType; @@ -15,7 +14,6 @@ import com.yahoo.searchdefinition.document.ComplexAttributeFieldUtils; import com.yahoo.vespa.model.application.validation.change.VespaConfigChangeAction; import com.yahoo.vespa.model.application.validation.change.VespaRestartAction; -import java.time.Instant; import java.util.ArrayList; import java.util.Collection; import java.util.List; @@ -24,7 +22,7 @@ import java.util.stream.Collectors; /** * Validates the changes between the current and next set of struct field attributes in a document database. - + * * Complex fields of the following types are considered (as they might have struct field attributes): * - array of simple struct * - map of simple struct @@ -53,7 +51,7 @@ public class StructFieldAttributeChangeValidator { } public List<VespaConfigChangeAction> validate() { - List<VespaConfigChangeAction> result = new ArrayList(); + List<VespaConfigChangeAction> result = new ArrayList<>(); for (Field currentField : currentDocType.getAllFields()) { Field nextField = nextDocType.getField(currentField.getName()); if (nextField != null) { diff --git a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/first/AccessControlOnFirstDeploymentValidator.java b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/first/AccessControlOnFirstDeploymentValidator.java index a64f93a046a..dd6e6ad590d 100644 --- a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/first/AccessControlOnFirstDeploymentValidator.java +++ b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/first/AccessControlOnFirstDeploymentValidator.java @@ -38,7 +38,7 @@ public class AccessControlOnFirstDeploymentValidator extends Validator { || ! 
cluster.getHttp().getAccessControl().isPresent() || ! cluster.getHttp().getAccessControl().get().writeEnabled) - if (hasHandlerThatNeedsProtection(cluster) || ! cluster.getAllServlets().isEmpty()) + if (hasHandlerThatNeedsProtection(cluster)) offendingClusters.add(cluster.getName()); } if (! offendingClusters.isEmpty()) diff --git a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/first/RedundancyOnFirstDeploymentValidator.java b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/first/RedundancyOnFirstDeploymentValidator.java index 636a3f44369..e85bbba2dca 100644 --- a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/first/RedundancyOnFirstDeploymentValidator.java +++ b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/first/RedundancyOnFirstDeploymentValidator.java @@ -31,8 +31,7 @@ public class RedundancyOnFirstDeploymentValidator extends Validator { if ( ! deployState.zone().environment().isProduction()) return; for (ContentCluster cluster : model.getContentClusters().values()) { - if (cluster.redundancy().finalRedundancy() == 1 - && cluster.redundancy().totalNodes() > cluster.redundancy().groups()) + if (cluster.redundancy().finalRedundancy() == 1 && cluster.redundancy().groups() == 1) deployState.validationOverrides().invalid(ValidationId.redundancyOne, cluster + " has redundancy 1, which will cause it to lose data " + "if a node fails. This requires an override on first deployment " + diff --git a/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/ServletBuilder.java b/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/ServletBuilder.java deleted file mode 100644 index fc3002ab3e3..00000000000 --- a/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/ServletBuilder.java +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
-package com.yahoo.vespa.model.builder.xml.dom; - -import com.yahoo.config.model.deploy.DeployState; -import com.yahoo.config.model.producer.AbstractConfigProducer; -import com.yahoo.osgi.provider.model.ComponentModel; -import com.yahoo.text.XML; -import com.yahoo.vespa.model.container.component.Servlet; -import com.yahoo.vespa.model.container.component.ServletProvider; -import com.yahoo.vespa.model.container.component.SimpleComponent; -import com.yahoo.vespa.model.container.xml.BundleInstantiationSpecificationBuilder; -import org.w3c.dom.Element; - -import java.util.HashMap; -import java.util.Map; - -/** - * @author stiankri - * @since 5.32 - */ -public class ServletBuilder extends VespaDomBuilder.DomConfigProducerBuilder<Servlet> { - @Override - protected ServletProvider doBuild(DeployState deployState, AbstractConfigProducer ancestor, Element servletElement) { - SimpleComponent servlet = createServletComponent(servletElement); - ServletProvider servletProvider = createServletProvider(servletElement, servlet); - - return servletProvider; - } - - private SimpleComponent createServletComponent(Element servletElement) { - ComponentModel componentModel = new ComponentModel(BundleInstantiationSpecificationBuilder.build(servletElement)); - return new SimpleComponent(componentModel); - } - - private ServletProvider createServletProvider(Element servletElement, SimpleComponent servlet) { - Map<String, String> servletConfig = getServletConfig(servletElement); - return new ServletProvider(servlet, getPath(servletElement), servletConfig); - } - - private String getPath(Element servletElement) { - Element pathElement = XML.getChild(servletElement, "path"); - return XML.getValue(pathElement); - } - - private Map<String, String> getServletConfig(Element servletElement) { - Map<String, String> servletConfig = new HashMap<>(); - - Element servletConfigElement = XML.getChild(servletElement, "servlet-config"); - XML.getChildren(servletConfigElement).forEach( parameter -> - 
servletConfig.put(parameter.getTagName(), XML.getValue(parameter)) - ); - - return servletConfig; - } -} - diff --git a/config-model/src/main/java/com/yahoo/vespa/model/container/ApplicationContainerCluster.java b/config-model/src/main/java/com/yahoo/vespa/model/container/ApplicationContainerCluster.java index f7e8afc2d94..e8be43fdc96 100644 --- a/config-model/src/main/java/com/yahoo/vespa/model/container/ApplicationContainerCluster.java +++ b/config-model/src/main/java/com/yahoo/vespa/model/container/ApplicationContainerCluster.java @@ -23,7 +23,6 @@ import com.yahoo.container.handler.metrics.MetricsV2Handler; import com.yahoo.container.handler.metrics.PrometheusV1Handler; import com.yahoo.container.jdisc.ContainerMbusConfig; import com.yahoo.container.jdisc.messagebus.MbusServerProvider; -import com.yahoo.jdisc.http.ServletPathsConfig; import com.yahoo.osgi.provider.model.ComponentModel; import com.yahoo.search.config.QrStartConfig; import com.yahoo.vespa.config.search.RankProfilesConfig; @@ -34,9 +33,7 @@ import com.yahoo.vespa.model.AbstractService; import com.yahoo.vespa.model.admin.metricsproxy.MetricsProxyContainer; import com.yahoo.vespa.model.container.component.BindingPattern; import com.yahoo.vespa.model.container.component.Component; -import com.yahoo.vespa.model.container.component.ConfigProducerGroup; import com.yahoo.vespa.model.container.component.Handler; -import com.yahoo.vespa.model.container.component.Servlet; import com.yahoo.vespa.model.container.component.SystemBindingPattern; import com.yahoo.vespa.model.container.configserver.ConfigserverCluster; import com.yahoo.vespa.model.utils.FileSender; @@ -45,11 +42,12 @@ import java.util.ArrayList; import java.util.Collection; import java.util.LinkedHashSet; import java.util.List; -import java.util.Map; import java.util.Optional; import java.util.Set; import java.util.stream.Collectors; -import java.util.stream.Stream; + +import static 
com.yahoo.config.model.api.ApplicationClusterEndpoint.RoutingMethod.shared; +import static com.yahoo.config.model.api.ApplicationClusterEndpoint.RoutingMethod.sharedLayer4; /** * A container cluster that is typically set up from the user application. @@ -63,7 +61,6 @@ public final class ApplicationContainerCluster extends ContainerCluster<Applicat RankingConstantsConfig.Producer, OnnxModelsConfig.Producer, RankingExpressionsConfig.Producer, - ServletPathsConfig.Producer, ContainerMbusConfig.Producer, MetricsProxyApiConfig.Producer, ZookeeperServerConfig.Producer, @@ -83,7 +80,6 @@ public final class ApplicationContainerCluster extends ContainerCluster<Applicat private final Set<FileReference> applicationBundles = new LinkedHashSet<>(); - private final ConfigProducerGroup<Servlet> servletGroup; private final Set<String> previousHosts; private ContainerModelEvaluation modelEvaluation; @@ -100,7 +96,6 @@ public final class ApplicationContainerCluster extends ContainerCluster<Applicat public ApplicationContainerCluster(AbstractConfigProducer<?> parent, String configSubId, String clusterId, DeployState deployState) { super(parent, configSubId, clusterId, deployState, true); this.tlsClientAuthority = deployState.tlsClientAuthority(); - servletGroup = new ConfigProducerGroup<>(this, "servlet"); previousHosts = deployState.getPreviousModel().stream() .map(Model::allocatedHosts) .map(AllocatedHosts::getHosts) @@ -169,22 +164,6 @@ public final class ApplicationContainerCluster extends ContainerCluster<Applicat this.modelEvaluation = modelEvaluation; } - public Map<ComponentId, Servlet> getServletMap() { - return servletGroup.getComponentMap(); - } - - public void addServlet(Servlet servlet) { - servletGroup.addComponent(servlet.getGlobalComponentId(), servlet); - } - - public Collection<Servlet> getAllServlets() { - return allServlets().collect(Collectors.toCollection(ArrayList::new)); - } - - private Stream<Servlet> allServlets() { - return 
servletGroup.getComponents().stream(); - } - public void setMemoryPercentage(Integer memoryPercentage) { this.memoryPercentage = memoryPercentage; } @@ -210,6 +189,7 @@ public final class ApplicationContainerCluster extends ContainerCluster<Applicat for(String suffix : deployState.getProperties().zoneDnsSuffixes()) { // L4 ApplicationClusterEndpoint.DnsName l4Name = ApplicationClusterEndpoint.DnsName.sharedL4NameFrom( + deployState.zone().system(), ClusterSpec.Id.from(getName()), deployState.getProperties().applicationId(), suffix); @@ -223,6 +203,7 @@ public final class ApplicationContainerCluster extends ContainerCluster<Applicat // L7 ApplicationClusterEndpoint.DnsName l7Name = ApplicationClusterEndpoint.DnsName.sharedNameFrom( + deployState.zone().system(), ClusterSpec.Id.from(getName()), deployState.getProperties().applicationId(), suffix); @@ -235,14 +216,17 @@ public final class ApplicationContainerCluster extends ContainerCluster<Applicat .build()); } - // Then get all endpoints provided by controller. Can be created with L4 routing only + // Then get all endpoints provided by controller. 
+ Set<ApplicationClusterEndpoint.RoutingMethod> supportedRoutingMethods = Set.of(shared, sharedLayer4); Set<ContainerEndpoint> endpointsFromController = deployState.getEndpoints(); endpointsFromController.stream() .filter(ce -> ce.clusterId().equals(getName())) + .filter(ce -> supportedRoutingMethods.contains(ce.routingMethod())) .forEach(ce -> ce.names().forEach( name -> endpoints.add(ApplicationClusterEndpoint.builder() .scope(ce.scope()) - .sharedL4Routing() + .weight(Long.valueOf(ce.weight().orElse(1)).intValue()) // Default to weight=1 if not set + .routingMethod(ce.routingMethod()) .dnsName(ApplicationClusterEndpoint.DnsName.from(name)) .hosts(hosts) .clusterId(getName()) @@ -258,14 +242,6 @@ public final class ApplicationContainerCluster extends ContainerCluster<Applicat } @Override - public void getConfig(ServletPathsConfig.Builder builder) { - allServlets().forEach(servlet -> - builder.servlets(servlet.getComponentId().stringValue(), - servlet.toConfigBuilder()) - ); - } - - @Override public void getConfig(RankProfilesConfig.Builder builder) { if (modelEvaluation != null) modelEvaluation.getConfig(builder); } diff --git a/config-model/src/main/java/com/yahoo/vespa/model/container/ContainerCluster.java b/config-model/src/main/java/com/yahoo/vespa/model/container/ContainerCluster.java index 81dd458570b..c4f506d81ba 100755 --- a/config-model/src/main/java/com/yahoo/vespa/model/container/ContainerCluster.java +++ b/config-model/src/main/java/com/yahoo/vespa/model/container/ContainerCluster.java @@ -25,7 +25,6 @@ import com.yahoo.container.jdisc.state.StateHandler; import com.yahoo.container.logging.AccessLog; import com.yahoo.container.usability.BindingsOverviewHandler; import com.yahoo.document.config.DocumentmanagerConfig; -import com.yahoo.jdisc.http.filter.SecurityFilterInvoker; import com.yahoo.metrics.simple.runtime.MetricProperties; import com.yahoo.osgi.provider.model.ComponentModel; import com.yahoo.prelude.semantics.SemanticRulesConfig; @@ -114,8 
+113,8 @@ public abstract class ContainerCluster<CONTAINER extends Container> public static final String APPLICATION_STATUS_HANDLER_CLASS = "com.yahoo.container.handler.observability.ApplicationStatusHandler"; public static final String BINDINGS_OVERVIEW_HANDLER_CLASS = BindingsOverviewHandler.class.getName(); public static final String LOG_HANDLER_CLASS = com.yahoo.container.handler.LogHandler.class.getName(); - public static final String CMS = "-XX:+UseConcMarkSweepGC -XX:MaxTenuringThreshold=15 -XX:NewRatio=1"; public static final String G1GC = "-XX:+UseG1GC -XX:MaxTenuringThreshold=15"; + public static final String PARALLEL_GC = "-XX:+UseParallelGC -XX:MaxTenuringThreshold=15 -XX:NewRatio=1"; public static final String STATE_HANDLER_CLASS = "com.yahoo.container.jdisc.state.StateHandler"; public static final BindingPattern STATE_HANDLER_BINDING_1 = SystemBindingPattern.fromHttpPath(StateHandler.STATE_API_ROOT); @@ -176,7 +175,6 @@ public abstract class ContainerCluster<CONTAINER extends Container> addSimpleComponent(AccessLog.class); addComponent(new DefaultThreadpoolProvider(this, deployState.featureFlags().metricsproxyNumThreads())); addSimpleComponent(com.yahoo.concurrent.classlock.ClassLocking.class); - addSimpleComponent(SecurityFilterInvoker.class); addSimpleComponent("com.yahoo.container.jdisc.metric.MetricConsumerProviderProvider"); addSimpleComponent("com.yahoo.container.jdisc.metric.MetricProvider"); addSimpleComponent("com.yahoo.container.jdisc.metric.MetricUpdater"); diff --git a/config-model/src/main/java/com/yahoo/vespa/model/container/component/Servlet.java b/config-model/src/main/java/com/yahoo/vespa/model/container/component/Servlet.java deleted file mode 100644 index 667b0759f86..00000000000 --- a/config-model/src/main/java/com/yahoo/vespa/model/container/component/Servlet.java +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
-package com.yahoo.vespa.model.container.component; - -import com.yahoo.jdisc.http.ServletPathsConfig; -import com.yahoo.osgi.provider.model.ComponentModel; - -/** - * @author stiankri - */ -public class Servlet extends SimpleComponent { - public final String bindingPath; - - public Servlet(ComponentModel componentModel, String bindingPath) { - super(componentModel); - this.bindingPath = bindingPath; - } - - public ServletPathsConfig.Servlets.Builder toConfigBuilder() { - return new ServletPathsConfig.Servlets.Builder() - .path(bindingPath); - } -} diff --git a/config-model/src/main/java/com/yahoo/vespa/model/container/component/ServletProvider.java b/config-model/src/main/java/com/yahoo/vespa/model/container/component/ServletProvider.java deleted file mode 100644 index 8c0ec0e8a6f..00000000000 --- a/config-model/src/main/java/com/yahoo/vespa/model/container/component/ServletProvider.java +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
-package com.yahoo.vespa.model.container.component; - -import com.yahoo.component.ComponentId; -import com.yahoo.component.ComponentSpecification; -import com.yahoo.container.bundle.BundleInstantiationSpecification; -import com.yahoo.container.servlet.ServletConfigConfig; -import com.yahoo.osgi.provider.model.ComponentModel; - -import java.util.Map; - -/** - * @author stiankri - */ -public class ServletProvider extends Servlet implements ServletConfigConfig.Producer { - public static final String BUNDLE = "container-core"; - public static final String CLASS = "com.yahoo.container.servlet.ServletProvider"; - - private static final ComponentId SERVLET_PROVIDER_NAMESPACE = ComponentId.fromString("servlet-provider"); - private final Map<String, String> servletConfig; - - public ServletProvider(SimpleComponent servletToProvide, String bindingPath, Map<String, String> servletConfig) { - super(new ComponentModel( - new BundleInstantiationSpecification(servletToProvide.getComponentId().nestInNamespace(SERVLET_PROVIDER_NAMESPACE), - ComponentSpecification.fromString(CLASS), - ComponentSpecification.fromString(BUNDLE))), - bindingPath); - - inject(servletToProvide); - addChild(servletToProvide); - this.servletConfig = servletConfig; - } - - @Override - public void getConfig(ServletConfigConfig.Builder builder) { - builder.map(servletConfig); - } -} diff --git a/config-model/src/main/java/com/yahoo/vespa/model/container/http/xml/HttpBuilder.java b/config-model/src/main/java/com/yahoo/vespa/model/container/http/xml/HttpBuilder.java index 43fa515e7c3..1c0601915e9 100644 --- a/config-model/src/main/java/com/yahoo/vespa/model/container/http/xml/HttpBuilder.java +++ b/config-model/src/main/java/com/yahoo/vespa/model/container/http/xml/HttpBuilder.java @@ -115,7 +115,10 @@ public class HttpBuilder extends VespaDomBuilder.DomConfigProducerBuilder<Http> throw new IllegalArgumentException( String.format("Domain in access-control ('%s') does not match tenant domain ('%s')", 
explicitDomain.value(), tenantDomain.value())); } - deployState.getDeployLogger().logApplicationPackage(Level.WARNING, "Domain in 'access-control' is deprecated and will be removed soon"); + deployState.getDeployLogger() + .logApplicationPackage(Level.WARNING, + "Domain in 'access-control' is deprecated and is no longer necessary. " + + "Please remove the 'domain' attribute from the 'access-control' element in services.xml."); } return tenantDomain != null ? tenantDomain : explicitDomain; } diff --git a/config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilder.java b/config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilder.java index d65fbba6a5e..6d184666bdb 100644 --- a/config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilder.java +++ b/config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilder.java @@ -8,7 +8,6 @@ import com.yahoo.config.application.api.ApplicationPackage; import com.yahoo.config.application.api.DeployLogger; import com.yahoo.config.application.api.DeploymentInstanceSpec; import com.yahoo.config.application.api.DeploymentSpec; -import com.yahoo.config.application.api.Endpoint; import com.yahoo.config.model.ConfigModelContext; import com.yahoo.config.model.ConfigModelContext.ApplicationType; import com.yahoo.config.model.api.ApplicationClusterEndpoint; @@ -48,7 +47,6 @@ import com.yahoo.vespa.model.builder.xml.dom.DomComponentBuilder; import com.yahoo.vespa.model.builder.xml.dom.DomHandlerBuilder; import com.yahoo.vespa.model.builder.xml.dom.ModelElement; import com.yahoo.vespa.model.builder.xml.dom.NodesSpecification; -import com.yahoo.vespa.model.builder.xml.dom.ServletBuilder; import com.yahoo.vespa.model.builder.xml.dom.VespaDomBuilder; import com.yahoo.vespa.model.builder.xml.dom.chains.docproc.DomDocprocChainsBuilder; import com.yahoo.vespa.model.builder.xml.dom.chains.processing.DomProcessingBuilder; @@ -102,6 +100,7 @@ import 
java.util.Map; import java.util.Optional; import java.util.Set; import java.util.function.Consumer; +import java.util.logging.Level; import java.util.regex.Pattern; import java.util.stream.Collectors; @@ -211,7 +210,7 @@ public class ContainerModelBuilder extends ConfigModelBuilder<ContainerModel> { addServerProviders(deployState, spec, cluster); // Must be added after nodes: - addAthensCopperArgos(cluster, context); + addDeploymentSpecConfig(cluster, context, deployState.getDeployLogger()); addZooKeeper(cluster, spec); addParameterStoreValidationHandler(cluster, deployState); @@ -309,19 +308,23 @@ public class ContainerModelBuilder extends ConfigModelBuilder<ContainerModel> { cluster.addComponent(cloudSecretStore); } - private void addAthensCopperArgos(ApplicationContainerCluster cluster, ConfigModelContext context) { + private void addDeploymentSpecConfig(ApplicationContainerCluster cluster, ConfigModelContext context, DeployLogger deployLogger) { if ( ! context.getDeployState().isHosted()) return; - app.getDeployment().map(DeploymentSpec::fromXml) - .ifPresent(deploymentSpec -> { - addIdentityProvider(cluster, - context.getDeployState().getProperties().configServerSpecs(), - context.getDeployState().getProperties().loadBalancerName(), - context.getDeployState().getProperties().ztsUrl(), - context.getDeployState().getProperties().athenzDnsSuffix(), - context.getDeployState().zone(), - deploymentSpec); - addRotationProperties(cluster, context.getDeployState().zone(), context.getDeployState().getEndpoints(), deploymentSpec); - }); + Optional<DeploymentSpec> deploymentSpec = app.getDeployment().map(DeploymentSpec::fromXml); + if (deploymentSpec.isEmpty()) return; + + for (var deprecatedElement : deploymentSpec.get().deprecatedElements()) { + deployLogger.logApplicationPackage(WARNING, deprecatedElement.humanReadableString()); + } + + addIdentityProvider(cluster, + context.getDeployState().getProperties().configServerSpecs(), + 
context.getDeployState().getProperties().loadBalancerName(), + context.getDeployState().getProperties().ztsUrl(), + context.getDeployState().getProperties().athenzDnsSuffix(), + context.getDeployState().zone(), + deploymentSpec.get()); + addRotationProperties(cluster, context.getDeployState().zone(), context.getDeployState().getEndpoints(), deploymentSpec.get()); } private void addRotationProperties(ApplicationContainerCluster cluster, Zone zone, Set<ContainerEndpoint> endpoints, DeploymentSpec spec) { @@ -524,9 +527,10 @@ public class ContainerModelBuilder extends ConfigModelBuilder<ContainerModel> { return http; } + // TODO Vespa 8: Remove private void addServlets(DeployState deployState, Element spec, ApplicationContainerCluster cluster) { - for (Element servletElem : XML.getChildren(spec, "servlet")) - cluster.addServlet(new ServletBuilder().build(deployState, cluster, servletElem)); + if (XML.getChildren(spec, "servlet").size() > 0) + throw new IllegalArgumentException("The 'servlet' tag is no longer supported in services.xml. Please use a handler instead."); } private void addDocumentApi(Element spec, ApplicationContainerCluster cluster) { @@ -661,59 +665,54 @@ public class ContainerModelBuilder extends ConfigModelBuilder<ContainerModel> { static boolean incompatibleGCOptions(String jvmargs) { Pattern gcAlgorithm = Pattern.compile("-XX:[-+]Use.+GC"); Pattern cmsArgs = Pattern.compile("-XX:[-+]*CMS"); - return (gcAlgorithm.matcher(jvmargs).find() ||cmsArgs.matcher(jvmargs).find()); - } - - private static String buildJvmGCOptions(DeployState deployState, String jvmGCOPtions) { - String options = (jvmGCOPtions != null) - ? jvmGCOPtions - : deployState.getProperties().jvmGCOptions(); - return (options == null || options.isEmpty()) - ? (deployState.isHosted() ? 
ContainerCluster.CMS : ContainerCluster.G1GC) - : options; - } - - private static String getJvmOptions(ApplicationContainerCluster cluster, Element nodesElement, DeployLogger deployLogger) { - String jvmOptions; - if (nodesElement.hasAttribute(VespaDomBuilder.JVM_OPTIONS)) { - jvmOptions = nodesElement.getAttribute(VespaDomBuilder.JVM_OPTIONS); - if (nodesElement.hasAttribute(VespaDomBuilder.JVMARGS_ATTRIB_NAME)) { - String jvmArgs = nodesElement.getAttribute(VespaDomBuilder.JVMARGS_ATTRIB_NAME); - throw new IllegalArgumentException("You have specified both jvm-options='" + jvmOptions + "'" + - " and deprecated jvmargs='" + jvmArgs + "'. Merge jvmargs into jvm-options."); - } - } else { - jvmOptions = nodesElement.getAttribute(VespaDomBuilder.JVMARGS_ATTRIB_NAME); - if (incompatibleGCOptions(jvmOptions)) { - deployLogger.logApplicationPackage(WARNING, "You need to move out your GC related options from 'jvmargs' to 'jvm-gc-options'"); - cluster.setJvmGCOptions(ContainerCluster.G1GC); - } - } - return jvmOptions; + return (gcAlgorithm.matcher(jvmargs).find() || cmsArgs.matcher(jvmargs).find()); + } + + private static String buildJvmGCOptions(ConfigModelContext context, String jvmGCOptions) { + return new JvmGcOptions(context.getDeployState(), jvmGCOptions).build(); + } + + private static String getJvmOptions(ApplicationContainerCluster cluster, + Element nodesElement, + DeployState deployState, + boolean legacyOptions) { + return new JvmOptions(cluster, nodesElement, deployState, legacyOptions).build(); } private static String extractAttribute(Element element, String attrName) { return element.hasAttribute(attrName) ? 
element.getAttribute(attrName) : null; } - void extractJvmFromLegacyNodesTag(List<ApplicationContainer> nodes, ApplicationContainerCluster cluster, - Element nodesElement, ConfigModelContext context) { - applyNodesTagJvmArgs(nodes, getJvmOptions(cluster, nodesElement, context.getDeployLogger())); + private void extractJvmOptions(List<ApplicationContainer> nodes, + ApplicationContainerCluster cluster, + Element nodesElement, + ConfigModelContext context) { + Element jvmElement = XML.getChild(nodesElement, "jvm"); + if (jvmElement == null) { + extractJvmFromLegacyNodesTag(nodes, cluster, nodesElement, context); + } else { + extractJvmTag(nodes, cluster, nodesElement, jvmElement, context); + } + } + + private void extractJvmFromLegacyNodesTag(List<ApplicationContainer> nodes, ApplicationContainerCluster cluster, + Element nodesElement, ConfigModelContext context) { + applyNodesTagJvmArgs(nodes, getJvmOptions(cluster, nodesElement, context.getDeployState(), true)); if (cluster.getJvmGCOptions().isEmpty()) { String jvmGCOptions = extractAttribute(nodesElement, VespaDomBuilder.JVM_GC_OPTIONS); - cluster.setJvmGCOptions(buildJvmGCOptions(context.getDeployState(), jvmGCOptions)); + cluster.setJvmGCOptions(buildJvmGCOptions(context, jvmGCOptions)); } applyMemoryPercentage(cluster, nodesElement.getAttribute(VespaDomBuilder.Allocated_MEMORY_ATTRIB_NAME)); } - void extractJvmTag(List<ApplicationContainer> nodes, ApplicationContainerCluster cluster, - Element jvmElement, ConfigModelContext context) { - applyNodesTagJvmArgs(nodes, jvmElement.getAttribute(VespaDomBuilder.OPTIONS)); + private void extractJvmTag(List<ApplicationContainer> nodes, ApplicationContainerCluster cluster, + Element nodesElement, Element jvmElement, ConfigModelContext context) { + applyNodesTagJvmArgs(nodes, getJvmOptions(cluster, nodesElement, context.getDeployState(), false)); applyMemoryPercentage(cluster, jvmElement.getAttribute(VespaDomBuilder.Allocated_MEMORY_ATTRIB_NAME)); String jvmGCOptions = 
extractAttribute(jvmElement, VespaDomBuilder.GC_OPTIONS); - cluster.setJvmGCOptions(buildJvmGCOptions(context.getDeployState(), jvmGCOptions)); + cluster.setJvmGCOptions(buildJvmGCOptions(context, jvmGCOptions)); } /** @@ -730,12 +729,7 @@ public class ContainerModelBuilder extends ConfigModelBuilder<ContainerModel> { } else { List<ApplicationContainer> nodes = createNodes(cluster, containerElement, nodesElement, context); - Element jvmElement = XML.getChild(nodesElement, "jvm"); - if (jvmElement == null) { - extractJvmFromLegacyNodesTag(nodes, cluster, nodesElement, context); - } else { - extractJvmTag(nodes, cluster, jvmElement, context); - } + extractJvmOptions(nodes, cluster, nodesElement, context); applyRoutingAliasProperties(nodes, cluster); applyDefaultPreload(nodes, nodesElement); String environmentVars = getEnvironmentVariables(XML.getChild(nodesElement, ENVIRONMENT_VARIABLES_ELEMENT)); @@ -1074,4 +1068,97 @@ public class ContainerModelBuilder extends ConfigModelBuilder<ContainerModel> { return CONTAINER_TAG.equals(element.getTagName()) || DEPRECATED_CONTAINER_TAG.equals(element.getTagName()); } + private static class JvmOptions { + + private final ContainerCluster<?> cluster; + private final Element nodesElement; + private final DeployLogger logger; + private final boolean isHosted; + private final boolean legacyOptions; + + public JvmOptions(ContainerCluster<?> cluster, Element nodesElement, DeployState deployState, boolean legacyOptions) { + this.cluster = cluster; + this.nodesElement = nodesElement; + this.logger = deployState.getDeployLogger(); + this.isHosted = deployState.isHosted(); + this.legacyOptions = legacyOptions; + } + + String build() { + if (legacyOptions) + return buildLegacyOptions(); + + Element jvmElement = XML.getChild(nodesElement, "jvm"); + if (jvmElement == null) return ""; + String jvmOptions = jvmElement.getAttribute(VespaDomBuilder.OPTIONS); + if (jvmOptions == null) return ""; + log(jvmOptions); + return jvmOptions; + } + + 
String buildLegacyOptions() { + String jvmOptions; + if (nodesElement.hasAttribute(VespaDomBuilder.JVM_OPTIONS)) { + jvmOptions = nodesElement.getAttribute(VespaDomBuilder.JVM_OPTIONS); + log(jvmOptions); + if (nodesElement.hasAttribute(VespaDomBuilder.JVMARGS_ATTRIB_NAME)) { + String jvmArgs = nodesElement.getAttribute(VespaDomBuilder.JVMARGS_ATTRIB_NAME); + throw new IllegalArgumentException("You have specified both jvm-options='" + jvmOptions + "'" + + " and deprecated jvmargs='" + jvmArgs + + "'. Merge jvmargs into 'options' in 'jvm' element." + + " See https://docs.vespa.ai/en/reference/services-container.html#jvm"); + } + } else { + jvmOptions = nodesElement.getAttribute(VespaDomBuilder.JVMARGS_ATTRIB_NAME); + log(jvmOptions); + if (incompatibleGCOptions(jvmOptions)) { + logger.logApplicationPackage(WARNING, "You need to move your GC-related options from deprecated 'jvmargs' to 'gc-options' in 'jvm' element." + + " See https://docs.vespa.ai/en/reference/services-container.html#jvm"); + cluster.setJvmGCOptions(ContainerCluster.G1GC); + } + } + return jvmOptions; + } + + private void log(String jvmOptions) { + if (isHosted && jvmOptions != null && !jvmOptions.isEmpty()) + logger.logApplicationPackage(Level.INFO, "JVM options from services.xml: " + jvmOptions); + } + } + + private static class JvmGcOptions { + + private final DeployState deployState; + private final String jvmGcOptions; + private final DeployLogger logger; + private final boolean isHosted; + + public JvmGcOptions(DeployState deployState, String jvmGcOptions) { + this.deployState = deployState; + this.jvmGcOptions = jvmGcOptions; + this.logger = deployState.getDeployLogger(); + this.isHosted = deployState.isHosted(); + } + + private String build() { + String options = deployState.getProperties().jvmGCOptions(); + if (jvmGcOptions != null) { + log(jvmGcOptions); + options = jvmGcOptions; + // TODO: Verify options against lists of allowed and/or disallowed options + } + + if (options == null || 
options.isEmpty()) + options = deployState.isHosted() ? ContainerCluster.PARALLEL_GC : ContainerCluster.G1GC; + + return options; + } + + private void log(String jvmGcOptions) { + if (isHosted) + logger.logApplicationPackage(Level.INFO, "JVM GC options from services.xml: " + jvmGcOptions); + } + + } + } diff --git a/config-model/src/main/java/com/yahoo/vespa/model/content/ContentSearchCluster.java b/config-model/src/main/java/com/yahoo/vespa/model/content/ContentSearchCluster.java index d5500e7d040..13c3c229acb 100644 --- a/config-model/src/main/java/com/yahoo/vespa/model/content/ContentSearchCluster.java +++ b/config-model/src/main/java/com/yahoo/vespa/model/content/ContentSearchCluster.java @@ -435,13 +435,7 @@ public class ContentSearchCluster extends AbstractConfigProducer<SearchCluster> redundancy.getConfig(builder); } - if ((feedSequencerType == ProtonConfig.Indexing.Optimize.Enum.THROUGHPUT) && (visibilityDelay == 0.0)) { - // THROUGHPUT and zero visibilityDelay is inconsistent and currently a suboptimal combination, defaulting to LATENCY. - // TODO: Once we have figured out optimal combination this limitation will be cleaned up. 
- builder.indexing.optimize(ProtonConfig.Indexing.Optimize.Enum.LATENCY); - } else { - builder.indexing.optimize(feedSequencerType); - } + builder.indexing.optimize(feedSequencerType); builder.indexing.tasklimit(feedTaskLimit); builder.feeding.master_task_limit(feedMasterTaskLimit); builder.feeding.shared_field_writer_executor(sharedFieldWriterExecutor); diff --git a/config-model/src/main/javacc/SDParser.jj b/config-model/src/main/javacc/SDParser.jj index d3d992c11f5..92633f61e67 100644 --- a/config-model/src/main/javacc/SDParser.jj +++ b/config-model/src/main/javacc/SDParser.jj @@ -835,7 +835,7 @@ DataType dataType() : String typeName = null; boolean isArrayOldStyle = false; DataType mapType = null; - DataType arrayType = null; + DataType arrayType = null; DataType wsetType = null; TensorType tensorType; TemporaryStructuredDataType referenceType; diff --git a/config-model/src/main/resources/schema/deployment.rnc b/config-model/src/main/resources/schema/deployment.rnc index f24750bde8b..51a286a13c8 100644 --- a/config-model/src/main/resources/schema/deployment.rnc +++ b/config-model/src/main/resources/schema/deployment.rnc @@ -101,7 +101,7 @@ ProdTest = element test { } Region = element region { - attribute active { xsd:boolean } & + attribute active { xsd:boolean }? & attribute athenz-service { xsd:string }? 
& text } diff --git a/config-model/src/test/cfg/application/app_invalid_deployment_xml/deployment.xml b/config-model/src/test/cfg/application/app_invalid_deployment_xml/deployment.xml index ac72067e9e7..738a3397aad 100644 --- a/config-model/src/test/cfg/application/app_invalid_deployment_xml/deployment.xml +++ b/config-model/src/test/cfg/application/app_invalid_deployment_xml/deployment.xml @@ -4,6 +4,6 @@ <staging/> <prod global-service-id="query"> <region>us-east-3</region> - <region active="false">us-west-1</region> + <region invalid="invalid">us-west-1</region> </prod> </deployment> diff --git a/config-model/src/test/configmodel/types/documentmanager.cfg b/config-model/src/test/configmodel/types/documentmanager.cfg index f59dbeeb3ca..8b93e3a4665 100644 --- a/config-model/src/test/configmodel/types/documentmanager.cfg +++ b/config-model/src/test/configmodel/types/documentmanager.cfg @@ -1,4 +1,5 @@ enablecompression false +usev8geopositions false datatype[0].id 1381038251 datatype[0].structtype[0].name "position" datatype[0].structtype[0].version 0 @@ -12,117 +13,117 @@ datatype[0].structtype[0].field[0].detailedtype "" datatype[0].structtype[0].field[1].name "y" datatype[0].structtype[0].field[1].datatype 0 datatype[0].structtype[0].field[1].detailedtype "" -datatype[1].id -1865479609 -datatype[1].maptype[0].keytype 2 -datatype[1].maptype[0].valtype 4 -datatype[2].id 294108848 -datatype[2].structtype[0].name "folder" -datatype[2].structtype[0].version 0 -datatype[2].structtype[0].compresstype NONE -datatype[2].structtype[0].compresslevel 0 -datatype[2].structtype[0].compressthreshold 95 -datatype[2].structtype[0].compressminsize 800 -datatype[2].structtype[0].field[0].name "Version" -datatype[2].structtype[0].field[0].datatype 0 -datatype[2].structtype[0].field[0].detailedtype "" -datatype[2].structtype[0].field[1].name "Name" -datatype[2].structtype[0].field[1].datatype 2 -datatype[2].structtype[0].field[1].detailedtype "" 
-datatype[2].structtype[0].field[2].name "FlagsCounter" -datatype[2].structtype[0].field[2].datatype -1865479609 -datatype[2].structtype[0].field[2].detailedtype "" -datatype[2].structtype[0].field[3].name "anotherfolder" -datatype[2].structtype[0].field[3].datatype 294108848 -datatype[2].structtype[0].field[3].detailedtype "" -datatype[3].id 109267174 -datatype[3].structtype[0].name "sct" -datatype[3].structtype[0].version 0 -datatype[3].structtype[0].compresstype NONE -datatype[3].structtype[0].compresslevel 0 -datatype[3].structtype[0].compressthreshold 95 -datatype[3].structtype[0].compressminsize 800 -datatype[3].structtype[0].field[0].name "s1" -datatype[3].structtype[0].field[0].datatype 2 -datatype[3].structtype[0].field[0].detailedtype "" -datatype[3].structtype[0].field[1].name "s2" -datatype[3].structtype[0].field[1].datatype 2 -datatype[3].structtype[0].field[1].detailedtype "" -datatype[4].id 49942803 -datatype[4].arraytype[0].datatype 16 -datatype[5].id 339965458 -datatype[5].maptype[0].keytype 2 -datatype[5].maptype[0].valtype 2 -datatype[6].id -2092985853 -datatype[6].structtype[0].name "mystruct" -datatype[6].structtype[0].version 0 -datatype[6].structtype[0].compresstype NONE -datatype[6].structtype[0].compresslevel 0 -datatype[6].structtype[0].compressthreshold 95 -datatype[6].structtype[0].compressminsize 800 -datatype[6].structtype[0].field[0].name "bytearr" -datatype[6].structtype[0].field[0].datatype 49942803 -datatype[6].structtype[0].field[0].detailedtype "" -datatype[6].structtype[0].field[1].name "mymap" -datatype[6].structtype[0].field[1].datatype 339965458 -datatype[6].structtype[0].field[1].detailedtype "" -datatype[6].structtype[0].field[2].name "title" -datatype[6].structtype[0].field[2].datatype 2 -datatype[6].structtype[0].field[2].detailedtype "" -datatype[6].structtype[0].field[3].name "structfield" -datatype[6].structtype[0].field[3].datatype 2 -datatype[6].structtype[0].field[3].detailedtype "" -datatype[7].id -1245117006 
-datatype[7].arraytype[0].datatype 0 -datatype[8].id 1328286588 -datatype[8].weightedsettype[0].datatype 2 -datatype[8].weightedsettype[0].createifnonexistant false -datatype[8].weightedsettype[0].removeifzero false -datatype[9].id 2125328771 -datatype[9].weightedsettype[0].datatype 2 -datatype[9].weightedsettype[0].createifnonexistant false -datatype[9].weightedsettype[0].removeifzero true -datatype[10].id 2065577986 -datatype[10].weightedsettype[0].datatype 2 -datatype[10].weightedsettype[0].createifnonexistant true -datatype[10].weightedsettype[0].removeifzero false -datatype[11].id -1244829667 -datatype[11].arraytype[0].datatype 109267174 -datatype[12].id -1584287606 -datatype[12].maptype[0].keytype 2 -datatype[12].maptype[0].valtype 0 -datatype[13].id 2125154557 -datatype[13].maptype[0].keytype 2 -datatype[13].maptype[0].valtype 1 -datatype[14].id -1715531035 +datatype[1].id -794985308 +datatype[1].arraytype[0].datatype 1707615575 +datatype[2].id 1707615575 +datatype[2].arraytype[0].datatype -1486737430 +datatype[3].id 1416345047 +datatype[3].arraytype[0].datatype -372512406 +datatype[4].id 69621385 +datatype[4].arraytype[0].datatype 339965458 +datatype[5].id 49942803 +datatype[5].arraytype[0].datatype 16 +datatype[6].id -1245117006 +datatype[6].arraytype[0].datatype 0 +datatype[7].id 759956026 +datatype[7].arraytype[0].datatype -2092985853 +datatype[8].id -1244829667 +datatype[8].arraytype[0].datatype 109267174 +datatype[9].id -1486737430 +datatype[9].arraytype[0].datatype 2 +datatype[10].id -372512406 +datatype[10].maptype[0].keytype 0 +datatype[10].maptype[0].valtype 1707615575 +datatype[11].id 2138385264 +datatype[11].maptype[0].keytype 0 +datatype[11].maptype[0].valtype 5 +datatype[12].id -389833101 +datatype[12].maptype[0].keytype 0 +datatype[12].maptype[0].valtype 294108848 +datatype[13].id -1715531035 +datatype[13].maptype[0].keytype 0 +datatype[13].maptype[0].valtype 4 +datatype[14].id 1901258752 datatype[14].maptype[0].keytype 0 
-datatype[14].maptype[0].valtype 4 -datatype[15].id 2138385264 -datatype[15].maptype[0].keytype 0 -datatype[15].maptype[0].valtype 5 -datatype[16].id 435886609 +datatype[14].maptype[0].valtype -2092985853 +datatype[15].id 435886609 +datatype[15].maptype[0].keytype 2 +datatype[15].maptype[0].valtype -1245117006 +datatype[16].id 2125154557 datatype[16].maptype[0].keytype 2 -datatype[16].maptype[0].valtype -1245117006 -datatype[17].id -1486737430 -datatype[17].arraytype[0].datatype 2 -datatype[18].id 1707615575 -datatype[18].arraytype[0].datatype -1486737430 -datatype[19].id -794985308 -datatype[19].arraytype[0].datatype 1707615575 -datatype[20].id 69621385 -datatype[20].arraytype[0].datatype 339965458 -datatype[21].id -372512406 -datatype[21].maptype[0].keytype 0 -datatype[21].maptype[0].valtype 1707615575 -datatype[22].id 1416345047 -datatype[22].arraytype[0].datatype -372512406 -datatype[23].id 1901258752 -datatype[23].maptype[0].keytype 0 -datatype[23].maptype[0].valtype -2092985853 -datatype[24].id 759956026 -datatype[24].arraytype[0].datatype -2092985853 -datatype[25].id -389833101 -datatype[25].maptype[0].keytype 0 -datatype[25].maptype[0].valtype 294108848 +datatype[16].maptype[0].valtype 1 +datatype[17].id -1584287606 +datatype[17].maptype[0].keytype 2 +datatype[17].maptype[0].valtype 0 +datatype[18].id -1865479609 +datatype[18].maptype[0].keytype 2 +datatype[18].maptype[0].valtype 4 +datatype[19].id 339965458 +datatype[19].maptype[0].keytype 2 +datatype[19].maptype[0].valtype 2 +datatype[20].id 1328286588 +datatype[20].weightedsettype[0].datatype 2 +datatype[20].weightedsettype[0].createifnonexistant false +datatype[20].weightedsettype[0].removeifzero false +datatype[21].id 2065577986 +datatype[21].weightedsettype[0].datatype 2 +datatype[21].weightedsettype[0].createifnonexistant true +datatype[21].weightedsettype[0].removeifzero false +datatype[22].id 2125328771 +datatype[22].weightedsettype[0].datatype 2 +datatype[22].weightedsettype[0].createifnonexistant 
false +datatype[22].weightedsettype[0].removeifzero true +datatype[23].id 294108848 +datatype[23].structtype[0].name "folder" +datatype[23].structtype[0].version 0 +datatype[23].structtype[0].compresstype NONE +datatype[23].structtype[0].compresslevel 0 +datatype[23].structtype[0].compressthreshold 95 +datatype[23].structtype[0].compressminsize 800 +datatype[23].structtype[0].field[0].name "Version" +datatype[23].structtype[0].field[0].datatype 0 +datatype[23].structtype[0].field[0].detailedtype "" +datatype[23].structtype[0].field[1].name "Name" +datatype[23].structtype[0].field[1].datatype 2 +datatype[23].structtype[0].field[1].detailedtype "" +datatype[23].structtype[0].field[2].name "FlagsCounter" +datatype[23].structtype[0].field[2].datatype -1865479609 +datatype[23].structtype[0].field[2].detailedtype "" +datatype[23].structtype[0].field[3].name "anotherfolder" +datatype[23].structtype[0].field[3].datatype 294108848 +datatype[23].structtype[0].field[3].detailedtype "" +datatype[24].id -2092985853 +datatype[24].structtype[0].name "mystruct" +datatype[24].structtype[0].version 0 +datatype[24].structtype[0].compresstype NONE +datatype[24].structtype[0].compresslevel 0 +datatype[24].structtype[0].compressthreshold 95 +datatype[24].structtype[0].compressminsize 800 +datatype[24].structtype[0].field[0].name "bytearr" +datatype[24].structtype[0].field[0].datatype 49942803 +datatype[24].structtype[0].field[0].detailedtype "" +datatype[24].structtype[0].field[1].name "mymap" +datatype[24].structtype[0].field[1].datatype 339965458 +datatype[24].structtype[0].field[1].detailedtype "" +datatype[24].structtype[0].field[2].name "title" +datatype[24].structtype[0].field[2].datatype 2 +datatype[24].structtype[0].field[2].detailedtype "" +datatype[24].structtype[0].field[3].name "structfield" +datatype[24].structtype[0].field[3].datatype 2 +datatype[24].structtype[0].field[3].detailedtype "" +datatype[25].id 109267174 +datatype[25].structtype[0].name "sct" 
+datatype[25].structtype[0].version 0 +datatype[25].structtype[0].compresstype NONE +datatype[25].structtype[0].compresslevel 0 +datatype[25].structtype[0].compressthreshold 95 +datatype[25].structtype[0].compressminsize 800 +datatype[25].structtype[0].field[0].name "s1" +datatype[25].structtype[0].field[0].datatype 2 +datatype[25].structtype[0].field[0].detailedtype "" +datatype[25].structtype[0].field[1].name "s2" +datatype[25].structtype[0].field[1].datatype 2 +datatype[25].structtype[0].field[1].detailedtype "" datatype[26].id 1328581348 datatype[26].structtype[0].name "types.header" datatype[26].structtype[0].version 0 diff --git a/config-model/src/test/configmodel/types/documenttypes.cfg b/config-model/src/test/configmodel/types/documenttypes.cfg index 8f576715a4f..94551567352 100644 --- a/config-model/src/test/configmodel/types/documenttypes.cfg +++ b/config-model/src/test/configmodel/types/documenttypes.cfg @@ -1,4 +1,5 @@ enablecompression false +usev8geopositions false documenttype[0].id -853072901 documenttype[0].name "types" documenttype[0].version 0 diff --git a/config-model/src/test/configmodel/types/documenttypes_with_doc_field.cfg b/config-model/src/test/configmodel/types/documenttypes_with_doc_field.cfg index 283e5c2fe79..61c92eee8d1 100644 --- a/config-model/src/test/configmodel/types/documenttypes_with_doc_field.cfg +++ b/config-model/src/test/configmodel/types/documenttypes_with_doc_field.cfg @@ -1,4 +1,5 @@ enablecompression false +usev8geopositions false documenttype[0].id -1368624373 documenttype[0].name "other_doc" documenttype[0].version 0 @@ -26,7 +27,7 @@ documenttype[1].version 0 documenttype[1].headerstruct 1328581348 documenttype[1].bodystruct 0 documenttype[1].inherits[0].id 8 -documenttype[1].datatype[0].id -1368624373 +documenttype[1].datatype[0].id 1328581348 documenttype[1].datatype[0].type STRUCT documenttype[1].datatype[0].array.element.id 0 documenttype[1].datatype[0].map.key.id 0 @@ -35,29 +36,14 @@ 
documenttype[1].datatype[0].wset.key.id 0 documenttype[1].datatype[0].wset.createifnonexistent false documenttype[1].datatype[0].wset.removeifzero false documenttype[1].datatype[0].annotationref.annotation.id 0 -documenttype[1].datatype[0].sstruct.name "other_doc" +documenttype[1].datatype[0].sstruct.name "types.header" documenttype[1].datatype[0].sstruct.version 0 documenttype[1].datatype[0].sstruct.compression.type NONE documenttype[1].datatype[0].sstruct.compression.level 0 documenttype[1].datatype[0].sstruct.compression.threshold 95 documenttype[1].datatype[0].sstruct.compression.minsize 200 -documenttype[1].datatype[1].id 1328581348 -documenttype[1].datatype[1].type STRUCT -documenttype[1].datatype[1].array.element.id 0 -documenttype[1].datatype[1].map.key.id 0 -documenttype[1].datatype[1].map.value.id 0 -documenttype[1].datatype[1].wset.key.id 0 -documenttype[1].datatype[1].wset.createifnonexistent false -documenttype[1].datatype[1].wset.removeifzero false -documenttype[1].datatype[1].annotationref.annotation.id 0 -documenttype[1].datatype[1].sstruct.name "types.header" -documenttype[1].datatype[1].sstruct.version 0 -documenttype[1].datatype[1].sstruct.compression.type NONE -documenttype[1].datatype[1].sstruct.compression.level 0 -documenttype[1].datatype[1].sstruct.compression.threshold 95 -documenttype[1].datatype[1].sstruct.compression.minsize 200 -documenttype[1].datatype[1].sstruct.field[0].name "doc_field" -documenttype[1].datatype[1].sstruct.field[0].id 819293364 -documenttype[1].datatype[1].sstruct.field[0].datatype -1368624373 -documenttype[1].datatype[1].sstruct.field[0].detailedtype "" +documenttype[1].datatype[0].sstruct.field[0].name "doc_field" +documenttype[1].datatype[0].sstruct.field[0].id 819293364 +documenttype[1].datatype[0].sstruct.field[0].datatype -1368624373 +documenttype[1].datatype[0].sstruct.field[0].detailedtype "" documenttype[1].fieldsets{[document]}.fields[0] "doc_field" diff --git 
a/config-model/src/test/configmodel/types/references/documentmanager_multiple_imported_fields.cfg b/config-model/src/test/configmodel/types/references/documentmanager_multiple_imported_fields.cfg index 7ae73c23685..bf7632a504c 100644 --- a/config-model/src/test/configmodel/types/references/documentmanager_multiple_imported_fields.cfg +++ b/config-model/src/test/configmodel/types/references/documentmanager_multiple_imported_fields.cfg @@ -1,4 +1,5 @@ enablecompression false +usev8geopositions false datatype[0].id 1381038251 datatype[0].structtype[0].name "position" datatype[0].structtype[0].version 0 diff --git a/config-model/src/test/configmodel/types/references/documentmanager_ref_to_self_type.cfg b/config-model/src/test/configmodel/types/references/documentmanager_ref_to_self_type.cfg index a613c2c034d..d105b894b63 100644 --- a/config-model/src/test/configmodel/types/references/documentmanager_ref_to_self_type.cfg +++ b/config-model/src/test/configmodel/types/references/documentmanager_ref_to_self_type.cfg @@ -1,4 +1,5 @@ enablecompression false +usev8geopositions false datatype[].id 1381038251 datatype[].structtype[].name "position" datatype[].structtype[].version 0 diff --git a/config-model/src/test/configmodel/types/references/documentmanager_refs_to_other_types.cfg b/config-model/src/test/configmodel/types/references/documentmanager_refs_to_other_types.cfg index 2b6e2e852a3..d7c9ddf8a70 100644 --- a/config-model/src/test/configmodel/types/references/documentmanager_refs_to_other_types.cfg +++ b/config-model/src/test/configmodel/types/references/documentmanager_refs_to_other_types.cfg @@ -1,4 +1,5 @@ enablecompression false +usev8geopositions false datatype[0].id 1381038251 datatype[0].structtype[0].name "position" datatype[0].structtype[0].version 0 diff --git a/config-model/src/test/configmodel/types/references/documentmanager_refs_to_same_type.cfg b/config-model/src/test/configmodel/types/references/documentmanager_refs_to_same_type.cfg index 
bab281cca36..a99bac3a831 100644 --- a/config-model/src/test/configmodel/types/references/documentmanager_refs_to_same_type.cfg +++ b/config-model/src/test/configmodel/types/references/documentmanager_refs_to_same_type.cfg @@ -1,4 +1,5 @@ enablecompression false +usev8geopositions false datatype[0].id 1381038251 datatype[0].structtype[0].name "position" datatype[0].structtype[0].version 0 diff --git a/config-model/src/test/configmodel/types/references/documenttypes_multiple_imported_fields.cfg b/config-model/src/test/configmodel/types/references/documenttypes_multiple_imported_fields.cfg index 242310b57a4..d992839d5d9 100644 --- a/config-model/src/test/configmodel/types/references/documenttypes_multiple_imported_fields.cfg +++ b/config-model/src/test/configmodel/types/references/documenttypes_multiple_imported_fields.cfg @@ -1,4 +1,5 @@ enablecompression false +usev8geopositions false documenttype[0].id 2987301 documenttype[0].name "ad" documenttype[0].version 0 diff --git a/config-model/src/test/configmodel/types/references/documenttypes_ref_to_self_type.cfg b/config-model/src/test/configmodel/types/references/documenttypes_ref_to_self_type.cfg index f925ac99a25..19bcb81db38 100644 --- a/config-model/src/test/configmodel/types/references/documenttypes_ref_to_self_type.cfg +++ b/config-model/src/test/configmodel/types/references/documenttypes_ref_to_self_type.cfg @@ -1,4 +1,5 @@ enablecompression false +usev8geopositions false documenttype[].id 2987301 documenttype[].name "ad" documenttype[].version 0 diff --git a/config-model/src/test/configmodel/types/references/documenttypes_refs_to_other_types.cfg b/config-model/src/test/configmodel/types/references/documenttypes_refs_to_other_types.cfg index c3aba21a498..68ed924615f 100644 --- a/config-model/src/test/configmodel/types/references/documenttypes_refs_to_other_types.cfg +++ b/config-model/src/test/configmodel/types/references/documenttypes_refs_to_other_types.cfg @@ -1,4 +1,5 @@ enablecompression false 
+usev8geopositions false documenttype[0].id 2987301 documenttype[0].name "ad" documenttype[0].version 0 diff --git a/config-model/src/test/configmodel/types/references/documenttypes_refs_to_same_type.cfg b/config-model/src/test/configmodel/types/references/documenttypes_refs_to_same_type.cfg index c5930449dc1..6415e62cd7e 100644 --- a/config-model/src/test/configmodel/types/references/documenttypes_refs_to_same_type.cfg +++ b/config-model/src/test/configmodel/types/references/documenttypes_refs_to_same_type.cfg @@ -1,4 +1,5 @@ enablecompression false +usev8geopositions false documenttype[0].id 2987301 documenttype[0].name "ad" documenttype[0].version 0 diff --git a/config-model/src/test/derived/advanced/documentmanager.cfg b/config-model/src/test/derived/advanced/documentmanager.cfg index 4da92d82fb9..c317c19b09b 100644 --- a/config-model/src/test/derived/advanced/documentmanager.cfg +++ b/config-model/src/test/derived/advanced/documentmanager.cfg @@ -1,4 +1,5 @@ enablecompression false +usev8geopositions false datatype[].id 1381038251 datatype[].structtype[].name "position" datatype[].structtype[].version 0 diff --git a/config-model/src/test/derived/advanced/summary.cfg b/config-model/src/test/derived/advanced/summary.cfg index f497461b460..e4845c1994e 100644 --- a/config-model/src/test/derived/advanced/summary.cfg +++ b/config-model/src/test/derived/advanced/summary.cfg @@ -1,4 +1,5 @@ defaultsummaryid 1271952241 +usev8geopositions false classes[].id 1271952241 classes[].name "default" classes[].omitsummaryfeatures false diff --git a/config-model/src/test/derived/annotationsimplicitstruct/documentmanager.cfg b/config-model/src/test/derived/annotationsimplicitstruct/documentmanager.cfg index aa74ecebd5b..cdf554cb747 100644 --- a/config-model/src/test/derived/annotationsimplicitstruct/documentmanager.cfg +++ b/config-model/src/test/derived/annotationsimplicitstruct/documentmanager.cfg @@ -1,4 +1,5 @@ enablecompression false +usev8geopositions false datatype[].id 
1381038251 datatype[].structtype[].name "position" datatype[].structtype[].version 0 diff --git a/config-model/src/test/derived/annotationsinheritance/documentmanager.cfg b/config-model/src/test/derived/annotationsinheritance/documentmanager.cfg index e103218793d..9633eaa532b 100644 --- a/config-model/src/test/derived/annotationsinheritance/documentmanager.cfg +++ b/config-model/src/test/derived/annotationsinheritance/documentmanager.cfg @@ -1,4 +1,5 @@ enablecompression false +usev8geopositions false datatype[].id 1381038251 datatype[].structtype[].name "position" datatype[].structtype[].version 0 diff --git a/config-model/src/test/derived/annotationsinheritance2/documentmanager.cfg b/config-model/src/test/derived/annotationsinheritance2/documentmanager.cfg index 5b5b2ac348f..85aef02bb3c 100644 --- a/config-model/src/test/derived/annotationsinheritance2/documentmanager.cfg +++ b/config-model/src/test/derived/annotationsinheritance2/documentmanager.cfg @@ -1,4 +1,5 @@ enablecompression false +usev8geopositions false datatype[].id 1381038251 datatype[].structtype[].name "position" datatype[].structtype[].version 0 diff --git a/config-model/src/test/derived/annotationspolymorphy/documentmanager.cfg b/config-model/src/test/derived/annotationspolymorphy/documentmanager.cfg index 1f71057f268..a8d46f2a940 100644 --- a/config-model/src/test/derived/annotationspolymorphy/documentmanager.cfg +++ b/config-model/src/test/derived/annotationspolymorphy/documentmanager.cfg @@ -1,4 +1,5 @@ enablecompression false +usev8geopositions false datatype[].id 1381038251 datatype[].structtype[].name "position" datatype[].structtype[].version 0 diff --git a/config-model/src/test/derived/annotationsreference/documentmanager.cfg b/config-model/src/test/derived/annotationsreference/documentmanager.cfg index 737bcbf3cac..7ce09a97be1 100644 --- a/config-model/src/test/derived/annotationsreference/documentmanager.cfg +++ b/config-model/src/test/derived/annotationsreference/documentmanager.cfg @@ 
-1,4 +1,5 @@ enablecompression false +usev8geopositions false datatype[].id 1381038251 datatype[].structtype[].name "position" datatype[].structtype[].version 0 diff --git a/config-model/src/test/derived/annotationssimple/documentmanager.cfg b/config-model/src/test/derived/annotationssimple/documentmanager.cfg index 3af65e96558..1342a179239 100644 --- a/config-model/src/test/derived/annotationssimple/documentmanager.cfg +++ b/config-model/src/test/derived/annotationssimple/documentmanager.cfg @@ -1,4 +1,5 @@ enablecompression false +usev8geopositions false datatype[].id 1381038251 datatype[].structtype[].name "position" datatype[].structtype[].version 0 diff --git a/config-model/src/test/derived/annotationsstruct/documentmanager.cfg b/config-model/src/test/derived/annotationsstruct/documentmanager.cfg index 0a1cda99a95..233c2f729fe 100644 --- a/config-model/src/test/derived/annotationsstruct/documentmanager.cfg +++ b/config-model/src/test/derived/annotationsstruct/documentmanager.cfg @@ -1,4 +1,5 @@ enablecompression false +usev8geopositions false datatype[].id 1381038251 datatype[].structtype[].name "position" datatype[].structtype[].version 0 diff --git a/config-model/src/test/derived/annotationsstructarray/documentmanager.cfg b/config-model/src/test/derived/annotationsstructarray/documentmanager.cfg index fca86c58ffa..19c1c5eda2f 100644 --- a/config-model/src/test/derived/annotationsstructarray/documentmanager.cfg +++ b/config-model/src/test/derived/annotationsstructarray/documentmanager.cfg @@ -1,4 +1,5 @@ enablecompression false +usev8geopositions false datatype[].id 1381038251 datatype[].structtype[].name "position" datatype[].structtype[].version 0 diff --git a/config-model/src/test/derived/array_of_struct_attribute/summary.cfg b/config-model/src/test/derived/array_of_struct_attribute/summary.cfg index 965c875d5ce..e750d1454e8 100644 --- a/config-model/src/test/derived/array_of_struct_attribute/summary.cfg +++ 
b/config-model/src/test/derived/array_of_struct_attribute/summary.cfg @@ -1,4 +1,5 @@ defaultsummaryid 252850086 +usev8geopositions false classes[].id 252850086 classes[].name "default" classes[].omitsummaryfeatures false diff --git a/config-model/src/test/derived/arrays/documentmanager.cfg b/config-model/src/test/derived/arrays/documentmanager.cfg index f542a936574..554cf017b54 100644 --- a/config-model/src/test/derived/arrays/documentmanager.cfg +++ b/config-model/src/test/derived/arrays/documentmanager.cfg @@ -1,4 +1,5 @@ enablecompression false +usev8geopositions false datatype[].id 1381038251 datatype[].structtype[].name "position" datatype[].structtype[].version 0 diff --git a/config-model/src/test/derived/attributeprefetch/documentmanager.cfg b/config-model/src/test/derived/attributeprefetch/documentmanager.cfg index dc208a86913..b26698d83a6 100644 --- a/config-model/src/test/derived/attributeprefetch/documentmanager.cfg +++ b/config-model/src/test/derived/attributeprefetch/documentmanager.cfg @@ -1,4 +1,5 @@ enablecompression false +usev8geopositions false datatype[].id 1381038251 datatype[].structtype[].name "position" datatype[].structtype[].version 0 diff --git a/config-model/src/test/derived/attributeprefetch/summary.cfg b/config-model/src/test/derived/attributeprefetch/summary.cfg index f0189f9a3c7..f52952e2871 100644 --- a/config-model/src/test/derived/attributeprefetch/summary.cfg +++ b/config-model/src/test/derived/attributeprefetch/summary.cfg @@ -1,4 +1,5 @@ defaultsummaryid 1151071433 +usev8geopositions false classes[].id 1151071433 classes[].name "default" classes[].omitsummaryfeatures false diff --git a/config-model/src/test/derived/complex/documentmanager.cfg b/config-model/src/test/derived/complex/documentmanager.cfg index 50d5dac1ef9..bc5947ad2b5 100644 --- a/config-model/src/test/derived/complex/documentmanager.cfg +++ b/config-model/src/test/derived/complex/documentmanager.cfg @@ -1,4 +1,5 @@ enablecompression false +usev8geopositions 
false datatype[].id 1381038251 datatype[].structtype[].name "position" datatype[].structtype[].version 0 diff --git a/config-model/src/test/derived/complex/summary.cfg b/config-model/src/test/derived/complex/summary.cfg index 2dac4736d23..1eeef44cd54 100644 --- a/config-model/src/test/derived/complex/summary.cfg +++ b/config-model/src/test/derived/complex/summary.cfg @@ -1,4 +1,5 @@ defaultsummaryid 1506848752 +usev8geopositions false classes[].id 1506848752 classes[].name "default" classes[].omitsummaryfeatures false diff --git a/config-model/src/test/derived/emptychild/summary.cfg b/config-model/src/test/derived/emptychild/summary.cfg index 82bed7fd55e..733fa1fde54 100644 --- a/config-model/src/test/derived/emptychild/summary.cfg +++ b/config-model/src/test/derived/emptychild/summary.cfg @@ -1,4 +1,5 @@ defaultsummaryid 1814603381 +usev8geopositions false classes[].id 1814603381 classes[].name "default" classes[].omitsummaryfeatures false diff --git a/config-model/src/test/derived/emptydefault/documentmanager.cfg b/config-model/src/test/derived/emptydefault/documentmanager.cfg index e69b2c5d8c3..f4234aee087 100644 --- a/config-model/src/test/derived/emptydefault/documentmanager.cfg +++ b/config-model/src/test/derived/emptydefault/documentmanager.cfg @@ -1,4 +1,5 @@ enablecompression false +usev8geopositions false datatype[].id 1381038251 datatype[].structtype[].name "position" datatype[].structtype[].version 0 diff --git a/config-model/src/test/derived/emptydefault/summary.cfg b/config-model/src/test/derived/emptydefault/summary.cfg index 61294d97b4c..5aacec1d0b6 100644 --- a/config-model/src/test/derived/emptydefault/summary.cfg +++ b/config-model/src/test/derived/emptydefault/summary.cfg @@ -1,4 +1,5 @@ defaultsummaryid 1151071433 +usev8geopositions false classes[].id 1151071433 classes[].name "default" classes[].omitsummaryfeatures false diff --git a/config-model/src/test/derived/id/documentmanager.cfg b/config-model/src/test/derived/id/documentmanager.cfg 
index 8ee82cdd946..dad69375887 100644 --- a/config-model/src/test/derived/id/documentmanager.cfg +++ b/config-model/src/test/derived/id/documentmanager.cfg @@ -1,4 +1,5 @@ enablecompression false +usev8geopositions false datatype[].id 1381038251 datatype[].structtype[].name "position" datatype[].structtype[].version 0 diff --git a/config-model/src/test/derived/id/summary.cfg b/config-model/src/test/derived/id/summary.cfg index b50b970afe2..f7e9f6a239f 100644 --- a/config-model/src/test/derived/id/summary.cfg +++ b/config-model/src/test/derived/id/summary.cfg @@ -1,4 +1,5 @@ defaultsummaryid 1814716401 +usev8geopositions false classes[].id 1814716401 classes[].name "default" classes[].omitsummaryfeatures false diff --git a/config-model/src/test/derived/imported_fields_inherited_reference/documenttypes.cfg b/config-model/src/test/derived/imported_fields_inherited_reference/documenttypes.cfg index ca490b053f7..311c85a6bb1 100644 --- a/config-model/src/test/derived/imported_fields_inherited_reference/documenttypes.cfg +++ b/config-model/src/test/derived/imported_fields_inherited_reference/documenttypes.cfg @@ -1,4 +1,5 @@ enablecompression false +usev8geopositions false documenttype[].id -94853056 documenttype[].name "child_a" documenttype[].version 0 diff --git a/config-model/src/test/derived/imported_position_field/summary.cfg b/config-model/src/test/derived/imported_position_field/summary.cfg index 3ab8e7e29e5..722443641cd 100644 --- a/config-model/src/test/derived/imported_position_field/summary.cfg +++ b/config-model/src/test/derived/imported_position_field/summary.cfg @@ -1,4 +1,5 @@ defaultsummaryid 1570252291 +usev8geopositions false classes[].id 1570252291 classes[].name "default" classes[].omitsummaryfeatures false diff --git a/config-model/src/test/derived/imported_position_field_summary/summary.cfg b/config-model/src/test/derived/imported_position_field_summary/summary.cfg index 76faac23170..0642382aabe 100644 --- 
a/config-model/src/test/derived/imported_position_field_summary/summary.cfg +++ b/config-model/src/test/derived/imported_position_field_summary/summary.cfg @@ -1,4 +1,5 @@ defaultsummaryid 1194448774 +usev8geopositions false classes[].id 1194448774 classes[].name "default" classes[].omitsummaryfeatures false diff --git a/config-model/src/test/derived/imported_struct_fields/summary.cfg b/config-model/src/test/derived/imported_struct_fields/summary.cfg index ab6c6853925..0a9b29524e1 100644 --- a/config-model/src/test/derived/imported_struct_fields/summary.cfg +++ b/config-model/src/test/derived/imported_struct_fields/summary.cfg @@ -1,4 +1,5 @@ defaultsummaryid 1570252291 +usev8geopositions false classes[].id 1570252291 classes[].name "default" classes[].omitsummaryfeatures false diff --git a/config-model/src/test/derived/importedfields/summary.cfg b/config-model/src/test/derived/importedfields/summary.cfg index 74b5b44214e..975c601a97d 100644 --- a/config-model/src/test/derived/importedfields/summary.cfg +++ b/config-model/src/test/derived/importedfields/summary.cfg @@ -1,4 +1,5 @@ defaultsummaryid 1294344677 +usev8geopositions false classes[].id 1294344677 classes[].name "default" classes[].omitsummaryfeatures false diff --git a/config-model/src/test/derived/indexswitches/documentmanager.cfg b/config-model/src/test/derived/indexswitches/documentmanager.cfg index ffeaab177ba..8ba249ed751 100644 --- a/config-model/src/test/derived/indexswitches/documentmanager.cfg +++ b/config-model/src/test/derived/indexswitches/documentmanager.cfg @@ -1,4 +1,5 @@ enablecompression false +usev8geopositions false datatype[].id 1381038251 datatype[].structtype[].name "position" datatype[].structtype[].version 0 diff --git a/config-model/src/test/derived/indexswitches/summary.cfg b/config-model/src/test/derived/indexswitches/summary.cfg index d04bc4eb167..bcc050dad4f 100644 --- a/config-model/src/test/derived/indexswitches/summary.cfg +++ 
b/config-model/src/test/derived/indexswitches/summary.cfg @@ -1,4 +1,5 @@ defaultsummaryid 1698765342 +usev8geopositions false classes[].id 1698765342 classes[].name "default" classes[].omitsummaryfeatures false diff --git a/config-model/src/test/derived/inheritance/documentmanager.cfg b/config-model/src/test/derived/inheritance/documentmanager.cfg index e054019bd8f..4a25f8c3a64 100644 --- a/config-model/src/test/derived/inheritance/documentmanager.cfg +++ b/config-model/src/test/derived/inheritance/documentmanager.cfg @@ -1,115 +1,106 @@ enablecompression false -datatype[].id 1381038251 -datatype[].structtype[].name "position" -datatype[].structtype[].version 0 -datatype[].structtype[].compresstype NONE -datatype[].structtype[].compresslevel 0 -datatype[].structtype[].compressthreshold 95 -datatype[].structtype[].compressminsize 800 -datatype[].structtype[].field[].name "x" -datatype[].structtype[].field[].datatype 0 -datatype[].structtype[].field[].detailedtype "" -datatype[].structtype[].field[].name "y" -datatype[].structtype[].field[].datatype 0 -datatype[].structtype[].field[].detailedtype "" -datatype[].id 990971719 -datatype[].structtype[].name "grandparent.header" -datatype[].structtype[].version 0 -datatype[].structtype[].compresstype NONE -datatype[].structtype[].compresslevel 0 -datatype[].structtype[].compressthreshold 95 -datatype[].structtype[].compressminsize 800 -datatype[].structtype[].field[].name "onlygrandparent" -datatype[].structtype[].field[].datatype 0 -datatype[].structtype[].field[].detailedtype "" -datatype[].structtype[].field[].name "overridden" -datatype[].structtype[].field[].datatype 0 -datatype[].structtype[].field[].detailedtype "" -datatype[].id -154107656 -datatype[].documenttype[].name "grandparent" -datatype[].documenttype[].version 0 -datatype[].documenttype[].inherits[].name "document" -datatype[].documenttype[].inherits[].version 0 -datatype[].documenttype[].headerstruct 990971719 -datatype[].documenttype[].bodystruct 0 
-datatype[].documenttype[].fieldsets{[document]}.fields[] "onlygrandparent" -datatype[].documenttype[].fieldsets{[document]}.fields[] "overridden" -datatype[].id 1306663898 -datatype[].structtype[].name "mother.header" -datatype[].structtype[].version 0 -datatype[].structtype[].compresstype NONE -datatype[].structtype[].compresslevel 0 -datatype[].structtype[].compressthreshold 95 -datatype[].structtype[].compressminsize 800 -datatype[].structtype[].field[].name "onlymother" -datatype[].structtype[].field[].datatype 2 -datatype[].structtype[].field[].detailedtype "" -datatype[].structtype[].field[].name "overridden" -datatype[].structtype[].field[].datatype 0 -datatype[].structtype[].field[].detailedtype "" -datatype[].id -158393403 -datatype[].documenttype[].name "mother" -datatype[].documenttype[].version 0 -datatype[].documenttype[].inherits[].name "grandparent" -datatype[].documenttype[].inherits[].version 0 -datatype[].documenttype[].inherits[].name "document" -datatype[].documenttype[].inherits[].version 0 -datatype[].documenttype[].headerstruct 1306663898 -datatype[].documenttype[].bodystruct 0 -datatype[].documenttype[].fieldsets{[document]}.fields[] "onlygrandparent" -datatype[].documenttype[].fieldsets{[document]}.fields[] "onlymother" -datatype[].documenttype[].fieldsets{[document]}.fields[] "overridden" -datatype[].id 2126589281 -datatype[].structtype[].name "father.header" -datatype[].structtype[].version 0 -datatype[].structtype[].compresstype NONE -datatype[].structtype[].compresslevel 0 -datatype[].structtype[].compressthreshold 95 -datatype[].structtype[].compressminsize 800 -datatype[].structtype[].field[].name "onlyfather" -datatype[].structtype[].field[].datatype 2 -datatype[].structtype[].field[].detailedtype "" -datatype[].structtype[].field[].name "overridden" -datatype[].structtype[].field[].datatype 0 -datatype[].structtype[].field[].detailedtype "" -datatype[].id 986686494 -datatype[].documenttype[].name "father" 
-datatype[].documenttype[].version 0 -datatype[].documenttype[].inherits[].name "grandparent" -datatype[].documenttype[].inherits[].version 0 -datatype[].documenttype[].inherits[].name "document" -datatype[].documenttype[].inherits[].version 0 -datatype[].documenttype[].headerstruct 2126589281 -datatype[].documenttype[].bodystruct 0 -datatype[].documenttype[].fieldsets{[document]}.fields[] "onlyfather" -datatype[].documenttype[].fieldsets{[document]}.fields[] "onlygrandparent" -datatype[].documenttype[].fieldsets{[document]}.fields[] "overridden" -datatype[].id 81425825 -datatype[].structtype[].name "child.header" -datatype[].structtype[].version 0 -datatype[].structtype[].compresstype NONE -datatype[].structtype[].compresslevel 0 -datatype[].structtype[].compressthreshold 95 -datatype[].structtype[].compressminsize 800 -datatype[].structtype[].field[].name "onlychild" -datatype[].structtype[].field[].datatype 2 -datatype[].structtype[].field[].detailedtype "" -datatype[].structtype[].field[].name "overridden" -datatype[].structtype[].field[].datatype 0 -datatype[].structtype[].field[].detailedtype "" -datatype[].id 746267614 -datatype[].documenttype[].name "child" -datatype[].documenttype[].version 0 -datatype[].documenttype[].inherits[].name "document" -datatype[].documenttype[].inherits[].version 0 -datatype[].documenttype[].inherits[].name "father" -datatype[].documenttype[].inherits[].version 0 -datatype[].documenttype[].inherits[].name "mother" -datatype[].documenttype[].inherits[].version 0 -datatype[].documenttype[].headerstruct 81425825 -datatype[].documenttype[].bodystruct 0 -datatype[].documenttype[].fieldsets{[document]}.fields[] "onlychild" -datatype[].documenttype[].fieldsets{[document]}.fields[] "onlyfather" -datatype[].documenttype[].fieldsets{[document]}.fields[] "onlygrandparent" -datatype[].documenttype[].fieldsets{[document]}.fields[] "onlymother" -datatype[].documenttype[].fieldsets{[document]}.fields[] "overridden" +usev8geopositions false 
+doctype[0].name "document" +doctype[0].idx 10000 +doctype[0].contentstruct 10001 +doctype[0].primitivetype[0].idx 10002 +doctype[0].primitivetype[0].name "byte" +doctype[0].primitivetype[1].idx 10003 +doctype[0].primitivetype[1].name "int" +doctype[0].primitivetype[2].idx 10004 +doctype[0].primitivetype[2].name "long" +doctype[0].primitivetype[3].idx 10005 +doctype[0].primitivetype[3].name "string" +doctype[0].primitivetype[4].idx 10006 +doctype[0].primitivetype[4].name "raw" +doctype[0].primitivetype[5].idx 10008 +doctype[0].primitivetype[5].name "float" +doctype[0].primitivetype[6].idx 10009 +doctype[0].primitivetype[6].name "double" +doctype[0].primitivetype[7].idx 10011 +doctype[0].primitivetype[7].name "uri" +doctype[0].primitivetype[8].idx 10012 +doctype[0].primitivetype[8].name "predicate" +doctype[0].primitivetype[9].idx 10013 +doctype[0].primitivetype[9].name "bool" +doctype[0].primitivetype[10].idx 10014 +doctype[0].primitivetype[10].name "float16" +doctype[0].wsettype[0].idx 10007 +doctype[0].wsettype[0].elementtype 10005 +doctype[0].wsettype[0].createifnonexistent true +doctype[0].wsettype[0].removeifzero true +doctype[0].structtype[0].idx 10001 +doctype[0].structtype[0].name "document.header" +doctype[0].structtype[1].idx 10010 +doctype[0].structtype[1].name "position" +doctype[0].structtype[1].field[0].name "x" +doctype[0].structtype[1].field[0].internalid 914677694 +doctype[0].structtype[1].field[0].type 10003 +doctype[0].structtype[1].field[1].name "y" +doctype[0].structtype[1].field[1].internalid 900009410 +doctype[0].structtype[1].field[1].type 10003 +doctype[1].name "grandparent" +doctype[1].idx 10015 +doctype[1].inherits[0].idx 10000 +doctype[1].contentstruct 10016 +doctype[1].fieldsets{[document]}.fields[0] "onlygrandparent" +doctype[1].fieldsets{[document]}.fields[1] "overridden" +doctype[1].structtype[0].idx 10016 +doctype[1].structtype[0].name "grandparent.header" +doctype[1].structtype[0].field[0].name "onlygrandparent" 
+doctype[1].structtype[0].field[0].internalid 1456982690 +doctype[1].structtype[0].field[0].type 10003 +doctype[1].structtype[0].field[1].name "overridden" +doctype[1].structtype[0].field[1].internalid 1314355415 +doctype[1].structtype[0].field[1].type 10003 +doctype[2].name "mother" +doctype[2].idx 10017 +doctype[2].inherits[0].idx 10015 +doctype[2].inherits[1].idx 10000 +doctype[2].contentstruct 10018 +doctype[2].fieldsets{[document]}.fields[0] "onlygrandparent" +doctype[2].fieldsets{[document]}.fields[1] "onlymother" +doctype[2].fieldsets{[document]}.fields[2] "overridden" +doctype[2].structtype[0].idx 10018 +doctype[2].structtype[0].name "mother.header" +doctype[2].structtype[0].field[0].name "onlymother" +doctype[2].structtype[0].field[0].internalid 1390999339 +doctype[2].structtype[0].field[0].type 10005 +doctype[2].structtype[0].field[1].name "overridden" +doctype[2].structtype[0].field[1].internalid 1314355415 +doctype[2].structtype[0].field[1].type 10003 +doctype[3].name "father" +doctype[3].idx 10019 +doctype[3].inherits[0].idx 10015 +doctype[3].inherits[1].idx 10000 +doctype[3].contentstruct 10020 +doctype[3].fieldsets{[document]}.fields[0] "onlyfather" +doctype[3].fieldsets{[document]}.fields[1] "onlygrandparent" +doctype[3].fieldsets{[document]}.fields[2] "overridden" +doctype[3].structtype[0].idx 10020 +doctype[3].structtype[0].name "father.header" +doctype[3].structtype[0].field[0].name "onlyfather" +doctype[3].structtype[0].field[0].internalid 1083094308 +doctype[3].structtype[0].field[0].type 10005 +doctype[3].structtype[0].field[1].name "overridden" +doctype[3].structtype[0].field[1].internalid 1314355415 +doctype[3].structtype[0].field[1].type 10003 +doctype[4].name "child" +doctype[4].idx 10021 +doctype[4].inherits[0].idx 10000 +doctype[4].inherits[1].idx 10019 +doctype[4].inherits[2].idx 10017 +doctype[4].contentstruct 10022 +doctype[4].fieldsets{[document]}.fields[0] "onlychild" +doctype[4].fieldsets{[document]}.fields[1] "onlyfather" 
+doctype[4].fieldsets{[document]}.fields[2] "onlygrandparent" +doctype[4].fieldsets{[document]}.fields[3] "onlymother" +doctype[4].fieldsets{[document]}.fields[4] "overridden" +doctype[4].structtype[0].idx 10022 +doctype[4].structtype[0].name "child.header" +doctype[4].structtype[0].field[0].name "onlychild" +doctype[4].structtype[0].field[0].internalid 1737375598 +doctype[4].structtype[0].field[0].type 10005 +doctype[4].structtype[0].field[1].name "overridden" +doctype[4].structtype[0].field[1].internalid 1314355415 +doctype[4].structtype[0].field[1].type 10003 diff --git a/config-model/src/test/derived/inheritance/mother/documentmanager.cfg b/config-model/src/test/derived/inheritance/mother/documentmanager.cfg deleted file mode 100644 index 71da9265521..00000000000 --- a/config-model/src/test/derived/inheritance/mother/documentmanager.cfg +++ /dev/null @@ -1,175 +0,0 @@ -enablecompression false -datatype[-126593034].id -126593034 -datatype[-126593034].structtype[single].name "child.body" -datatype[-126593034].structtype[single].version 0 -datatype[-141935690].id -141935690 -datatype[-141935690].structtype[single].name "search_smartsummary" -datatype[-141935690].structtype[single].version 0 -datatype[-141935690].structtype[single].field[abstract].datatype 2 -datatype[-141935690].structtype[single].field[abstract].name "abstract" -datatype[-141935690].structtype[single].field[dispurl].datatype 2 -datatype[-141935690].structtype[single].field[dispurl].name "dispurl" -datatype[-141935690].structtype[single].field[title].datatype 2 -datatype[-141935690].structtype[single].field[title].name "title" -datatype[-1467672569].id -1467672569 -datatype[-1467672569].structtype[single].name "child_search.body" -datatype[-1467672569].structtype[single].version 0 -datatype[-154107656].id -154107656 -datatype[-154107656].documenttype[single].bodystruct 978262812 -datatype[-154107656].documenttype[single].headerstruct 990971719 -datatype[-154107656].documenttype[single].name 
"grandparent" -datatype[-154107656].documenttype[single].version 0 -datatype[-158393403].id -158393403 -datatype[-158393403].documenttype[single].bodystruct -1989003153 -datatype[-158393403].documenttype[single].headerstruct 1306663898 -datatype[-158393403].documenttype[single].name "mother" -datatype[-158393403].documenttype[single].version 0 -datatype[-158393403].documenttype[single].inherits[grandparent].name "grandparent" -datatype[-158393403].documenttype[single].inherits[grandparent].version 0 -datatype[-1740240543].id -1740240543 -datatype[-1740240543].structtype[single].name "search_feature" -datatype[-1740240543].structtype[single].version 0 -datatype[-1740240543].structtype[single].field[name].datatype 2 -datatype[-1740240543].structtype[single].field[name].name "name" -datatype[-1740240543].structtype[single].field[value].datatype 5 -datatype[-1740240543].structtype[single].field[value].name "value" -datatype[-1742340170].id -1742340170 -datatype[-1742340170].structtype[single].name "father.body" -datatype[-1742340170].structtype[single].version 0 -datatype[-1852215954].id -1852215954 -datatype[-1852215954].structtype[single].name "mother_search.body" -datatype[-1852215954].structtype[single].version 0 -datatype[-1962244686].id -1962244686 -datatype[-1962244686].structtype[single].name "father_search.header" -datatype[-1962244686].structtype[single].version 0 -datatype[-1962244686].structtype[single].field[onlyfather].datatype 2 -datatype[-1962244686].structtype[single].field[onlyfather].name "onlyfather" -datatype[-1962244686].structtype[single].field[onlygrandparent].datatype 0 -datatype[-1962244686].structtype[single].field[onlygrandparent].name "onlygrandparent" -datatype[-1962244686].structtype[single].field[overridden].datatype 0 -datatype[-1962244686].structtype[single].field[overridden].name "overridden" -datatype[-1989003153].id -1989003153 -datatype[-1989003153].structtype[single].name "mother.body" 
-datatype[-1989003153].structtype[single].version 0 -datatype[-205818510].id -205818510 -datatype[-205818510].structtype[single].name "child_search.header" -datatype[-205818510].structtype[single].version 0 -datatype[-205818510].structtype[single].field[onlychild].datatype 2 -datatype[-205818510].structtype[single].field[onlychild].name "onlychild" -datatype[-205818510].structtype[single].field[onlyfather].datatype 2 -datatype[-205818510].structtype[single].field[onlyfather].name "onlyfather" -datatype[-205818510].structtype[single].field[onlygrandparent].datatype 0 -datatype[-205818510].structtype[single].field[onlygrandparent].name "onlygrandparent" -datatype[-205818510].structtype[single].field[onlymother].datatype 2 -datatype[-205818510].structtype[single].field[onlymother].name "onlymother" -datatype[-205818510].structtype[single].field[overridden].datatype 0 -datatype[-205818510].structtype[single].field[overridden].name "overridden" -datatype[-384824039].id -384824039 -datatype[-384824039].structtype[single].name "mother_search.header" -datatype[-384824039].structtype[single].version 0 -datatype[-384824039].structtype[single].field[onlygrandparent].datatype 0 -datatype[-384824039].structtype[single].field[onlygrandparent].name "onlygrandparent" -datatype[-384824039].structtype[single].field[onlymother].datatype 2 -datatype[-384824039].structtype[single].field[onlymother].name "onlymother" -datatype[-384824039].structtype[single].field[overridden].datatype 0 -datatype[-384824039].structtype[single].field[overridden].name "overridden" -datatype[-52742073].id -52742073 -datatype[-52742073].structtype[single].name "father_search.body" -datatype[-52742073].structtype[single].version 0 -datatype[-580592339].id -580592339 -datatype[-580592339].documenttype[single].bodystruct -1467672569 -datatype[-580592339].documenttype[single].headerstruct -205818510 -datatype[-580592339].documenttype[single].name "child_search" -datatype[-580592339].documenttype[single].version 
0 -datatype[-876064862].id -876064862 -datatype[-876064862].structtype[single].name "search_position" -datatype[-876064862].structtype[single].version 0 -datatype[-876064862].structtype[single].field[x].datatype 0 -datatype[-876064862].structtype[single].field[x].name "x" -datatype[-876064862].structtype[single].field[y].datatype 0 -datatype[-876064862].structtype[single].field[y].name "y" -datatype[1306663898].id 1306663898 -datatype[1306663898].structtype[single].name "mother.header" -datatype[1306663898].structtype[single].version 0 -datatype[1306663898].structtype[single].field[onlymother].datatype 2 -datatype[1306663898].structtype[single].field[onlymother].name "onlymother" -datatype[1464571117].id 1464571117 -datatype[1464571117].documenttype[single].bodystruct -52742073 -datatype[1464571117].documenttype[single].headerstruct -1962244686 -datatype[1464571117].documenttype[single].name "father_search" -datatype[1464571117].documenttype[single].version 0 -datatype[147991900].id 147991900 -datatype[147991900].arraytype[single].datatype -1740240543 -datatype[1530060044].id 1530060044 -datatype[1530060044].structtype[single].name "grandparent_search.header" -datatype[1530060044].structtype[single].version 0 -datatype[1530060044].structtype[single].field[onlygrandparent].datatype 0 -datatype[1530060044].structtype[single].field[onlygrandparent].name "onlygrandparent" -datatype[1530060044].structtype[single].field[overridden].datatype 0 -datatype[1530060044].structtype[single].field[overridden].name "overridden" -datatype[1845861921].id 1845861921 -datatype[1845861921].structtype[single].name "grandparent_search.body" -datatype[1845861921].structtype[single].version 0 -datatype[2126589281].id 2126589281 -datatype[2126589281].structtype[single].name "father.header" -datatype[2126589281].structtype[single].version 0 -datatype[2126589281].structtype[single].field[onlyfather].datatype 2 -datatype[2126589281].structtype[single].field[onlyfather].name "onlyfather" 
-datatype[328953555].id 328953555 -datatype[328953555].documenttype[single].bodystruct 1845861921 -datatype[328953555].documenttype[single].headerstruct 1530060044 -datatype[328953555].documenttype[single].name "grandparent_search" -datatype[328953555].documenttype[single].version 0 -datatype[464784087].id 464784087 -datatype[464784087].structtype[single].name "search_uri" -datatype[464784087].structtype[single].version 0 -datatype[464784087].structtype[single].field[all].datatype 2 -datatype[464784087].structtype[single].field[all].name "all" -datatype[464784087].structtype[single].field[fragment].datatype 2 -datatype[464784087].structtype[single].field[fragment].name "fragment" -datatype[464784087].structtype[single].field[host].datatype 2 -datatype[464784087].structtype[single].field[host].name "host" -datatype[464784087].structtype[single].field[path].datatype 2 -datatype[464784087].structtype[single].field[path].name "path" -datatype[464784087].structtype[single].field[port].datatype 0 -datatype[464784087].structtype[single].field[port].name "port" -datatype[464784087].structtype[single].field[query].datatype 2 -datatype[464784087].structtype[single].field[query].name "query" -datatype[464784087].structtype[single].field[scheme].datatype 2 -datatype[464784087].structtype[single].field[scheme].name "scheme" -datatype[644645734].id 644645734 -datatype[644645734].documenttype[single].bodystruct -1852215954 -datatype[644645734].documenttype[single].headerstruct -384824039 -datatype[644645734].documenttype[single].name "mother_search" -datatype[644645734].documenttype[single].version 0 -datatype[746267614].id 746267614 -datatype[746267614].documenttype[single].bodystruct -126593034 -datatype[746267614].documenttype[single].headerstruct 81425825 -datatype[746267614].documenttype[single].name "child" -datatype[746267614].documenttype[single].version 0 -datatype[746267614].documenttype[single].inherits[father].name "father" 
-datatype[746267614].documenttype[single].inherits[father].version 0 -datatype[746267614].documenttype[single].inherits[mother].name "mother" -datatype[746267614].documenttype[single].inherits[mother].version 0 -datatype[81425825].id 81425825 -datatype[81425825].structtype[single].name "child.header" -datatype[81425825].structtype[single].version 0 -datatype[81425825].structtype[single].field[onlychild].datatype 2 -datatype[81425825].structtype[single].field[onlychild].name "onlychild" -datatype[978262812].id 978262812 -datatype[978262812].structtype[single].name "grandparent.body" -datatype[978262812].structtype[single].version 0 -datatype[986686494].id 986686494 -datatype[986686494].documenttype[single].bodystruct -1742340170 -datatype[986686494].documenttype[single].headerstruct 2126589281 -datatype[986686494].documenttype[single].name "father" -datatype[986686494].documenttype[single].version 0 -datatype[986686494].documenttype[single].inherits[grandparent].name "grandparent" -datatype[986686494].documenttype[single].inherits[grandparent].version 0 -datatype[990971719].id 990971719 -datatype[990971719].structtype[single].name "grandparent.header" -datatype[990971719].structtype[single].version 0 -datatype[990971719].structtype[single].field[onlygrandparent].datatype 0 -datatype[990971719].structtype[single].field[onlygrandparent].name "onlygrandparent" -datatype[990971719].structtype[single].field[overridden].datatype 0 -datatype[990971719].structtype[single].field[overridden].name "overridden" diff --git a/config-model/src/test/derived/inheritance/summary.cfg b/config-model/src/test/derived/inheritance/summary.cfg index dde9f95ecbe..73c22f82a99 100644 --- a/config-model/src/test/derived/inheritance/summary.cfg +++ b/config-model/src/test/derived/inheritance/summary.cfg @@ -1,4 +1,5 @@ defaultsummaryid 1797992819 +usev8geopositions false classes[].id 1797992819 classes[].name "default" classes[].omitsummaryfeatures false diff --git 
a/config-model/src/test/derived/inheritdiamond/documentmanager.cfg b/config-model/src/test/derived/inheritdiamond/documentmanager.cfg index df3f8908a60..06169c09969 100644 --- a/config-model/src/test/derived/inheritdiamond/documentmanager.cfg +++ b/config-model/src/test/derived/inheritdiamond/documentmanager.cfg @@ -1,4 +1,5 @@ enablecompression false +usev8geopositions false datatype[].id 336538650 datatype[].structtype[].name "child_struct" datatype[].structtype[].version 0 diff --git a/config-model/src/test/derived/inheritfromgrandparent/documentmanager.cfg b/config-model/src/test/derived/inheritfromgrandparent/documentmanager.cfg index 25872641741..cc76fe939b0 100644 --- a/config-model/src/test/derived/inheritfromgrandparent/documentmanager.cfg +++ b/config-model/src/test/derived/inheritfromgrandparent/documentmanager.cfg @@ -1,74 +1,70 @@ enablecompression false -datatype[].id 1381038251 -datatype[].structtype[].name "position" -datatype[].structtype[].version 0 -datatype[].structtype[].compresstype NONE -datatype[].structtype[].compresslevel 0 -datatype[].structtype[].compressthreshold 95 -datatype[].structtype[].compressminsize 800 -datatype[].structtype[].field[].name "x" -datatype[].structtype[].field[].datatype 0 -datatype[].structtype[].field[].detailedtype "" -datatype[].structtype[].field[].name "y" -datatype[].structtype[].field[].datatype 0 -datatype[].structtype[].field[].detailedtype "" -datatype[].id 1246084544 -datatype[].structtype[].name "grandparent_struct" -datatype[].structtype[].version 0 -datatype[].structtype[].compresstype NONE -datatype[].structtype[].compresslevel 0 -datatype[].structtype[].compressthreshold 95 -datatype[].structtype[].compressminsize 800 -datatype[].structtype[].field[].name "grandparent_field" -datatype[].structtype[].field[].datatype 2 -datatype[].structtype[].field[].detailedtype "" -datatype[].id 990971719 -datatype[].structtype[].name "grandparent.header" -datatype[].structtype[].version 0 
-datatype[].structtype[].compresstype NONE -datatype[].structtype[].compresslevel 0 -datatype[].structtype[].compressthreshold 95 -datatype[].structtype[].compressminsize 800 -datatype[].id -154107656 -datatype[].documenttype[].name "grandparent" -datatype[].documenttype[].version 0 -datatype[].documenttype[].inherits[].name "document" -datatype[].documenttype[].inherits[].version 0 -datatype[].documenttype[].headerstruct 990971719 -datatype[].documenttype[].bodystruct 0 -datatype[].id 836075987 -datatype[].structtype[].name "parent.header" -datatype[].structtype[].version 0 -datatype[].structtype[].compresstype NONE -datatype[].structtype[].compresslevel 0 -datatype[].structtype[].compressthreshold 95 -datatype[].structtype[].compressminsize 800 -datatype[].id 1175161836 -datatype[].documenttype[].name "parent" -datatype[].documenttype[].version 0 -datatype[].documenttype[].inherits[].name "grandparent" -datatype[].documenttype[].inherits[].version 0 -datatype[].documenttype[].inherits[].name "document" -datatype[].documenttype[].inherits[].version 0 -datatype[].documenttype[].headerstruct 836075987 -datatype[].documenttype[].bodystruct 0 -datatype[].id 81425825 -datatype[].structtype[].name "child.header" -datatype[].structtype[].version 0 -datatype[].structtype[].compresstype NONE -datatype[].structtype[].compresslevel 0 -datatype[].structtype[].compressthreshold 95 -datatype[].structtype[].compressminsize 800 -datatype[].structtype[].field[].name "child_field" -datatype[].structtype[].field[].datatype 1246084544 -datatype[].structtype[].field[].detailedtype "" -datatype[].id 746267614 -datatype[].documenttype[].name "child" -datatype[].documenttype[].version 0 -datatype[].documenttype[].inherits[].name "document" -datatype[].documenttype[].inherits[].version 0 -datatype[].documenttype[].inherits[].name "parent" -datatype[].documenttype[].inherits[].version 0 -datatype[].documenttype[].headerstruct 81425825 -datatype[].documenttype[].bodystruct 0 
-datatype[].documenttype[].fieldsets{[document]}.fields[] "child_field" +usev8geopositions false +doctype[0].name "document" +doctype[0].idx 10000 +doctype[0].contentstruct 10001 +doctype[0].primitivetype[0].idx 10002 +doctype[0].primitivetype[0].name "byte" +doctype[0].primitivetype[1].idx 10003 +doctype[0].primitivetype[1].name "int" +doctype[0].primitivetype[2].idx 10004 +doctype[0].primitivetype[2].name "long" +doctype[0].primitivetype[3].idx 10005 +doctype[0].primitivetype[3].name "string" +doctype[0].primitivetype[4].idx 10006 +doctype[0].primitivetype[4].name "raw" +doctype[0].primitivetype[5].idx 10008 +doctype[0].primitivetype[5].name "float" +doctype[0].primitivetype[6].idx 10009 +doctype[0].primitivetype[6].name "double" +doctype[0].primitivetype[7].idx 10011 +doctype[0].primitivetype[7].name "uri" +doctype[0].primitivetype[8].idx 10012 +doctype[0].primitivetype[8].name "predicate" +doctype[0].primitivetype[9].idx 10013 +doctype[0].primitivetype[9].name "bool" +doctype[0].primitivetype[10].idx 10014 +doctype[0].primitivetype[10].name "float16" +doctype[0].wsettype[0].idx 10007 +doctype[0].wsettype[0].elementtype 10005 +doctype[0].wsettype[0].createifnonexistent true +doctype[0].wsettype[0].removeifzero true +doctype[0].structtype[0].idx 10001 +doctype[0].structtype[0].name "document.header" +doctype[0].structtype[1].idx 10010 +doctype[0].structtype[1].name "position" +doctype[0].structtype[1].field[0].name "x" +doctype[0].structtype[1].field[0].internalid 914677694 +doctype[0].structtype[1].field[0].type 10003 +doctype[0].structtype[1].field[1].name "y" +doctype[0].structtype[1].field[1].internalid 900009410 +doctype[0].structtype[1].field[1].type 10003 +doctype[1].name "grandparent" +doctype[1].idx 10015 +doctype[1].inherits[0].idx 10000 +doctype[1].contentstruct 10016 +doctype[1].structtype[0].idx 10016 +doctype[1].structtype[0].name "grandparent.header" +doctype[1].structtype[1].idx 10017 +doctype[1].structtype[1].name "grandparent_struct" 
+doctype[1].structtype[1].field[0].name "grandparent_field" +doctype[1].structtype[1].field[0].internalid 18801796 +doctype[1].structtype[1].field[0].type 10005 +doctype[2].name "parent" +doctype[2].idx 10018 +doctype[2].inherits[0].idx 10015 +doctype[2].inherits[1].idx 10000 +doctype[2].contentstruct 10019 +doctype[2].structtype[0].idx 10019 +doctype[2].structtype[0].name "parent.header" +doctype[3].name "child" +doctype[3].idx 10020 +doctype[3].inherits[0].idx 10000 +doctype[3].inherits[1].idx 10018 +doctype[3].contentstruct 10021 +doctype[3].fieldsets{[document]}.fields[0] "child_field" +doctype[3].structtype[0].idx 10021 +doctype[3].structtype[0].name "child.header" +doctype[3].structtype[0].field[0].name "child_field" +doctype[3].structtype[0].field[0].internalid 129089854 +doctype[3].structtype[0].field[0].type 10017 diff --git a/config-model/src/test/derived/inheritfromparent/documentmanager.cfg b/config-model/src/test/derived/inheritfromparent/documentmanager.cfg index c9cd6fd3042..3c7280094be 100644 --- a/config-model/src/test/derived/inheritfromparent/documentmanager.cfg +++ b/config-model/src/test/derived/inheritfromparent/documentmanager.cfg @@ -1,66 +1,71 @@ enablecompression false -datatype[].id 1381038251 -datatype[].structtype[].name "position" -datatype[].structtype[].version 0 -datatype[].structtype[].compresstype NONE -datatype[].structtype[].compresslevel 0 -datatype[].structtype[].compressthreshold 95 -datatype[].structtype[].compressminsize 800 -datatype[].structtype[].field[].name "x" -datatype[].structtype[].field[].datatype 0 -datatype[].structtype[].field[].detailedtype "" -datatype[].structtype[].field[].name "y" -datatype[].structtype[].field[].datatype 0 -datatype[].structtype[].field[].detailedtype "" -datatype[].id 1091188812 -datatype[].structtype[].name "parent_struct" -datatype[].structtype[].version 0 -datatype[].structtype[].compresstype NONE -datatype[].structtype[].compresslevel 0 -datatype[].structtype[].compressthreshold 95 
-datatype[].structtype[].compressminsize 800 -datatype[].structtype[].field[].name "parent_field" -datatype[].structtype[].field[].datatype 2 -datatype[].structtype[].field[].detailedtype "" -datatype[].id 836075987 -datatype[].structtype[].name "parent.header" -datatype[].structtype[].version 0 -datatype[].structtype[].compresstype NONE -datatype[].structtype[].compresslevel 0 -datatype[].structtype[].compressthreshold 95 -datatype[].structtype[].compressminsize 800 -datatype[].structtype[].field[].name "weight_src" -datatype[].structtype[].field[].datatype 1 -datatype[].structtype[].field[].detailedtype "" -datatype[].structtype[].field[].name "weight" -datatype[].structtype[].field[].datatype 1 -datatype[].structtype[].field[].detailedtype "" -datatype[].id 1175161836 -datatype[].documenttype[].name "parent" -datatype[].documenttype[].version 0 -datatype[].documenttype[].inherits[].name "document" -datatype[].documenttype[].inherits[].version 0 -datatype[].documenttype[].headerstruct 836075987 -datatype[].documenttype[].bodystruct 0 -datatype[].documenttype[].fieldsets{[]}.fields[] "weight_src" -datatype[].id 81425825 -datatype[].structtype[].name "child.header" -datatype[].structtype[].version 0 -datatype[].structtype[].compresstype NONE -datatype[].structtype[].compresslevel 0 -datatype[].structtype[].compressthreshold 95 -datatype[].structtype[].compressminsize 800 -datatype[].structtype[].field[].name "child_field" -datatype[].structtype[].field[].datatype 1091188812 -datatype[].structtype[].field[].detailedtype "" -datatype[].id 746267614 -datatype[].documenttype[].name "child" -datatype[].documenttype[].version 0 -datatype[].documenttype[].inherits[].name "document" -datatype[].documenttype[].inherits[].version 0 -datatype[].documenttype[].inherits[].name "parent" -datatype[].documenttype[].inherits[].version 0 -datatype[].documenttype[].headerstruct 81425825 -datatype[].documenttype[].bodystruct 0 -datatype[].documenttype[].fieldsets{[]}.fields[] 
"child_field" -datatype[].documenttype[].fieldsets{[]}.fields[] "weight_src" +usev8geopositions false +doctype[0].name "document" +doctype[0].idx 10000 +doctype[0].contentstruct 10001 +doctype[0].primitivetype[0].idx 10002 +doctype[0].primitivetype[0].name "byte" +doctype[0].primitivetype[1].idx 10003 +doctype[0].primitivetype[1].name "int" +doctype[0].primitivetype[2].idx 10004 +doctype[0].primitivetype[2].name "long" +doctype[0].primitivetype[3].idx 10005 +doctype[0].primitivetype[3].name "string" +doctype[0].primitivetype[4].idx 10006 +doctype[0].primitivetype[4].name "raw" +doctype[0].primitivetype[5].idx 10008 +doctype[0].primitivetype[5].name "float" +doctype[0].primitivetype[6].idx 10009 +doctype[0].primitivetype[6].name "double" +doctype[0].primitivetype[7].idx 10011 +doctype[0].primitivetype[7].name "uri" +doctype[0].primitivetype[8].idx 10012 +doctype[0].primitivetype[8].name "predicate" +doctype[0].primitivetype[9].idx 10013 +doctype[0].primitivetype[9].name "bool" +doctype[0].primitivetype[10].idx 10014 +doctype[0].primitivetype[10].name "float16" +doctype[0].wsettype[0].idx 10007 +doctype[0].wsettype[0].elementtype 10005 +doctype[0].wsettype[0].createifnonexistent true +doctype[0].wsettype[0].removeifzero true +doctype[0].structtype[0].idx 10001 +doctype[0].structtype[0].name "document.header" +doctype[0].structtype[1].idx 10010 +doctype[0].structtype[1].name "position" +doctype[0].structtype[1].field[0].name "x" +doctype[0].structtype[1].field[0].internalid 914677694 +doctype[0].structtype[1].field[0].type 10003 +doctype[0].structtype[1].field[1].name "y" +doctype[0].structtype[1].field[1].internalid 900009410 +doctype[0].structtype[1].field[1].type 10003 +doctype[1].name "parent" +doctype[1].idx 10015 +doctype[1].inherits[0].idx 10000 +doctype[1].contentstruct 10016 +doctype[1].fieldsets{[document]}.fields[0] "weight_src" +doctype[1].structtype[0].idx 10016 +doctype[1].structtype[0].name "parent.header" +doctype[1].structtype[0].field[0].name 
"weight_src" +doctype[1].structtype[0].field[0].internalid 1225660233 +doctype[1].structtype[0].field[0].type 10008 +doctype[1].structtype[0].field[1].name "weight" +doctype[1].structtype[0].field[1].internalid 1001392207 +doctype[1].structtype[0].field[1].type 10008 +doctype[1].structtype[1].idx 10017 +doctype[1].structtype[1].name "parent_struct" +doctype[1].structtype[1].field[0].name "parent_field" +doctype[1].structtype[1].field[0].internalid 933533022 +doctype[1].structtype[1].field[0].type 10005 +doctype[2].name "child" +doctype[2].idx 10018 +doctype[2].inherits[0].idx 10000 +doctype[2].inherits[1].idx 10015 +doctype[2].contentstruct 10019 +doctype[2].fieldsets{[document]}.fields[0] "child_field" +doctype[2].fieldsets{[document]}.fields[1] "weight_src" +doctype[2].structtype[0].idx 10019 +doctype[2].structtype[0].name "child.header" +doctype[2].structtype[0].field[0].name "child_field" +doctype[2].structtype[0].field[0].internalid 1814271363 +doctype[2].structtype[0].field[0].type 10017 diff --git a/config-model/src/test/derived/inheritfromparent/documenttypes.cfg b/config-model/src/test/derived/inheritfromparent/documenttypes.cfg index faef3f6923b..ea7a49b1acf 100644 --- a/config-model/src/test/derived/inheritfromparent/documenttypes.cfg +++ b/config-model/src/test/derived/inheritfromparent/documenttypes.cfg @@ -1,4 +1,5 @@ enablecompression false +usev8geopositions false documenttype[].id 1175161836 documenttype[].name "parent" documenttype[].version 0 diff --git a/config-model/src/test/derived/inheritstruct/child.sd b/config-model/src/test/derived/inheritstruct/child.sd index 0ac4048e5fa..fcc3cececc3 100644 --- a/config-model/src/test/derived/inheritstruct/child.sd +++ b/config-model/src/test/derived/inheritstruct/child.sd @@ -1,9 +1,22 @@ # Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
search child { document child inherits parent { + struct other_struct inherits my_struct { + field my_int type int {} + } + struct wrapper { + field wrapped type my_struct {} + } + field child_struct_field type my_struct { indexing: summary | index match: prefix } + field other_field type other_struct { + indexing: summary + } + field wrapped_field type wrapper { + indexing: summary + } } } diff --git a/config-model/src/test/derived/inheritstruct/index-info.cfg b/config-model/src/test/derived/inheritstruct/index-info.cfg index 21e68f0c127..5afa91ea1bb 100644 --- a/config-model/src/test/derived/inheritstruct/index-info.cfg +++ b/config-model/src/test/derived/inheritstruct/index-info.cfg @@ -1,25 +1,51 @@ -indexinfo[].name "child" -indexinfo[].command[].indexname "sddocname" -indexinfo[].command[].command "index" -indexinfo[].command[].indexname "sddocname" -indexinfo[].command[].command "word" -indexinfo[].command[].indexname "child_struct_field.my_str" -indexinfo[].command[].command "index" -indexinfo[].command[].indexname "child_struct_field.my_str" -indexinfo[].command[].command "lowercase" -indexinfo[].command[].indexname "child_struct_field.my_str" -indexinfo[].command[].command "stem:BEST" -indexinfo[].command[].indexname "child_struct_field.my_str" -indexinfo[].command[].command "normalize" -indexinfo[].command[].indexname "child_struct_field.my_str" -indexinfo[].command[].command "plain-tokens" -indexinfo[].command[].indexname "child_struct_field.my_str" -indexinfo[].command[].command "type string" -indexinfo[].command[].indexname "child_struct_field" -indexinfo[].command[].command "index" -indexinfo[].command[].indexname "child_struct_field" -indexinfo[].command[].command "lowercase" -indexinfo[].command[].indexname "child_struct_field" -indexinfo[].command[].command "plain-tokens" -indexinfo[].command[].indexname "child_struct_field" -indexinfo[].command[].command "type my_struct" +indexinfo[0].name "child" +indexinfo[0].command[0].indexname "sddocname" 
+indexinfo[0].command[0].command "index" +indexinfo[0].command[1].indexname "sddocname" +indexinfo[0].command[1].command "word" +indexinfo[0].command[2].indexname "child_struct_field.my_str" +indexinfo[0].command[2].command "index" +indexinfo[0].command[3].indexname "child_struct_field.my_str" +indexinfo[0].command[3].command "lowercase" +indexinfo[0].command[4].indexname "child_struct_field.my_str" +indexinfo[0].command[4].command "stem:BEST" +indexinfo[0].command[5].indexname "child_struct_field.my_str" +indexinfo[0].command[5].command "normalize" +indexinfo[0].command[6].indexname "child_struct_field.my_str" +indexinfo[0].command[6].command "plain-tokens" +indexinfo[0].command[7].indexname "child_struct_field.my_str" +indexinfo[0].command[7].command "type string" +indexinfo[0].command[8].indexname "child_struct_field" +indexinfo[0].command[8].command "index" +indexinfo[0].command[9].indexname "child_struct_field" +indexinfo[0].command[9].command "lowercase" +indexinfo[0].command[10].indexname "child_struct_field" +indexinfo[0].command[10].command "plain-tokens" +indexinfo[0].command[11].indexname "child_struct_field" +indexinfo[0].command[11].command "type my_struct" +indexinfo[0].command[12].indexname "other_field.my_str" +indexinfo[0].command[12].command "index" +indexinfo[0].command[13].indexname "other_field.my_str" +indexinfo[0].command[13].command "type string" +indexinfo[0].command[14].indexname "other_field.my_int" +indexinfo[0].command[14].command "index" +indexinfo[0].command[15].indexname "other_field.my_int" +indexinfo[0].command[15].command "numerical" +indexinfo[0].command[16].indexname "other_field.my_int" +indexinfo[0].command[16].command "type int" +indexinfo[0].command[17].indexname "other_field" +indexinfo[0].command[17].command "index" +indexinfo[0].command[18].indexname "other_field" +indexinfo[0].command[18].command "type other_struct" +indexinfo[0].command[19].indexname "wrapped_field.wrapped.my_str" +indexinfo[0].command[19].command 
"index" +indexinfo[0].command[20].indexname "wrapped_field.wrapped.my_str" +indexinfo[0].command[20].command "type string" +indexinfo[0].command[21].indexname "wrapped_field.wrapped" +indexinfo[0].command[21].command "index" +indexinfo[0].command[22].indexname "wrapped_field.wrapped" +indexinfo[0].command[22].command "type my_struct" +indexinfo[0].command[23].indexname "wrapped_field" +indexinfo[0].command[23].command "index" +indexinfo[0].command[24].indexname "wrapped_field" +indexinfo[0].command[24].command "type wrapper"
\ No newline at end of file diff --git a/config-model/src/test/derived/integerattributetostringindex/summary.cfg b/config-model/src/test/derived/integerattributetostringindex/summary.cfg index d5eb316ff01..bfbb1eb01ca 100644 --- a/config-model/src/test/derived/integerattributetostringindex/summary.cfg +++ b/config-model/src/test/derived/integerattributetostringindex/summary.cfg @@ -1,4 +1,5 @@ defaultsummaryid 1195656216 +usev8geopositions false classes[].id 1195656216 classes[].name "default" classes[].omitsummaryfeatures false diff --git a/config-model/src/test/derived/mail/documentmanager.cfg b/config-model/src/test/derived/mail/documentmanager.cfg index baf122d0241..b6fdbe8f210 100644 --- a/config-model/src/test/derived/mail/documentmanager.cfg +++ b/config-model/src/test/derived/mail/documentmanager.cfg @@ -1,4 +1,5 @@ enablecompression false +usev8geopositions false datatype[].id 1381038251 datatype[].structtype[].name "position" datatype[].structtype[].version 0 diff --git a/config-model/src/test/derived/mail/onlydoc/documentmanager.cfg b/config-model/src/test/derived/mail/onlydoc/documentmanager.cfg index a19332685e9..27ac015e630 100644 --- a/config-model/src/test/derived/mail/onlydoc/documentmanager.cfg +++ b/config-model/src/test/derived/mail/onlydoc/documentmanager.cfg @@ -1,4 +1,5 @@ enablecompression false +usev8geopositions false datatype[].id 1381038251 datatype[].structtype[].name "position" datatype[].structtype[].version 0 diff --git a/config-model/src/test/derived/mail/summary.cfg b/config-model/src/test/derived/mail/summary.cfg index 3628e1acb88..2b886a4622b 100644 --- a/config-model/src/test/derived/mail/summary.cfg +++ b/config-model/src/test/derived/mail/summary.cfg @@ -1,4 +1,5 @@ defaultsummaryid 1831052622 +usev8geopositions false classes[].id 1831052622 classes[].name "default" classes[].fields[].name "snippet" diff --git a/config-model/src/test/derived/map_attribute/summary.cfg b/config-model/src/test/derived/map_attribute/summary.cfg 
index b465bdfa541..b50b40b9b72 100644 --- a/config-model/src/test/derived/map_attribute/summary.cfg +++ b/config-model/src/test/derived/map_attribute/summary.cfg @@ -1,4 +1,5 @@ defaultsummaryid 1376056200 +usev8geopositions false classes[].id 1376056200 classes[].name "default" classes[].omitsummaryfeatures false diff --git a/config-model/src/test/derived/map_of_struct_attribute/summary.cfg b/config-model/src/test/derived/map_of_struct_attribute/summary.cfg index 67988dbf30e..cf875704fa0 100644 --- a/config-model/src/test/derived/map_of_struct_attribute/summary.cfg +++ b/config-model/src/test/derived/map_of_struct_attribute/summary.cfg @@ -1,4 +1,5 @@ defaultsummaryid 1131098132 +usev8geopositions false classes[].id 1131098132 classes[].name "default" classes[].omitsummaryfeatures false diff --git a/config-model/src/test/derived/mlr/summary.cfg b/config-model/src/test/derived/mlr/summary.cfg index b6a53a9a1d9..5bea06a8472 100644 --- a/config-model/src/test/derived/mlr/summary.cfg +++ b/config-model/src/test/derived/mlr/summary.cfg @@ -1,4 +1,5 @@ defaultsummaryid 1868876861 +usev8geopositions false classes[].id 1868876861 classes[].name "default" classes[].omitsummaryfeatures false diff --git a/config-model/src/test/derived/multiplesummaries/summary.cfg b/config-model/src/test/derived/multiplesummaries/summary.cfg index 8d85890b156..16d0024155b 100644 --- a/config-model/src/test/derived/multiplesummaries/summary.cfg +++ b/config-model/src/test/derived/multiplesummaries/summary.cfg @@ -1,4 +1,5 @@ defaultsummaryid 235127765 +usev8geopositions false classes[1156201411].id 1156201411 classes[1156201411].name "attributeprefetch" classes[1156201411].fields[a].name "a" diff --git a/config-model/src/test/derived/music/summary.cfg b/config-model/src/test/derived/music/summary.cfg index bc55727b407..c92b5491ebc 100644 --- a/config-model/src/test/derived/music/summary.cfg +++ b/config-model/src/test/derived/music/summary.cfg @@ -1,4 +1,5 @@ defaultsummaryid 2086497905 
+usev8geopositions false classes[].id 2086497905 classes[].name "default" classes[].omitsummaryfeatures false diff --git a/config-model/src/test/derived/namecollision/documentmanager.cfg b/config-model/src/test/derived/namecollision/documentmanager.cfg index 8d0d89dde35..99da89f4fbf 100644 --- a/config-model/src/test/derived/namecollision/documentmanager.cfg +++ b/config-model/src/test/derived/namecollision/documentmanager.cfg @@ -1,4 +1,5 @@ enablecompression false +usev8geopositions false datatype[].id 1381038251 datatype[].structtype[].name "position" datatype[].structtype[].version 0 diff --git a/config-model/src/test/derived/newrank/summary.cfg b/config-model/src/test/derived/newrank/summary.cfg index 0b98b20c342..ddebcbe6ca6 100644 --- a/config-model/src/test/derived/newrank/summary.cfg +++ b/config-model/src/test/derived/newrank/summary.cfg @@ -1,4 +1,5 @@ defaultsummaryid 912980235 +usev8geopositions false classes[].id 912980235 classes[].name "default" classes[].omitsummaryfeatures false diff --git a/config-model/src/test/derived/position_nosummary/summary.cfg b/config-model/src/test/derived/position_nosummary/summary.cfg index 4222e88cc2f..cd7c295ab11 100644 --- a/config-model/src/test/derived/position_nosummary/summary.cfg +++ b/config-model/src/test/derived/position_nosummary/summary.cfg @@ -1,4 +1,5 @@ defaultsummaryid 1727020212 +usev8geopositions false classes[].id 1727020212 classes[].name "default" classes[].omitsummaryfeatures false diff --git a/config-model/src/test/derived/position_summary/summary.cfg b/config-model/src/test/derived/position_summary/summary.cfg index f54066d865e..7b8bf16287f 100644 --- a/config-model/src/test/derived/position_summary/summary.cfg +++ b/config-model/src/test/derived/position_summary/summary.cfg @@ -1,4 +1,5 @@ defaultsummaryid 230670304 +usev8geopositions false classes[].id 230670304 classes[].name "default" classes[].omitsummaryfeatures false diff --git 
a/config-model/src/test/derived/predicate_attribute/summary.cfg b/config-model/src/test/derived/predicate_attribute/summary.cfg index 9cc613107e0..10040fd71c6 100644 --- a/config-model/src/test/derived/predicate_attribute/summary.cfg +++ b/config-model/src/test/derived/predicate_attribute/summary.cfg @@ -1,4 +1,5 @@ defaultsummaryid 1391971216 +usev8geopositions false classes[].id 1391971216 classes[].name "default" classes[].omitsummaryfeatures false diff --git a/config-model/src/test/derived/prefixexactattribute/documentmanager.cfg b/config-model/src/test/derived/prefixexactattribute/documentmanager.cfg index 9ab2da3f686..e37ea304b18 100644 --- a/config-model/src/test/derived/prefixexactattribute/documentmanager.cfg +++ b/config-model/src/test/derived/prefixexactattribute/documentmanager.cfg @@ -1,4 +1,5 @@ enablecompression false +usev8geopositions false datatype[].id 1381038251 datatype[].structtype[].name "position" datatype[].structtype[].version 0 diff --git a/config-model/src/test/derived/rankexpression/summary.cfg b/config-model/src/test/derived/rankexpression/summary.cfg index 22f4c3d4ca7..aec076aa8fe 100644 --- a/config-model/src/test/derived/rankexpression/summary.cfg +++ b/config-model/src/test/derived/rankexpression/summary.cfg @@ -1,4 +1,5 @@ defaultsummaryid 1753207254 +usev8geopositions false classes[].id 1753207254 classes[].name "default" classes[].omitsummaryfeatures false diff --git a/config-model/src/test/derived/ranktypes/documentmanager.cfg b/config-model/src/test/derived/ranktypes/documentmanager.cfg index a8bb9e904dc..dc02052c509 100644 --- a/config-model/src/test/derived/ranktypes/documentmanager.cfg +++ b/config-model/src/test/derived/ranktypes/documentmanager.cfg @@ -1,4 +1,5 @@ enablecompression false +usev8geopositions false datatype[].id 1381038251 datatype[].structtype[].name "position" datatype[].structtype[].version 0 diff --git a/config-model/src/test/derived/ranktypes/summary.cfg 
b/config-model/src/test/derived/ranktypes/summary.cfg index 49b668e9edf..7ab01302d8b 100644 --- a/config-model/src/test/derived/ranktypes/summary.cfg +++ b/config-model/src/test/derived/ranktypes/summary.cfg @@ -1,4 +1,5 @@ defaultsummaryid 1567556360 +usev8geopositions false classes[].id 1567556360 classes[].name "default" classes[].omitsummaryfeatures false diff --git a/config-model/src/test/derived/reference_fields/summary.cfg b/config-model/src/test/derived/reference_fields/summary.cfg index 410bccff7b3..b23b6c5b5a8 100644 --- a/config-model/src/test/derived/reference_fields/summary.cfg +++ b/config-model/src/test/derived/reference_fields/summary.cfg @@ -1,4 +1,5 @@ defaultsummaryid 1987541865 +usev8geopositions false classes[].id 1987541865 classes[].name "default" classes[].omitsummaryfeatures false diff --git a/config-model/src/test/derived/schemainheritance/documentmanager.cfg b/config-model/src/test/derived/schemainheritance/documentmanager.cfg index 0dd87029f32..ec13eae92ff 100644 --- a/config-model/src/test/derived/schemainheritance/documentmanager.cfg +++ b/config-model/src/test/derived/schemainheritance/documentmanager.cfg @@ -1,4 +1,5 @@ enablecompression false +usev8geopositions false datatype[].id 1381038251 datatype[].structtype[].name "position" datatype[].structtype[].version 0 diff --git a/config-model/src/test/derived/schemainheritance/rank-profiles.cfg b/config-model/src/test/derived/schemainheritance/rank-profiles.cfg index 9e68045fab0..1e2ed46e696 100644 --- a/config-model/src/test/derived/schemainheritance/rank-profiles.cfg +++ b/config-model/src/test/derived/schemainheritance/rank-profiles.cfg @@ -9,3 +9,4 @@ rankprofile[].fef.property[].value "0" rankprofile[].fef.property[].name "vespa.dump.ignoredefaultfeatures" rankprofile[].fef.property[].value "true" rankprofile[].name "child_profile" +rankprofile[].name "parent_profile" diff --git a/config-model/src/test/derived/schemainheritance/summary.cfg 
b/config-model/src/test/derived/schemainheritance/summary.cfg index d3286961007..6fcf5b2aaa8 100644 --- a/config-model/src/test/derived/schemainheritance/summary.cfg +++ b/config-model/src/test/derived/schemainheritance/summary.cfg @@ -1,4 +1,5 @@ defaultsummaryid 1313596701 +usev8geopositions false classes[].id 1313596701 classes[].name "default" classes[].omitsummaryfeatures false diff --git a/config-model/src/test/derived/streamingstruct/documentmanager.cfg b/config-model/src/test/derived/streamingstruct/documentmanager.cfg index 63001ea38ca..567944d3b78 100644 --- a/config-model/src/test/derived/streamingstruct/documentmanager.cfg +++ b/config-model/src/test/derived/streamingstruct/documentmanager.cfg @@ -1,4 +1,5 @@ enablecompression false +usev8geopositions false datatype[].id 1381038251 datatype[].structtype[].name "position" datatype[].structtype[].version 0 diff --git a/config-model/src/test/derived/streamingstruct/onlydoc/documentmanager.cfg b/config-model/src/test/derived/streamingstruct/onlydoc/documentmanager.cfg index 1a1cf522fdd..cb56da169fa 100644 --- a/config-model/src/test/derived/streamingstruct/onlydoc/documentmanager.cfg +++ b/config-model/src/test/derived/streamingstruct/onlydoc/documentmanager.cfg @@ -1,4 +1,5 @@ enablecompression false +usev8geopositions false datatype[].id 1381038251 datatype[].structtype[].name "position" datatype[].structtype[].version 0 diff --git a/config-model/src/test/derived/streamingstruct/summary.cfg b/config-model/src/test/derived/streamingstruct/summary.cfg index 655499a88be..d7b7057bf1f 100644 --- a/config-model/src/test/derived/streamingstruct/summary.cfg +++ b/config-model/src/test/derived/streamingstruct/summary.cfg @@ -1,4 +1,5 @@ defaultsummaryid 569269436 +usev8geopositions false classes[].id 569269436 classes[].name "default" classes[].omitsummaryfeatures false diff --git a/config-model/src/test/derived/streamingstructdefault/summary.cfg b/config-model/src/test/derived/streamingstructdefault/summary.cfg 
index a52b34925dc..7274804a5de 100644 --- a/config-model/src/test/derived/streamingstructdefault/summary.cfg +++ b/config-model/src/test/derived/streamingstructdefault/summary.cfg @@ -1,4 +1,5 @@ defaultsummaryid 718801936 +usev8geopositions false classes[].id 718801936 classes[].name "default" classes[].omitsummaryfeatures false diff --git a/config-model/src/test/derived/structanyorder/documentmanager.cfg b/config-model/src/test/derived/structanyorder/documentmanager.cfg index 3ffc2f22a9b..be5e473e460 100644 --- a/config-model/src/test/derived/structanyorder/documentmanager.cfg +++ b/config-model/src/test/derived/structanyorder/documentmanager.cfg @@ -1,4 +1,5 @@ enablecompression false +usev8geopositions false datatype[].id 1381038251 datatype[].structtype[].name "position" datatype[].structtype[].version 0 diff --git a/config-model/src/test/derived/structinheritance/bad.sd b/config-model/src/test/derived/structinheritance/bad.sd new file mode 100644 index 00000000000..ef5137842ec --- /dev/null +++ b/config-model/src/test/derived/structinheritance/bad.sd @@ -0,0 +1,18 @@ +# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+ +search bad { + document bad { + struct base { + field name type string {} + field year type int {} + } + struct onechild inherits base { + field between type string {} + } + struct childtwo inherits onechild { + field mine type string {} + field name type string {} + } + field f1 type onechild {} + } +} diff --git a/config-model/src/test/derived/structinheritance/documentmanager.cfg b/config-model/src/test/derived/structinheritance/documentmanager.cfg new file mode 100644 index 00000000000..20994bacca2 --- /dev/null +++ b/config-model/src/test/derived/structinheritance/documentmanager.cfg @@ -0,0 +1,71 @@ +enablecompression false +usev8geopositions false +datatype[0].id 1381038251 +datatype[0].structtype[0].name "position" +datatype[0].structtype[0].version 0 +datatype[0].structtype[0].compresstype NONE +datatype[0].structtype[0].compresslevel 0 +datatype[0].structtype[0].compressthreshold 95 +datatype[0].structtype[0].compressminsize 800 +datatype[0].structtype[0].field[0].name "x" +datatype[0].structtype[0].field[0].datatype 0 +datatype[0].structtype[0].field[0].detailedtype "" +datatype[0].structtype[0].field[1].name "y" +datatype[0].structtype[0].field[1].datatype 0 +datatype[0].structtype[0].field[1].detailedtype "" +datatype[1].id -1396204461 +datatype[1].structtype[0].name "base" +datatype[1].structtype[0].version 0 +datatype[1].structtype[0].compresstype NONE +datatype[1].structtype[0].compresslevel 0 +datatype[1].structtype[0].compressthreshold 95 +datatype[1].structtype[0].compressminsize 800 +datatype[1].structtype[0].field[0].name "name" +datatype[1].structtype[0].field[0].datatype 2 +datatype[1].structtype[0].field[0].detailedtype "" +datatype[2].id 746267614 +datatype[2].structtype[0].name "child" +datatype[2].structtype[0].version 0 +datatype[2].structtype[0].compresstype NONE +datatype[2].structtype[0].compresslevel 0 +datatype[2].structtype[0].compressthreshold 95 +datatype[2].structtype[0].compressminsize 800 
+datatype[2].structtype[0].field[0].name "age" +datatype[2].structtype[0].field[0].datatype 0 +datatype[2].structtype[0].field[0].detailedtype "" +datatype[2].structtype[0].inherits[0].name "base" +datatype[2].structtype[0].inherits[0].version 0 +datatype[3].id 1811766610 +datatype[3].structtype[0].name "grandchild" +datatype[3].structtype[0].version 0 +datatype[3].structtype[0].compresstype NONE +datatype[3].structtype[0].compresslevel 0 +datatype[3].structtype[0].compressthreshold 95 +datatype[3].structtype[0].compressminsize 800 +datatype[3].structtype[0].field[0].name "toy" +datatype[3].structtype[0].field[0].datatype 2 +datatype[3].structtype[0].field[0].detailedtype "" +datatype[3].structtype[0].inherits[0].name "child" +datatype[3].structtype[0].inherits[0].version 0 +datatype[4].id -2142109237 +datatype[4].structtype[0].name "simple.header" +datatype[4].structtype[0].version 0 +datatype[4].structtype[0].compresstype NONE +datatype[4].structtype[0].compresslevel 0 +datatype[4].structtype[0].compressthreshold 95 +datatype[4].structtype[0].compressminsize 800 +datatype[4].structtype[0].field[0].name "f1" +datatype[4].structtype[0].field[0].datatype 746267614 +datatype[4].structtype[0].field[0].detailedtype "" +datatype[4].structtype[0].field[1].name "f2" +datatype[4].structtype[0].field[1].datatype 1811766610 +datatype[4].structtype[0].field[1].detailedtype "" +datatype[5].id 485659380 +datatype[5].documenttype[0].name "simple" +datatype[5].documenttype[0].version 0 +datatype[5].documenttype[0].inherits[0].name "document" +datatype[5].documenttype[0].inherits[0].version 0 +datatype[5].documenttype[0].headerstruct -2142109237 +datatype[5].documenttype[0].bodystruct 0 +datatype[5].documenttype[0].fieldsets{[document]}.fields[0] "f1" +datatype[5].documenttype[0].fieldsets{[document]}.fields[1] "f2" diff --git a/config-model/src/test/derived/structinheritance/documenttypes.cfg b/config-model/src/test/derived/structinheritance/documenttypes.cfg new file mode 100644 
index 00000000000..52a154905c2 --- /dev/null +++ b/config-model/src/test/derived/structinheritance/documenttypes.cfg @@ -0,0 +1,102 @@ +enablecompression false +usev8geopositions false +documenttype[0].id 485659380 +documenttype[0].name "simple" +documenttype[0].version 0 +documenttype[0].headerstruct -2142109237 +documenttype[0].bodystruct 0 +documenttype[0].inherits[0].id 8 +documenttype[0].datatype[0].id 1811766610 +documenttype[0].datatype[0].type STRUCT +documenttype[0].datatype[0].array.element.id 0 +documenttype[0].datatype[0].map.key.id 0 +documenttype[0].datatype[0].map.value.id 0 +documenttype[0].datatype[0].wset.key.id 0 +documenttype[0].datatype[0].wset.createifnonexistent false +documenttype[0].datatype[0].wset.removeifzero false +documenttype[0].datatype[0].annotationref.annotation.id 0 +documenttype[0].datatype[0].sstruct.name "grandchild" +documenttype[0].datatype[0].sstruct.version 0 +documenttype[0].datatype[0].sstruct.compression.type NONE +documenttype[0].datatype[0].sstruct.compression.level 0 +documenttype[0].datatype[0].sstruct.compression.threshold 95 +documenttype[0].datatype[0].sstruct.compression.minsize 200 +documenttype[0].datatype[0].sstruct.field[0].name "toy" +documenttype[0].datatype[0].sstruct.field[0].id 536645790 +documenttype[0].datatype[0].sstruct.field[0].datatype 2 +documenttype[0].datatype[0].sstruct.field[0].detailedtype "" +documenttype[0].datatype[0].sstruct.field[1].name "age" +documenttype[0].datatype[0].sstruct.field[1].id 1862473705 +documenttype[0].datatype[0].sstruct.field[1].datatype 0 +documenttype[0].datatype[0].sstruct.field[1].detailedtype "" +documenttype[0].datatype[0].sstruct.field[2].name "name" +documenttype[0].datatype[0].sstruct.field[2].id 1160796772 +documenttype[0].datatype[0].sstruct.field[2].datatype 2 +documenttype[0].datatype[0].sstruct.field[2].detailedtype "" +documenttype[0].datatype[1].id -1396204461 +documenttype[0].datatype[1].type STRUCT +documenttype[0].datatype[1].array.element.id 0 
+documenttype[0].datatype[1].map.key.id 0 +documenttype[0].datatype[1].map.value.id 0 +documenttype[0].datatype[1].wset.key.id 0 +documenttype[0].datatype[1].wset.createifnonexistent false +documenttype[0].datatype[1].wset.removeifzero false +documenttype[0].datatype[1].annotationref.annotation.id 0 +documenttype[0].datatype[1].sstruct.name "base" +documenttype[0].datatype[1].sstruct.version 0 +documenttype[0].datatype[1].sstruct.compression.type NONE +documenttype[0].datatype[1].sstruct.compression.level 0 +documenttype[0].datatype[1].sstruct.compression.threshold 95 +documenttype[0].datatype[1].sstruct.compression.minsize 200 +documenttype[0].datatype[1].sstruct.field[0].name "name" +documenttype[0].datatype[1].sstruct.field[0].id 1160796772 +documenttype[0].datatype[1].sstruct.field[0].datatype 2 +documenttype[0].datatype[1].sstruct.field[0].detailedtype "" +documenttype[0].datatype[2].id 746267614 +documenttype[0].datatype[2].type STRUCT +documenttype[0].datatype[2].array.element.id 0 +documenttype[0].datatype[2].map.key.id 0 +documenttype[0].datatype[2].map.value.id 0 +documenttype[0].datatype[2].wset.key.id 0 +documenttype[0].datatype[2].wset.createifnonexistent false +documenttype[0].datatype[2].wset.removeifzero false +documenttype[0].datatype[2].annotationref.annotation.id 0 +documenttype[0].datatype[2].sstruct.name "child" +documenttype[0].datatype[2].sstruct.version 0 +documenttype[0].datatype[2].sstruct.compression.type NONE +documenttype[0].datatype[2].sstruct.compression.level 0 +documenttype[0].datatype[2].sstruct.compression.threshold 95 +documenttype[0].datatype[2].sstruct.compression.minsize 200 +documenttype[0].datatype[2].sstruct.field[0].name "age" +documenttype[0].datatype[2].sstruct.field[0].id 1862473705 +documenttype[0].datatype[2].sstruct.field[0].datatype 0 +documenttype[0].datatype[2].sstruct.field[0].detailedtype "" +documenttype[0].datatype[2].sstruct.field[1].name "name" +documenttype[0].datatype[2].sstruct.field[1].id 1160796772 
+documenttype[0].datatype[2].sstruct.field[1].datatype 2 +documenttype[0].datatype[2].sstruct.field[1].detailedtype "" +documenttype[0].datatype[3].id -2142109237 +documenttype[0].datatype[3].type STRUCT +documenttype[0].datatype[3].array.element.id 0 +documenttype[0].datatype[3].map.key.id 0 +documenttype[0].datatype[3].map.value.id 0 +documenttype[0].datatype[3].wset.key.id 0 +documenttype[0].datatype[3].wset.createifnonexistent false +documenttype[0].datatype[3].wset.removeifzero false +documenttype[0].datatype[3].annotationref.annotation.id 0 +documenttype[0].datatype[3].sstruct.name "simple.header" +documenttype[0].datatype[3].sstruct.version 0 +documenttype[0].datatype[3].sstruct.compression.type NONE +documenttype[0].datatype[3].sstruct.compression.level 0 +documenttype[0].datatype[3].sstruct.compression.threshold 95 +documenttype[0].datatype[3].sstruct.compression.minsize 200 +documenttype[0].datatype[3].sstruct.field[0].name "f1" +documenttype[0].datatype[3].sstruct.field[0].id 750623154 +documenttype[0].datatype[3].sstruct.field[0].datatype 746267614 +documenttype[0].datatype[3].sstruct.field[0].detailedtype "" +documenttype[0].datatype[3].sstruct.field[1].name "f2" +documenttype[0].datatype[3].sstruct.field[1].id 1523850983 +documenttype[0].datatype[3].sstruct.field[1].datatype 1811766610 +documenttype[0].datatype[3].sstruct.field[1].detailedtype "" +documenttype[0].fieldsets{[document]}.fields[0] "f1" +documenttype[0].fieldsets{[document]}.fields[1] "f2" diff --git a/config-model/src/test/derived/structinheritance/simple.sd b/config-model/src/test/derived/structinheritance/simple.sd new file mode 100644 index 00000000000..8b4bb6150c1 --- /dev/null +++ b/config-model/src/test/derived/structinheritance/simple.sd @@ -0,0 +1,17 @@ +# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+ +search simple { + document simple { + struct base { + field name type string {} + } + struct child inherits base { + field age type int {} + } + struct grandchild inherits child { + field toy type string {} + } + field f1 type child {} + field f2 type grandchild {} + } +} diff --git a/config-model/src/test/derived/tensor/documenttypes.cfg b/config-model/src/test/derived/tensor/documenttypes.cfg index acf5c7ed12f..879b455a711 100644 --- a/config-model/src/test/derived/tensor/documenttypes.cfg +++ b/config-model/src/test/derived/tensor/documenttypes.cfg @@ -1,4 +1,5 @@ enablecompression false +usev8geopositions false documenttype[].id -1290043429 documenttype[].name "tensor" documenttype[].version 0 diff --git a/config-model/src/test/derived/tensor/summary.cfg b/config-model/src/test/derived/tensor/summary.cfg index 355cba0e561..de6a2a6e386 100644 --- a/config-model/src/test/derived/tensor/summary.cfg +++ b/config-model/src/test/derived/tensor/summary.cfg @@ -1,4 +1,5 @@ defaultsummaryid 898020074 +usev8geopositions false classes[].id 898020074 classes[].name "default" classes[].omitsummaryfeatures false diff --git a/config-model/src/test/derived/twostreamingstructs/documentmanager.cfg b/config-model/src/test/derived/twostreamingstructs/documentmanager.cfg index 19d00483a5a..f121eb7628a 100644 --- a/config-model/src/test/derived/twostreamingstructs/documentmanager.cfg +++ b/config-model/src/test/derived/twostreamingstructs/documentmanager.cfg @@ -1,4 +1,5 @@ enablecompression false +usev8geopositions false datatype[].id 1381038251 datatype[].structtype[].name "position" datatype[].structtype[].version 0 diff --git a/config-model/src/test/derived/twostreamingstructs/summary.cfg b/config-model/src/test/derived/twostreamingstructs/summary.cfg index 28f19e6fe25..969e91d4b87 100644 --- a/config-model/src/test/derived/twostreamingstructs/summary.cfg +++ b/config-model/src/test/derived/twostreamingstructs/summary.cfg @@ -1,4 +1,5 @@ defaultsummaryid 569269436 
+usev8geopositions false classes[].id 569269436 classes[].name "default" classes[].fields[].name "coupleof" diff --git a/config-model/src/test/derived/types/documentmanager.cfg b/config-model/src/test/derived/types/documentmanager.cfg index 9556f77f6d9..ffdf090f761 100644 --- a/config-model/src/test/derived/types/documentmanager.cfg +++ b/config-model/src/test/derived/types/documentmanager.cfg @@ -1,4 +1,5 @@ enablecompression false +usev8geopositions false datatype[].id 1381038251 datatype[].structtype[].name "position" datatype[].structtype[].version 0 diff --git a/config-model/src/test/derived/types/summary.cfg b/config-model/src/test/derived/types/summary.cfg index e0e67a5669d..fc3b9e655ca 100644 --- a/config-model/src/test/derived/types/summary.cfg +++ b/config-model/src/test/derived/types/summary.cfg @@ -1,4 +1,5 @@ defaultsummaryid 1131946680 +usev8geopositions false classes[].id 1131946680 classes[].name "default" classes[].omitsummaryfeatures false diff --git a/config-model/src/test/examples/fieldoftypedocument-doctypes.cfg b/config-model/src/test/examples/fieldoftypedocument-doctypes.cfg new file mode 100644 index 00000000000..a7a4c675311 --- /dev/null +++ b/config-model/src/test/examples/fieldoftypedocument-doctypes.cfg @@ -0,0 +1,69 @@ +enablecompression false +usev8geopositions false +doctype[0].name "document" +doctype[0].idx 10000 +doctype[0].contentstruct 10001 +doctype[0].primitivetype[0].idx 10002 +doctype[0].primitivetype[0].name "byte" +doctype[0].primitivetype[1].idx 10003 +doctype[0].primitivetype[1].name "int" +doctype[0].primitivetype[2].idx 10004 +doctype[0].primitivetype[2].name "long" +doctype[0].primitivetype[3].idx 10005 +doctype[0].primitivetype[3].name "string" +doctype[0].primitivetype[4].idx 10006 +doctype[0].primitivetype[4].name "raw" +doctype[0].primitivetype[5].idx 10008 +doctype[0].primitivetype[5].name "float" +doctype[0].primitivetype[6].idx 10009 +doctype[0].primitivetype[6].name "double" +doctype[0].primitivetype[7].idx 
10011 +doctype[0].primitivetype[7].name "uri" +doctype[0].primitivetype[8].idx 10012 +doctype[0].primitivetype[8].name "predicate" +doctype[0].primitivetype[9].idx 10013 +doctype[0].primitivetype[9].name "bool" +doctype[0].primitivetype[10].idx 10014 +doctype[0].primitivetype[10].name "float16" +doctype[0].wsettype[0].idx 10007 +doctype[0].wsettype[0].elementtype 10005 +doctype[0].wsettype[0].createifnonexistent true +doctype[0].wsettype[0].removeifzero true +doctype[0].structtype[0].idx 10001 +doctype[0].structtype[0].name "document.header" +doctype[0].structtype[1].idx 10010 +doctype[0].structtype[1].name "position" +doctype[0].structtype[1].field[0].name "x" +doctype[0].structtype[1].field[0].internalid 914677694 +doctype[0].structtype[1].field[0].type 10003 +doctype[0].structtype[1].field[1].name "y" +doctype[0].structtype[1].field[1].internalid 900009410 +doctype[0].structtype[1].field[1].type 10003 +doctype[1].name "book" +doctype[1].idx 10015 +doctype[1].inherits[0].idx 10000 +doctype[1].contentstruct 10016 +doctype[1].fieldsets{[document]}.fields[0] "soundtrack" +doctype[1].structtype[0].idx 10016 +doctype[1].structtype[0].name "book.header" +doctype[1].structtype[0].field[0].name "soundtrack" +doctype[1].structtype[0].field[0].internalid 1258961213 +doctype[1].structtype[0].field[0].type 10017 +doctype[2].name "music" +doctype[2].idx 10017 +doctype[2].inherits[0].idx 10000 +doctype[2].contentstruct 10018 +doctype[2].fieldsets{[document]}.fields[0] "intfield" +doctype[2].fieldsets{[document]}.fields[1] "longfield" +doctype[2].fieldsets{[document]}.fields[2] "stringfield" +doctype[2].structtype[0].idx 10018 +doctype[2].structtype[0].name "music.header" +doctype[2].structtype[0].field[0].name "intfield" +doctype[2].structtype[0].field[0].internalid 435380425 +doctype[2].structtype[0].field[0].type 10003 +doctype[2].structtype[0].field[1].name "stringfield" +doctype[2].structtype[0].field[1].internalid 1182460484 +doctype[2].structtype[0].field[1].type 10005 
+doctype[2].structtype[0].field[2].name "longfield" +doctype[2].structtype[0].field[2].internalid 1589309697 +doctype[2].structtype[0].field[2].type 10004 diff --git a/config-model/src/test/examples/fieldoftypedocument.cfg b/config-model/src/test/examples/fieldoftypedocument.cfg index 8074d86b45f..82a30012a07 100644 --- a/config-model/src/test/examples/fieldoftypedocument.cfg +++ b/config-model/src/test/examples/fieldoftypedocument.cfg @@ -1,4 +1,5 @@ enablecompression false +usev8geopositions false datatype[0].id 1381038251 datatype[0].structtype[0].name "position" datatype[0].structtype[0].version 0 @@ -22,37 +23,37 @@ datatype[1].structtype[0].compressminsize 800 datatype[1].structtype[0].field[0].name "soundtrack" datatype[1].structtype[0].field[0].datatype 1412693671 datatype[1].structtype[0].field[0].detailedtype "" -datatype[2].id -1383388565 -datatype[2].documenttype[0].name "book" +datatype[2].id 1412693671 +datatype[2].documenttype[0].name "music" datatype[2].documenttype[0].version 0 datatype[2].documenttype[0].inherits[0].name "document" datatype[2].documenttype[0].inherits[0].version 0 -datatype[2].documenttype[0].headerstruct -1344444812 +datatype[2].documenttype[0].headerstruct -1910204744 datatype[2].documenttype[0].bodystruct 0 -datatype[2].documenttype[0].fieldsets{[document]}.fields[0] "soundtrack" -datatype[3].id -1910204744 -datatype[3].structtype[0].name "music.header" -datatype[3].structtype[0].version 0 -datatype[3].structtype[0].compresstype NONE -datatype[3].structtype[0].compresslevel 0 -datatype[3].structtype[0].compressthreshold 95 -datatype[3].structtype[0].compressminsize 800 -datatype[3].structtype[0].field[0].name "intfield" -datatype[3].structtype[0].field[0].datatype 0 -datatype[3].structtype[0].field[0].detailedtype "" -datatype[3].structtype[0].field[1].name "stringfield" -datatype[3].structtype[0].field[1].datatype 2 -datatype[3].structtype[0].field[1].detailedtype "" -datatype[3].structtype[0].field[2].name "longfield" 
-datatype[3].structtype[0].field[2].datatype 4 -datatype[3].structtype[0].field[2].detailedtype "" -datatype[4].id 1412693671 -datatype[4].documenttype[0].name "music" -datatype[4].documenttype[0].version 0 -datatype[4].documenttype[0].inherits[0].name "document" -datatype[4].documenttype[0].inherits[0].version 0 -datatype[4].documenttype[0].headerstruct -1910204744 -datatype[4].documenttype[0].bodystruct 0 -datatype[4].documenttype[0].fieldsets{[document]}.fields[0] "intfield" -datatype[4].documenttype[0].fieldsets{[document]}.fields[1] "longfield" -datatype[4].documenttype[0].fieldsets{[document]}.fields[2] "stringfield" +datatype[2].documenttype[0].fieldsets{[document]}.fields[0] "intfield" +datatype[2].documenttype[0].fieldsets{[document]}.fields[1] "longfield" +datatype[2].documenttype[0].fieldsets{[document]}.fields[2] "stringfield" +datatype[3].id -1383388565 +datatype[3].documenttype[0].name "book" +datatype[3].documenttype[0].version 0 +datatype[3].documenttype[0].inherits[0].name "document" +datatype[3].documenttype[0].inherits[0].version 0 +datatype[3].documenttype[0].headerstruct -1344444812 +datatype[3].documenttype[0].bodystruct 0 +datatype[3].documenttype[0].fieldsets{[document]}.fields[0] "soundtrack" +datatype[4].id -1910204744 +datatype[4].structtype[0].name "music.header" +datatype[4].structtype[0].version 0 +datatype[4].structtype[0].compresstype NONE +datatype[4].structtype[0].compresslevel 0 +datatype[4].structtype[0].compressthreshold 95 +datatype[4].structtype[0].compressminsize 800 +datatype[4].structtype[0].field[0].name "intfield" +datatype[4].structtype[0].field[0].datatype 0 +datatype[4].structtype[0].field[0].detailedtype "" +datatype[4].structtype[0].field[1].name "stringfield" +datatype[4].structtype[0].field[1].datatype 2 +datatype[4].structtype[0].field[1].detailedtype "" +datatype[4].structtype[0].field[2].name "longfield" +datatype[4].structtype[0].field[2].datatype 4 +datatype[4].structtype[0].field[2].detailedtype "" diff --git 
a/config-model/src/test/examples/structresult.cfg b/config-model/src/test/examples/structresult.cfg index ceaad2e6584..b5b90245858 100755..100644 --- a/config-model/src/test/examples/structresult.cfg +++ b/config-model/src/test/examples/structresult.cfg @@ -1,4 +1,5 @@ enablecompression false +usev8geopositions false datatype[0].id 1381038251 datatype[0].structtype[0].name "position" datatype[0].structtype[0].version 0 @@ -12,32 +13,32 @@ datatype[0].structtype[0].field[0].detailedtype "" datatype[0].structtype[0].field[1].name "y" datatype[0].structtype[0].field[1].datatype 0 datatype[0].structtype[0].field[1].detailedtype "" -datatype[1].id 93505813 -datatype[1].structtype[0].name "bar" -datatype[1].structtype[0].version 0 -datatype[1].structtype[0].compresstype NONE -datatype[1].structtype[0].compresslevel 0 -datatype[1].structtype[0].compressthreshold 95 -datatype[1].structtype[0].compressminsize 800 -datatype[1].structtype[0].field[0].name "humbe" -datatype[1].structtype[0].field[0].datatype 97614088 -datatype[1].structtype[0].field[0].detailedtype "" -datatype[2].id 97614088 -datatype[2].structtype[0].name "foo" +datatype[1].id -1245205573 +datatype[1].arraytype[0].datatype 97614088 +datatype[2].id 93505813 +datatype[2].structtype[0].name "bar" datatype[2].structtype[0].version 0 datatype[2].structtype[0].compresstype NONE datatype[2].structtype[0].compresslevel 0 datatype[2].structtype[0].compressthreshold 95 datatype[2].structtype[0].compressminsize 800 -datatype[2].structtype[0].field[0].name "fubar" -datatype[2].structtype[0].field[0].datatype 0 +datatype[2].structtype[0].field[0].name "humbe" +datatype[2].structtype[0].field[0].datatype 97614088 datatype[2].structtype[0].field[0].detailedtype "" -datatype[2].structtype[0].field[1].name "bar" -datatype[2].structtype[0].field[1].id[0].id 1 -datatype[2].structtype[0].field[1].datatype 2 -datatype[2].structtype[0].field[1].detailedtype "" -datatype[3].id -1245205573 -datatype[3].arraytype[0].datatype 
97614088 +datatype[3].id 97614088 +datatype[3].structtype[0].name "foo" +datatype[3].structtype[0].version 0 +datatype[3].structtype[0].compresstype NONE +datatype[3].structtype[0].compresslevel 0 +datatype[3].structtype[0].compressthreshold 95 +datatype[3].structtype[0].compressminsize 800 +datatype[3].structtype[0].field[0].name "fubar" +datatype[3].structtype[0].field[0].datatype 0 +datatype[3].structtype[0].field[0].detailedtype "" +datatype[3].structtype[0].field[1].name "bar" +datatype[3].structtype[0].field[1].id[0].id 1 +datatype[3].structtype[0].field[1].datatype 2 +datatype[3].structtype[0].field[1].detailedtype "" datatype[4].id -1910204744 datatype[4].structtype[0].name "music.header" datatype[4].structtype[0].version 0 diff --git a/config-model/src/test/java/com/yahoo/config/model/provision/ModelProvisioningTest.java b/config-model/src/test/java/com/yahoo/config/model/provision/ModelProvisioningTest.java index 2fcb9632357..0866e1174ee 100644 --- a/config-model/src/test/java/com/yahoo/config/model/provision/ModelProvisioningTest.java +++ b/config-model/src/test/java/com/yahoo/config/model/provision/ModelProvisioningTest.java @@ -1488,7 +1488,10 @@ public class ModelProvisioningTest { fail("Expected exception"); } catch (IllegalArgumentException e) { - assertEquals("You have specified both jvm-options='xyz' and deprecated jvmargs='abc'. Merge jvmargs into jvm-options.", e.getMessage()); + assertEquals("You have specified both jvm-options='xyz' and deprecated jvmargs='abc'. " + + "Merge jvmargs into 'options' in 'jvm' element. 
" + + "See https://docs.vespa.ai/en/reference/services-container.html#jvm", + e.getMessage()); } } diff --git a/config-model/src/test/java/com/yahoo/searchdefinition/AbstractSchemaTestCase.java b/config-model/src/test/java/com/yahoo/searchdefinition/AbstractSchemaTestCase.java index a08ec110219..8ece5cd0fe4 100644 --- a/config-model/src/test/java/com/yahoo/searchdefinition/AbstractSchemaTestCase.java +++ b/config-model/src/test/java/com/yahoo/searchdefinition/AbstractSchemaTestCase.java @@ -13,6 +13,10 @@ import static helpers.CompareConfigTestHelper.assertSerializedConfigFileEquals; public abstract class AbstractSchemaTestCase { protected static void assertConfigFile(String filename, String cfg) throws IOException { + IOUtils.writeFile(filename + ".actual", cfg, false); + if (! cfg.endsWith("\n")) { + IOUtils.writeFile(filename + ".actual", "\n", true); + } assertSerializedConfigFileEquals(filename, cfg); } diff --git a/config-model/src/test/java/com/yahoo/searchdefinition/DocumentGraphValidatorTest.java b/config-model/src/test/java/com/yahoo/searchdefinition/DocumentGraphValidatorTest.java index f57c63b440f..30cda8b5f42 100644 --- a/config-model/src/test/java/com/yahoo/searchdefinition/DocumentGraphValidatorTest.java +++ b/config-model/src/test/java/com/yahoo/searchdefinition/DocumentGraphValidatorTest.java @@ -152,6 +152,7 @@ public class DocumentGraphValidatorTest { return campaignSchema; } + @SuppressWarnings("deprecation") private static void createDocumentReference(Schema from, Schema to, String refFieldName) { SDField refField = new TemporarySDField(refFieldName, ReferenceDataType.createWithInferredId(TemporaryStructuredDataType.create(to.getName()))); SDDocumentType fromDocument = from.getDocument(); diff --git a/config-model/src/test/java/com/yahoo/searchdefinition/DocumentReferenceResolverTest.java b/config-model/src/test/java/com/yahoo/searchdefinition/DocumentReferenceResolverTest.java index 260b7a98fe7..19964700dd0 100644 --- 
a/config-model/src/test/java/com/yahoo/searchdefinition/DocumentReferenceResolverTest.java +++ b/config-model/src/test/java/com/yahoo/searchdefinition/DocumentReferenceResolverTest.java @@ -56,6 +56,7 @@ public class DocumentReferenceResolverTest { assertSame(fooRefToBarField, fooReferenceMap.get("bar_ref").referenceField()); } + @SuppressWarnings("deprecation") @Test public void throws_user_friendly_exception_if_referenced_document_does_not_exist() { // Create foo document with document reference to non-existing document bar diff --git a/config-model/src/test/java/com/yahoo/searchdefinition/FieldOfTypeDocumentTestCase.java b/config-model/src/test/java/com/yahoo/searchdefinition/FieldOfTypeDocumentTestCase.java index fdd7fe95c45..bab56c9db2c 100644 --- a/config-model/src/test/java/com/yahoo/searchdefinition/FieldOfTypeDocumentTestCase.java +++ b/config-model/src/test/java/com/yahoo/searchdefinition/FieldOfTypeDocumentTestCase.java @@ -31,6 +31,10 @@ public class FieldOfTypeDocumentTestCase extends AbstractSchemaTestCase { assertConfigFile("src/test/examples/fieldoftypedocument.cfg", new DocumentmanagerConfig(value).toString() + "\n"); + value = Deriver.getDocumentManagerConfig(sds, true); + assertConfigFile("src/test/examples/fieldoftypedocument-doctypes.cfg", + new DocumentmanagerConfig(value).toString() + "\n"); + DocumentTypeManager manager = new DocumentTypeManager(); DocumentTypeManagerConfigurer.configure(manager, "raw:" + new DocumentmanagerConfig(value).toString()); diff --git a/config-model/src/test/java/com/yahoo/searchdefinition/StructTestCase.java b/config-model/src/test/java/com/yahoo/searchdefinition/StructTestCase.java index 0c305df7e53..34093bf72ef 100755 --- a/config-model/src/test/java/com/yahoo/searchdefinition/StructTestCase.java +++ b/config-model/src/test/java/com/yahoo/searchdefinition/StructTestCase.java @@ -1,7 +1,7 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.searchdefinition; -import com.yahoo.document.DocumenttypesConfig; +import com.yahoo.document.config.DocumenttypesConfig; import com.yahoo.document.config.DocumentmanagerConfig; import com.yahoo.searchdefinition.derived.Deriver; import com.yahoo.searchdefinition.parser.ParseException; diff --git a/config-model/src/test/java/com/yahoo/searchdefinition/derived/AbstractExportingTestCase.java b/config-model/src/test/java/com/yahoo/searchdefinition/derived/AbstractExportingTestCase.java index da14e4a8929..8b54455d176 100644 --- a/config-model/src/test/java/com/yahoo/searchdefinition/derived/AbstractExportingTestCase.java +++ b/config-model/src/test/java/com/yahoo/searchdefinition/derived/AbstractExportingTestCase.java @@ -5,7 +5,7 @@ import com.yahoo.concurrent.InThreadExecutorService; import com.yahoo.config.application.api.DeployLogger; import com.yahoo.config.model.application.provider.MockFileRegistry; import com.yahoo.config.model.deploy.TestProperties; -import com.yahoo.document.DocumenttypesConfig; +import com.yahoo.document.config.DocumenttypesConfig; import com.yahoo.document.config.DocumentmanagerConfig; import com.yahoo.searchdefinition.Schema; import com.yahoo.searchdefinition.SchemaBuilder; @@ -28,6 +28,8 @@ public abstract class AbstractExportingTestCase extends AbstractSchemaTestCase { private static final String tempDir = "temp/"; private static final String searchDefRoot = "src/test/derived/"; + boolean useV8DocManagerCfg() { return false; } + private DerivedConfiguration derive(String dirName, String searchDefinitionName, TestProperties properties, @@ -64,7 +66,8 @@ public abstract class AbstractExportingTestCase extends AbstractSchemaTestCase { private DerivedConfiguration export(String name, SchemaBuilder builder, DerivedConfiguration config) throws IOException { String path = exportConfig(name, config); - DerivedConfiguration.exportDocuments(new DocumentManager().produce(builder.getModel(), new DocumentmanagerConfig.Builder()), 
path); + DerivedConfiguration.exportDocuments(new DocumentManager().useV8DocManagerCfg(useV8DocManagerCfg()) + .produce(builder.getModel(), new DocumentmanagerConfig.Builder()), path); DerivedConfiguration.exportDocuments(new DocumentTypes().produce(builder.getModel(), new DocumenttypesConfig.Builder()), path); DerivedConfiguration.exportQueryProfiles(builder.getQueryProfileRegistry(), path); return config; diff --git a/config-model/src/test/java/com/yahoo/searchdefinition/derived/InheritanceTestCase.java b/config-model/src/test/java/com/yahoo/searchdefinition/derived/InheritanceTestCase.java index 79df1fc9501..f00072a5a19 100644 --- a/config-model/src/test/java/com/yahoo/searchdefinition/derived/InheritanceTestCase.java +++ b/config-model/src/test/java/com/yahoo/searchdefinition/derived/InheritanceTestCase.java @@ -31,6 +31,9 @@ import static org.junit.Assert.assertNull; */ public class InheritanceTestCase extends AbstractExportingTestCase { + @Override + boolean useV8DocManagerCfg() { return true; } + @Rule public TemporaryFolder tmpDir = new TemporaryFolder(); diff --git a/config-model/src/test/java/com/yahoo/searchdefinition/derived/SchemaOrdererTestCase.java b/config-model/src/test/java/com/yahoo/searchdefinition/derived/SchemaOrdererTestCase.java index c6f254df798..b3f2fb62ac2 100644 --- a/config-model/src/test/java/com/yahoo/searchdefinition/derived/SchemaOrdererTestCase.java +++ b/config-model/src/test/java/com/yahoo/searchdefinition/derived/SchemaOrdererTestCase.java @@ -88,6 +88,7 @@ public class SchemaOrdererTestCase extends AbstractSchemaTestCase { assertEquals(expectedSearchOrder, actualSearchOrder); } + @SuppressWarnings("deprecation") private static void createDocumentReference(Schema from, Schema to, String refFieldName) { SDField refField = new TemporarySDField(refFieldName, ReferenceDataType.createWithInferredId(TemporaryStructuredDataType.create(to.getName()))); SDDocumentType fromDocument = from.getDocument(); diff --git 
a/config-model/src/test/java/com/yahoo/searchdefinition/derived/StructInheritanceTestCase.java b/config-model/src/test/java/com/yahoo/searchdefinition/derived/StructInheritanceTestCase.java new file mode 100644 index 00000000000..19bd8305fa5 --- /dev/null +++ b/config-model/src/test/java/com/yahoo/searchdefinition/derived/StructInheritanceTestCase.java @@ -0,0 +1,62 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +package com.yahoo.searchdefinition.derived; + +import com.yahoo.document.DataType; +import com.yahoo.document.config.DocumentmanagerConfig; +import com.yahoo.searchdefinition.Index; +import com.yahoo.searchdefinition.Schema; +import com.yahoo.searchdefinition.SchemaBuilder; +import com.yahoo.searchdefinition.document.SDDocumentType; +import com.yahoo.searchdefinition.document.SDField; +import com.yahoo.searchdefinition.parser.ParseException; +import com.yahoo.vespa.configmodel.producers.DocumentManager; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; + +import java.io.File; +import java.io.IOException; +import java.util.Arrays; +import java.util.List; + +import org.junit.rules.TemporaryFolder; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; + +/** + * Tests struct inheritance + * + * @author arnej + */ +public class StructInheritanceTestCase extends AbstractExportingTestCase { + + @Rule + public TemporaryFolder tmpDir = new TemporaryFolder(); + + @Rule + public final ExpectedException exceptionRule = ExpectedException.none(); + + @Test + public void requireThatStructCanInherit() throws IOException, ParseException { + String dir = "src/test/derived/structinheritance/"; + SchemaBuilder builder = new SchemaBuilder(); + builder.importFile(dir + "simple.sd"); + builder.build(false); + derive("structinheritance", builder, builder.getSchema("simple")); + 
assertCorrectConfigFiles("structinheritance"); + } + + @Test + public void requireThatRedeclareIsNotAllowed() throws IOException, ParseException { + exceptionRule.expect(IllegalArgumentException.class); + exceptionRule.expectMessage("cannot inherit from base and redeclare field name"); + String dir = "src/test/derived/structinheritance/"; + SchemaBuilder builder = new SchemaBuilder(); + builder.importFile(dir + "bad.sd"); + builder.build(); + derive("structinheritance", builder, builder.getSchema("bad")); + } + +} diff --git a/config-model/src/test/java/com/yahoo/searchdefinition/derived/VsmFieldsTestCase.java b/config-model/src/test/java/com/yahoo/searchdefinition/derived/VsmFieldsTestCase.java index 55702c9e1a7..138992477c0 100644 --- a/config-model/src/test/java/com/yahoo/searchdefinition/derived/VsmFieldsTestCase.java +++ b/config-model/src/test/java/com/yahoo/searchdefinition/derived/VsmFieldsTestCase.java @@ -21,6 +21,7 @@ import static org.junit.Assert.assertEquals; */ public class VsmFieldsTestCase { + @SuppressWarnings("deprecation") @Test public void reference_type_field_is_unsearchable() { Schema schema = new Schema("test", new Application(MockApplicationPackage.createEmpty()), new MockFileRegistry(), new TestableDeployLogger(), new TestProperties()); diff --git a/config-model/src/test/java/com/yahoo/searchdefinition/processing/ParentChildSearchModel.java b/config-model/src/test/java/com/yahoo/searchdefinition/processing/ParentChildSearchModel.java index a01fdd8725d..74fa7c72554 100644 --- a/config-model/src/test/java/com/yahoo/searchdefinition/processing/ParentChildSearchModel.java +++ b/config-model/src/test/java/com/yahoo/searchdefinition/processing/ParentChildSearchModel.java @@ -44,6 +44,7 @@ public class ParentChildSearchModel { return result; } + @SuppressWarnings("deprecation") protected static SDField createRefField(String parentType, String fieldName) { return new TemporarySDField(fieldName, 
ReferenceDataType.createWithInferredId(TemporaryStructuredDataType.create(parentType))); } diff --git a/config-model/src/test/java/com/yahoo/vespa/documentmodel/AbstractReferenceFieldTestCase.java b/config-model/src/test/java/com/yahoo/vespa/documentmodel/AbstractReferenceFieldTestCase.java index 0ae77238cd3..c0d74cd02d3 100644 --- a/config-model/src/test/java/com/yahoo/vespa/documentmodel/AbstractReferenceFieldTestCase.java +++ b/config-model/src/test/java/com/yahoo/vespa/documentmodel/AbstractReferenceFieldTestCase.java @@ -1,7 +1,7 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.vespa.documentmodel; -import com.yahoo.document.DocumenttypesConfig; +import com.yahoo.document.config.DocumenttypesConfig; import com.yahoo.document.config.DocumentmanagerConfig; import com.yahoo.searchdefinition.AbstractSchemaTestCase; import com.yahoo.vespa.configmodel.producers.DocumentManager; diff --git a/config-model/src/test/java/com/yahoo/vespa/documentmodel/DocumentModelBuilderTestCase.java b/config-model/src/test/java/com/yahoo/vespa/documentmodel/DocumentModelBuilderTestCase.java index 79893385199..9dfd05b6fdc 100644 --- a/config-model/src/test/java/com/yahoo/vespa/documentmodel/DocumentModelBuilderTestCase.java +++ b/config-model/src/test/java/com/yahoo/vespa/documentmodel/DocumentModelBuilderTestCase.java @@ -1,7 +1,7 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.vespa.documentmodel; -import com.yahoo.document.DocumenttypesConfig; +import com.yahoo.document.config.DocumenttypesConfig; import com.yahoo.document.config.DocumentmanagerConfig; import com.yahoo.searchdefinition.SchemaBuilder; import com.yahoo.searchdefinition.AbstractSchemaTestCase; diff --git a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/search/AttributeChangeValidatorTest.java b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/search/AttributeChangeValidatorTest.java index 87c7c898f96..3cfde4c4d19 100644 --- a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/search/AttributeChangeValidatorTest.java +++ b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/search/AttributeChangeValidatorTest.java @@ -1,13 +1,18 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.vespa.model.application.validation.change.search; +import com.yahoo.config.application.api.ValidationId; +import com.yahoo.config.application.api.ValidationOverrides; import com.yahoo.config.provision.ClusterSpec; +import com.yahoo.test.ManualClock; import com.yahoo.vespa.model.application.validation.change.VespaConfigChangeAction; import org.junit.Test; import java.util.List; import static com.yahoo.vespa.model.application.validation.change.ConfigChangeTestUtils.newRestartAction; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; public class AttributeChangeValidatorTest { @@ -22,7 +27,9 @@ public class AttributeChangeValidatorTest { currentDocType(), nextDb().getDerivedConfiguration().getAttributeFields(), nextDb().getDerivedConfiguration().getIndexSchema(), - nextDocType()); + nextDocType(), + new ValidationOverrides(List.of()), + new ManualClock().instant()); } @Override @@ -202,4 +209,19 @@ public class AttributeChangeValidatorTest { "Field 'f1' changed: 
change hnsw index property " + "'neighbors-to-explore-at-insert' from '200' to '100'")); } + + @Test + public void removing_paged_requires_override() throws Exception { + try { + new Fixture("field f1 type tensor(x[10]) { indexing: attribute \n attribute: paged }", + "field f1 type tensor(x[10]) { indexing: attribute }"). + assertValidation(); + fail("Expected exception on removal of 'paged'"); + } + catch (ValidationOverrides.ValidationException e) { + assertTrue(e.getMessage().contains(ValidationId.pagedSettingRemoval.toString())); + } + } + + } diff --git a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/search/DocumentDatabaseChangeValidatorTest.java b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/search/DocumentDatabaseChangeValidatorTest.java index d5c84be2008..aba5c2aa05c 100644 --- a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/search/DocumentDatabaseChangeValidatorTest.java +++ b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/search/DocumentDatabaseChangeValidatorTest.java @@ -2,7 +2,9 @@ package com.yahoo.vespa.model.application.validation.change.search; import com.yahoo.config.application.api.ValidationId; +import com.yahoo.config.application.api.ValidationOverrides; import com.yahoo.config.provision.ClusterSpec; +import com.yahoo.test.ManualClock; import com.yahoo.vespa.model.application.validation.change.VespaConfigChangeAction; import org.junit.Test; @@ -25,7 +27,9 @@ public class DocumentDatabaseChangeValidatorTest { currentDb(), currentDocType(), nextDb(), - nextDocType()); + nextDocType(), + new ValidationOverrides(List.of()), + new ManualClock().instant()); } @Override diff --git a/config-model/src/test/java/com/yahoo/vespa/model/builder/xml/dom/ContentBuilderTest.java b/config-model/src/test/java/com/yahoo/vespa/model/builder/xml/dom/ContentBuilderTest.java index 6d3e83af927..68c623ec9a3 100644 --- 
a/config-model/src/test/java/com/yahoo/vespa/model/builder/xml/dom/ContentBuilderTest.java +++ b/config-model/src/test/java/com/yahoo/vespa/model/builder/xml/dom/ContentBuilderTest.java @@ -802,7 +802,7 @@ public class ContentBuilderTest extends DomBuilderTest { public void ensureFeedSequencerIsControlledByFlag() { verifyFeedSequencer("LATENCY", "LATENCY"); verifyFeedSequencer("ADAPTIVE", "ADAPTIVE"); - verifyFeedSequencer("THROUGHPUT", "LATENCY", 0); + verifyFeedSequencer("THROUGHPUT", "THROUGHPUT", 0); verifyFeedSequencer("THROUGHPUT", "THROUGHPUT", 0.1); verifyFeedSequencer("THOUGHPUT", "LATENCY"); diff --git a/config-model/src/test/java/com/yahoo/vespa/model/container/ContainerClusterTest.java b/config-model/src/test/java/com/yahoo/vespa/model/container/ContainerClusterTest.java index 2016cea02a9..560ac28b6f7 100755 --- a/config-model/src/test/java/com/yahoo/vespa/model/container/ContainerClusterTest.java +++ b/config-model/src/test/java/com/yahoo/vespa/model/container/ContainerClusterTest.java @@ -39,9 +39,18 @@ import java.util.Collection; import java.util.Iterator; import java.util.List; import java.util.Objects; +import java.util.OptionalInt; +import java.util.OptionalLong; import java.util.Set; import java.util.stream.Collectors; +import static com.yahoo.config.model.api.ApplicationClusterEndpoint.RoutingMethod.exclusive; +import static com.yahoo.config.model.api.ApplicationClusterEndpoint.RoutingMethod.shared; +import static com.yahoo.config.model.api.ApplicationClusterEndpoint.RoutingMethod.sharedLayer4; +import static com.yahoo.config.model.api.ApplicationClusterEndpoint.Scope.application; +import static com.yahoo.config.model.api.ApplicationClusterEndpoint.Scope.global; +import static com.yahoo.config.provision.SystemName.cd; +import static com.yahoo.config.provision.SystemName.main; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.hasItem; import static org.hamcrest.Matchers.hasKey; @@ -358,32 +367,78 @@ 
public class ContainerClusterTest { @Test public void generatesCorrectRoutingInfo() { + // main system: + assertNames(main, + ApplicationId.from("t1", "a1", "i1"), + Set.of(), + List.of("search-cluster.i1.a1.t1.endpoint.suffix"), + List.of("search-cluster--i1--a1--t1.endpoint.suffix")); + + assertNames(main, + ApplicationId.from("t1", "a1", "default"), + Set.of(), + List.of("search-cluster.a1.t1.endpoint.suffix"), + List.of("search-cluster--a1--t1.endpoint.suffix")); - assertNames(ApplicationId.from("t1", "a1", "i1"), + assertNames(main, + ApplicationId.from("t1", "default", "default"), Set.of(), - List.of("search-cluster.i1.a1.t1.endpoint.suffix", "search-cluster--i1--a1--t1.endpoint.suffix")); + List.of("search-cluster.default.t1.endpoint.suffix"), + List.of("search-cluster--default--t1.endpoint.suffix")); + + assertNames(main, + ApplicationId.from("t1", "a1", "default"), + Set.of(new ContainerEndpoint("not-in-this-cluster", global, List.of("foo", "bar"))), + List.of("search-cluster.a1.t1.endpoint.suffix"), + List.of("search-cluster--a1--t1.endpoint.suffix")); + + assertNames(main, + ApplicationId.from("t1", "a1", "default"), + Set.of(new ContainerEndpoint("search-cluster", global, List.of("rotation-1.x.y.z", "rotation-2.x.y.z"), OptionalInt.empty(), sharedLayer4), + new ContainerEndpoint("search-cluster", application, List.of("app-rotation.x.y.z"), OptionalInt.of(3), sharedLayer4)), + List.of("search-cluster.a1.t1.endpoint.suffix", "rotation-1.x.y.z", "rotation-2.x.y.z", "app-rotation.x.y.z"), + List.of("search-cluster--a1--t1.endpoint.suffix")); + + // cd system: + assertNames(cd, + ApplicationId.from("t1", "a1", "i1"), + Set.of(), + List.of("search-cluster.cd.i1.a1.t1.endpoint.suffix"), + List.of("search-cluster--cd--i1--a1--t1.endpoint.suffix")); - assertNames(ApplicationId.from("t1", "a1", "default"), + assertNames(cd, + ApplicationId.from("t1", "a1", "default"), Set.of(), - List.of("search-cluster.a1.t1.endpoint.suffix", 
"search-cluster--a1--t1.endpoint.suffix")); + List.of("search-cluster.cd.a1.t1.endpoint.suffix"), + List.of("search-cluster--cd--a1--t1.endpoint.suffix")); - assertNames(ApplicationId.from("t1", "default", "default"), + assertNames(cd, + ApplicationId.from("t1", "default", "default"), Set.of(), - List.of("search-cluster.default.t1.endpoint.suffix", "search-cluster--default--t1.endpoint.suffix")); + List.of("search-cluster.cd.default.t1.endpoint.suffix"), + List.of("search-cluster--cd--default--t1.endpoint.suffix")); + + assertNames(cd, + ApplicationId.from("t1", "a1", "default"), + Set.of(new ContainerEndpoint("not-in-this-cluster", global, List.of("foo", "bar"))), + List.of("search-cluster.cd.a1.t1.endpoint.suffix"), + List.of("search-cluster--cd--a1--t1.endpoint.suffix")); - assertNames(ApplicationId.from("t1", "a1", "default"), - Set.of(new ContainerEndpoint("not-in-this-cluster", ApplicationClusterEndpoint.Scope.global, List.of("foo", "bar"))), - List.of("search-cluster.a1.t1.endpoint.suffix", "search-cluster--a1--t1.endpoint.suffix")); + assertNames(cd, + ApplicationId.from("t1", "a1", "default"), + Set.of(new ContainerEndpoint("search-cluster", global, List.of("rotation-1.x.y.z", "rotation-2.x.y.z"), OptionalInt.empty(), sharedLayer4), + new ContainerEndpoint("search-cluster", global, List.of("a--b.x.y.z", "rotation-2.x.y.z"), OptionalInt.empty(), shared), + new ContainerEndpoint("search-cluster", application, List.of("app-rotation.x.y.z"), OptionalInt.of(3), sharedLayer4), + new ContainerEndpoint("not-supported", global, List.of("not.supported"), OptionalInt.empty(), exclusive)), + List.of("search-cluster.cd.a1.t1.endpoint.suffix", "rotation-1.x.y.z", "rotation-2.x.y.z", "app-rotation.x.y.z"), + List.of("search-cluster--cd--a1--t1.endpoint.suffix", "a--b.x.y.z", "rotation-2.x.y.z")); - assertNames(ApplicationId.from("t1", "a1", "default"), - Set.of(new ContainerEndpoint("search-cluster", ApplicationClusterEndpoint.Scope.global, List.of("rotation-1.x.y.z", 
"rotation-2.x.y.z")), - new ContainerEndpoint("search-cluster", ApplicationClusterEndpoint.Scope.application, List.of("app-rotation.x.y.z"))), - List.of("search-cluster.a1.t1.endpoint.suffix", "search-cluster--a1--t1.endpoint.suffix", "rotation-1.x.y.z", "rotation-2.x.y.z", "app-rotation.x.y.z")); } - private void assertNames(ApplicationId appId, Set<ContainerEndpoint> globalEndpoints, List<String> expectedNames) { + private void assertNames(SystemName systemName, ApplicationId appId, Set<ContainerEndpoint> globalEndpoints, List<String> expectedSharedL4Names, List<String> expectedSharedNames) { + Zone zone = new Zone(systemName, Environment.defaultEnvironment(), RegionName.defaultName()); DeployState state = new DeployState.Builder() - .zone(Zone.defaultZone()) + .zone(zone) .endpoints(globalEndpoints) .properties(new TestProperties() .setHostedVespa(true) @@ -395,8 +450,26 @@ public class ContainerClusterTest { addContainer(root, cluster, "c1", "host-c1"); cluster.doPrepare(state); List<ApplicationClusterEndpoint> endpoints = cluster.endpoints(); + + assertNames(expectedSharedNames, endpoints.stream().filter(e -> e.routingMethod() == shared).collect(Collectors.toList())); + assertNames(expectedSharedL4Names, endpoints.stream().filter(e -> e.routingMethod() == sharedLayer4).collect(Collectors.toList())); + + List<ContainerEndpoint> endpointsWithWeight = + globalEndpoints.stream().filter(endpoint -> endpoint.weight().isPresent()).collect(Collectors.toList()); + endpointsWithWeight.stream() + .filter(ce -> ce.weight().isPresent()) + .forEach(ce -> assertTrue(endpointsMatch(ce, endpoints))); + } + + private void assertNames(List<String> expectedNames, List<ApplicationClusterEndpoint> endpoints) { assertEquals(expectedNames.size(), endpoints.size()); - expectedNames.forEach(expected -> assertTrue("Endpoint not matched " + expected, endpoints.stream().anyMatch(e -> Objects.equals(e.dnsName().value(), expected)))); + expectedNames.forEach(expected -> assertTrue("Endpoint 
not matched " + expected + " was: " + endpoints, endpoints.stream().anyMatch(e -> Objects.equals(e.dnsName().value(), expected)))); + } + + private boolean endpointsMatch(ContainerEndpoint configuredEndpoint, List<ApplicationClusterEndpoint> clusterEndpoints) { + return clusterEndpoints.stream().anyMatch(e -> + configuredEndpoint.names().contains(e.dnsName().value()) && + configuredEndpoint.weight().getAsInt() == e.weight()); } private void verifyTesterApplicationInstalledBundles(Zone zone, List<String> expectedBundleNames) { diff --git a/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/empty.cfg b/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/empty.cfg index 08b0e6809ce..26aab134699 100644 --- a/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/empty.cfg +++ b/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/empty.cfg @@ -1 +1 @@ -enableGroupingSessionCache true
\ No newline at end of file +enableGroupingSessionCache true diff --git a/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/explicit-reference-override.cfg b/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/explicit-reference-override.cfg index fc77c5d82fa..5f48b7b75c2 100644 --- a/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/explicit-reference-override.cfg +++ b/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/explicit-reference-override.cfg @@ -11,4 +11,4 @@ queryprofile[1].property[0].overridable "" queryprofile[1].reference[0].name "a" queryprofile[1].reference[0].value "a1" queryprofile[1].reference[0].overridable "" -enableGroupingSessionCache true
\ No newline at end of file +enableGroupingSessionCache true diff --git a/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/newsbe-query-profiles-simple.cfg b/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/newsbe-query-profiles-simple.cfg index 337623bc448..954a6b8d68a 100644 --- a/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/newsbe-query-profiles-simple.cfg +++ b/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/newsbe-query-profiles-simple.cfg @@ -18,4 +18,4 @@ queryprofile[0].queryprofilevariant[0].fordimensionvalues[2] "sc" queryprofile[0].queryprofilevariant[0].property[0].name "scthumbnail.sourcecountry" queryprofile[0].queryprofilevariant[0].property[0].value "uk" queryprofile[0].queryprofilevariant[0].property[0].overridable "" -enableGroupingSessionCache true
\ No newline at end of file +enableGroupingSessionCache true diff --git a/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/newsfe-query-profiles-simple.cfg b/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/newsfe-query-profiles-simple.cfg index b3e41d88233..33ef2610d1d 100644 --- a/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/newsfe-query-profiles-simple.cfg +++ b/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/newsfe-query-profiles-simple.cfg @@ -27,4 +27,4 @@ queryprofile[1].type "" queryprofile[1].reference[0].name "source.news" queryprofile[1].reference[0].value "backend/news" queryprofile[1].reference[0].overridable "" -enableGroupingSessionCache true
\ No newline at end of file +enableGroupingSessionCache true diff --git a/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/query-profile-variants-configuration.cfg b/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/query-profile-variants-configuration.cfg index 8cf8385f397..c10e0b93560 100644 --- a/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/query-profile-variants-configuration.cfg +++ b/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/query-profile-variants-configuration.cfg @@ -46,4 +46,4 @@ queryprofile[2].type "" queryprofile[2].property[0].name "a" queryprofile[2].property[0].value "a1" queryprofile[2].property[0].overridable "" -enableGroupingSessionCache true
\ No newline at end of file +enableGroupingSessionCache true diff --git a/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/query-profile-variants2-configuration.cfg b/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/query-profile-variants2-configuration.cfg index 0bce407e8ef..2f9879dc721 100644 --- a/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/query-profile-variants2-configuration.cfg +++ b/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/query-profile-variants2-configuration.cfg @@ -33,8 +33,8 @@ queryprofile[1].queryprofilevariant[0].property[0].value "default" queryprofile[1].queryprofilevariant[0].property[0].overridable "" queryprofile[1].queryprofilevariant[0].reference[0].name "model" queryprofile[1].queryprofilevariant[0].reference[0].value "querylove" -queryprofile[1].queryprofilevariant[1].fordimensionvalues[0] "*" queryprofile[1].queryprofilevariant[0].reference[0].overridable "" +queryprofile[1].queryprofilevariant[1].fordimensionvalues[0] "*" queryprofile[1].queryprofilevariant[1].fordimensionvalues[1] "default" queryprofile[1].queryprofilevariant[1].property[0].name "model.defaultIndex" queryprofile[1].queryprofilevariant[1].property[0].value "default" diff --git a/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/query-profiles.cfg b/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/query-profiles.cfg index 54997e152f3..18fc48fc7c9 100644 --- a/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/query-profiles.cfg +++ b/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/query-profiles.cfg @@ -103,4 +103,4 @@ queryprofiletype[2].field[0].type "string" queryprofiletype[2].field[0].overridable false queryprofiletype[2].field[0].mandatory false queryprofiletype[2].field[0].alias "" -enableGroupingSessionCache true
\ No newline at end of file +enableGroupingSessionCache true diff --git a/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/variants-of-explicit-compound-with-reference.cfg b/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/variants-of-explicit-compound-with-reference.cfg index fdfd1955491..bb125065671 100644 --- a/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/variants-of-explicit-compound-with-reference.cfg +++ b/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/variants-of-explicit-compound-with-reference.cfg @@ -20,9 +20,9 @@ queryprofile[2].queryprofilevariant[0].property[0].value "a.b.x1" queryprofile[2].queryprofilevariant[0].property[0].overridable "" queryprofile[2].queryprofilevariant[0].reference[0].name "a" queryprofile[2].queryprofilevariant[0].reference[0].value "a2" -queryprofile[2].queryprofilevariant[1].fordimensionvalues[0] "x2" queryprofile[2].queryprofilevariant[0].reference[0].overridable "" +queryprofile[2].queryprofilevariant[1].fordimensionvalues[0] "x2" queryprofile[2].queryprofilevariant[1].property[0].name "a.b" queryprofile[2].queryprofilevariant[1].property[0].value "a.b.x2" queryprofile[2].queryprofilevariant[1].property[0].overridable "" -enableGroupingSessionCache true
\ No newline at end of file +enableGroupingSessionCache true diff --git a/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/variants-of-explicit-compound.cfg b/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/variants-of-explicit-compound.cfg index 6f66a3bd441..f867ca9a56b 100644 --- a/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/variants-of-explicit-compound.cfg +++ b/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/variants-of-explicit-compound.cfg @@ -17,4 +17,4 @@ queryprofile[1].queryprofilevariant[1].fordimensionvalues[0] "x2" queryprofile[1].queryprofilevariant[1].property[0].name "a.b" queryprofile[1].queryprofilevariant[1].property[0].value "a.b.x2" queryprofile[1].queryprofilevariant[1].property[0].overridable "" -enableGroupingSessionCache true
\ No newline at end of file +enableGroupingSessionCache true diff --git a/config-model/src/test/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilderTest.java b/config-model/src/test/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilderTest.java index 5516c74f9a6..8ceb74c3d7e 100644 --- a/config-model/src/test/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilderTest.java +++ b/config-model/src/test/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilderTest.java @@ -34,10 +34,8 @@ import com.yahoo.container.handler.metrics.MetricsV2Handler; import com.yahoo.container.handler.observability.ApplicationStatusHandler; import com.yahoo.container.jdisc.JdiscBindingsConfig; import com.yahoo.container.jdisc.secretstore.SecretStoreConfig; -import com.yahoo.container.servlet.ServletConfigConfig; import com.yahoo.container.usability.BindingsOverviewHandler; import com.yahoo.jdisc.http.ConnectorConfig; -import com.yahoo.jdisc.http.ServletPathsConfig; import com.yahoo.net.HostName; import com.yahoo.path.Path; import com.yahoo.prelude.cluster.QrMonitorConfig; @@ -321,40 +319,6 @@ public class ContainerModelBuilderTest extends ContainerModelBuilderTestBase { } @Test - public void servlets_are_included_in_ServletPathConfig() { - createClusterWithServlet(); - ServletPathsConfig servletPathsConfig = root.getConfig(ServletPathsConfig.class, "default"); - assertThat(servletPathsConfig.servlets().values().iterator().next().path(), is("p/a/t/h")); - } - - @Test - public void servletconfig_is_produced() { - createClusterWithServlet(); - - String configId = getContainerCluster("default").getServletMap(). 
- values().iterator().next().getConfigId(); - - ServletConfigConfig servletConfig = root.getConfig(ServletConfigConfig.class, configId); - - assertThat(servletConfig.map().get("myKey"), is("myValue")); - } - - private void createClusterWithServlet() { - Element clusterElem = DomBuilderTest.parse( - "<container id='default' version='1.0'>", - " <servlet id='myServlet' class='myClass' bundle='myBundle'>", - " <path>p/a/t/h</path>", - " <servlet-config>", - " <myKey>myValue</myKey>", - " </servlet-config>", - " </servlet>", - "</container>"); - - createModel(root, clusterElem); - } - - - @Test public void processing_handler_bindings_can_be_overridden() { Element clusterElem = DomBuilderTest.parse( "<container id='default' version='1.0'>", @@ -1096,6 +1060,42 @@ public class ContainerModelBuilderTest extends ContainerModelBuilderTestBase { } } + @Test + public void logs_deployment_spec_deprecations() throws Exception { + String containerService = joinLines("<container id='foo' version='1.0'>", + " <nodes>", + " <node hostalias='host1' />", + " </nodes>", + "</container>"); + String deploymentXml = joinLines("<deployment version='1.0'>", + " <prod global-service-id='foo'>", + " <region active='true'>us-east-1</region>", + " </prod>", + "</deployment>"); + + ApplicationPackage applicationPackage = new MockApplicationPackage.Builder() + .withServices(containerService) + .withDeploymentSpec(deploymentXml) + .build(); + + TestLogger logger = new TestLogger(); + DeployState deployState = new DeployState.Builder() + .applicationPackage(applicationPackage) + .zone(new Zone(Environment.prod, RegionName.from("us-east-1"))) + .properties(new TestProperties().setHostedVespa(true)) + .deployLogger(logger) + .build(); + + createModel(root, deployState, null, DomBuilderTest.parse(containerService)); + assertFalse(logger.msgs.isEmpty()); + assertEquals(Level.WARNING, logger.msgs.get(0).getFirst()); + assertEquals(Level.WARNING, logger.msgs.get(1).getFirst()); + assertEquals("Element 
'prod' contains deprecated attribute: 'global-service-id'. See https://cloud.vespa.ai/en/reference/routing#deprecated-syntax", + logger.msgs.get(0).getSecond()); + assertEquals("Element 'region' contains deprecated attribute: 'active'. See https://cloud.vespa.ai/en/reference/routing#deprecated-syntax", + logger.msgs.get(1).getSecond()); + } + private void assertComponentConfigured(ApplicationContainerCluster cluster, String componentId) { Component<?, ?> component = cluster.getComponentsMap().get(ComponentId.fromString(componentId)); assertNotNull(component); diff --git a/config-model/src/test/java/com/yahoo/vespa/model/container/xml/JvmOptionsTest.java b/config-model/src/test/java/com/yahoo/vespa/model/container/xml/JvmOptionsTest.java index 9fa86d38142..a674a06d45e 100644 --- a/config-model/src/test/java/com/yahoo/vespa/model/container/xml/JvmOptionsTest.java +++ b/config-model/src/test/java/com/yahoo/vespa/model/container/xml/JvmOptionsTest.java @@ -2,6 +2,7 @@ package com.yahoo.vespa.model.container.xml; +import com.yahoo.collections.Pair; import com.yahoo.config.application.api.ApplicationPackage; import com.yahoo.config.model.NullConfigModelRegistry; import com.yahoo.config.model.builder.xml.test.DomBuilderTest; @@ -17,6 +18,7 @@ import org.w3c.dom.Element; import org.xml.sax.SAXException; import java.io.IOException; +import java.util.logging.Level; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; @@ -132,14 +134,52 @@ public class JvmOptionsTest extends ContainerModelBuilderTestBase { @Test public void requireThatJvmGCOptionsIsHonoured() throws IOException, SAXException { - verifyJvmGCOptions(false, null,null, ContainerCluster.G1GC); - verifyJvmGCOptions(true, null,null, ContainerCluster.CMS); - verifyJvmGCOptions(true, "",null, ContainerCluster.CMS); - verifyJvmGCOptions(false, "-XX:+UseConcMarkSweepGC",null, "-XX:+UseConcMarkSweepGC"); - verifyJvmGCOptions(true, "-XX:+UseConcMarkSweepGC",null, 
"-XX:+UseConcMarkSweepGC"); - verifyJvmGCOptions(false, null,"-XX:+UseG1GC", "-XX:+UseG1GC"); - verifyJvmGCOptions(false, "-XX:+UseConcMarkSweepGC","-XX:+UseG1GC", "-XX:+UseG1GC"); - verifyJvmGCOptions(false, null,"-XX:+UseConcMarkSweepGC", "-XX:+UseConcMarkSweepGC"); + verifyJvmGCOptions(false, null, null, ContainerCluster.G1GC); + verifyJvmGCOptions(true, null, null, ContainerCluster.PARALLEL_GC); + verifyJvmGCOptions(true, "", null, ContainerCluster.PARALLEL_GC); + verifyJvmGCOptions(false, "-XX:+UseG1GC", null, "-XX:+UseG1GC"); + verifyJvmGCOptions(true, "-XX:+UseG1GC", null, "-XX:+UseG1GC"); + verifyJvmGCOptions(false, null, "-XX:+UseG1GC", "-XX:+UseG1GC"); + verifyJvmGCOptions(false, "-XX:+UseParallelGC", "-XX:+UseG1GC", "-XX:+UseG1GC"); + verifyJvmGCOptions(false, null, "-XX:+UseParallelGC", "-XX:+UseParallelGC"); + } + + @Test + public void requireThatJvmGcOptionsAreLogged() throws IOException, SAXException { + verifyLoggingOfJvmOptions(true, "gc-options", "-XX:+UseCMSInitiatingOccupancyOnly foo bar"); + verifyLoggingOfJvmOptions(true, "gc-options", "-XX:+UseConcMarkSweepGC"); + verifyLoggingOfJvmOptions(false, "gc-options", "-XX:+UseConcMarkSweepGC"); + } + + private void verifyLoggingOfJvmOptions(boolean isHosted, String optionName, String override) throws IOException, SAXException { + String servicesXml = + "<container version='1.0'>" + + " <nodes>" + + " <jvm " + optionName + "='" + override + "'/>" + + " <node hostalias='mockhost'/>" + + " </nodes>" + + "</container>"; + ApplicationPackage app = new MockApplicationPackage.Builder().withServices(servicesXml).build(); + TestLogger logger = new TestLogger(); + new VespaModel(new NullConfigModelRegistry(), new DeployState.Builder() + .applicationPackage(app) + .deployLogger(logger) + .properties(new TestProperties().setHostedVespa(isHosted)) + .build()); + if (isHosted) { + Pair<Level, String> firstOption = logger.msgs.get(0); + assertEquals(Level.INFO, firstOption.getFirst()); + assertEquals("JVM " + 
(optionName.equals("gc-options") ? "GC " : "") + + "options from services.xml: " + override, firstOption.getSecond()); + } else { + assertEquals(0, logger.msgs.size()); + } + } + + @Test + public void requireThatJvmOptionsAreLogged() throws IOException, SAXException { + verifyLoggingOfJvmOptions(true, "options", "-Xms2G"); + verifyLoggingOfJvmOptions(false, "options", "-Xms2G"); } } diff --git a/config-model/src/test/java/helpers/CompareConfigTestHelper.java b/config-model/src/test/java/helpers/CompareConfigTestHelper.java index 18c0723c6a2..ba06ecc9397 100644 --- a/config-model/src/test/java/helpers/CompareConfigTestHelper.java +++ b/config-model/src/test/java/helpers/CompareConfigTestHelper.java @@ -19,6 +19,10 @@ import static org.junit.Assert.assertEquals; public class CompareConfigTestHelper { public static void assertSerializedConfigFileEquals(String filename, String actual) throws IOException { + IOUtils.writeFile(filename + ".actual", actual, false); + if (! actual.endsWith("\n")) { + IOUtils.writeFile(filename + ".actual", "\n", true); + } assertSerializedConfigEquals(IOUtils.readFile(new File(filename)), actual, false); } diff --git a/config-provisioning/src/main/java/com/yahoo/config/provision/Capacity.java b/config-provisioning/src/main/java/com/yahoo/config/provision/Capacity.java index 182b924e877..958a37e1432 100644 --- a/config-provisioning/src/main/java/com/yahoo/config/provision/Capacity.java +++ b/config-provisioning/src/main/java/com/yahoo/config/provision/Capacity.java @@ -58,8 +58,8 @@ public final class Capacity { */ public NodeType type() { return type; } - public Capacity withGroups(int groups) { - return new Capacity(min.withGroups(groups), max.withGroups(groups), required, canFail, type); + public Capacity withLimits(ClusterResources min, ClusterResources max) { + return new Capacity(min, max, required, canFail, type); } @Override diff --git a/config-provisioning/src/main/java/com/yahoo/config/provision/Deployer.java 
b/config-provisioning/src/main/java/com/yahoo/config/provision/Deployer.java index e7d28a3f65b..209f339f51f 100644 --- a/config-provisioning/src/main/java/com/yahoo/config/provision/Deployer.java +++ b/config-provisioning/src/main/java/com/yahoo/config/provision/Deployer.java @@ -16,9 +16,7 @@ public interface Deployer { * Creates a new deployment from the active application, if available. Will use the default timeout for deployment. * * @param application the active application to be redeployed - * @return a new deployment from the local active, or empty if a local active application - * was not present for this id (meaning it either is not active or deployed at another - * node in the config server cluster) + * @return a new deployment from the active application, or empty if application does not exist */ default Optional<Deployment> deployFromLocalActive(ApplicationId application) { return deployFromLocalActive(application, false); @@ -29,9 +27,7 @@ public interface Deployer { * * @param application the active application to be redeployed * @param bootstrap the deployment is done when bootstrapping - * @return a new deployment from the local active, or empty if a local active application - * was not present for this id (meaning it either is not active or deployed at another - * node in the config server cluster) + * @return a new deployment from the active application, or empty if application does not exist */ Optional<Deployment> deployFromLocalActive(ApplicationId application, boolean bootstrap); @@ -41,9 +37,7 @@ public interface Deployer { * * @param application the active application to be redeployed * @param timeout the timeout to use for each individual deployment operation - * @return a new deployment from the local active, or empty if a local active application - * was not present for this id (meaning it either is not active or active on another - * node in the config server cluster) + * @return a new deployment from the active application, or empty if 
application does not exist */ default Optional<Deployment> deployFromLocalActive(ApplicationId application, Duration timeout) { return deployFromLocalActive(application, timeout, false); @@ -56,9 +50,7 @@ public interface Deployer { * @param application the active application to be redeployed * @param timeout the timeout to use for each individual deployment operation * @param bootstrap the deployment is done when bootstrapping - * @return a new deployment from the local active, or empty if a local active application - * was not present for this id (meaning it either is not active or active on another - * node in the config server cluster) + * @return a new deployment from the active application, or empty if application does not exist */ Optional<Deployment> deployFromLocalActive(ApplicationId application, Duration timeout, boolean bootstrap); diff --git a/config-provisioning/src/main/java/com/yahoo/config/provision/InfraDeployer.java b/config-provisioning/src/main/java/com/yahoo/config/provision/InfraDeployer.java index c8816b8e1d5..7d0d489ac7f 100644 --- a/config-provisioning/src/main/java/com/yahoo/config/provision/InfraDeployer.java +++ b/config-provisioning/src/main/java/com/yahoo/config/provision/InfraDeployer.java @@ -18,4 +18,5 @@ public interface InfraDeployer { /** Deploys all supported infrastructure applications in this zone. 
*/ void activateAllSupportedInfraApplications(boolean propagateException); + } diff --git a/config-provisioning/src/main/java/com/yahoo/config/provision/NodeResources.java b/config-provisioning/src/main/java/com/yahoo/config/provision/NodeResources.java index b887a2a93e6..5daaee4299e 100644 --- a/config-provisioning/src/main/java/com/yahoo/config/provision/NodeResources.java +++ b/config-provisioning/src/main/java/com/yahoo/config/provision/NodeResources.java @@ -17,6 +17,7 @@ public class NodeResources { private static final double diskUnitCost = 0.0003; private static final NodeResources zero = new NodeResources(0, 0, 0, 0); + private static final NodeResources unspecified = new NodeResources(0, 0, 0, 0); public enum DiskSpeed { @@ -125,46 +126,56 @@ public class NodeResources { } public NodeResources withVcpu(double vcpu) { + ensureSpecified(); if (vcpu == this.vcpu) return this; return new NodeResources(vcpu, memoryGb, diskGb, bandwidthGbps, diskSpeed, storageType); } public NodeResources withMemoryGb(double memoryGb) { + ensureSpecified(); if (memoryGb == this.memoryGb) return this; return new NodeResources(vcpu, memoryGb, diskGb, bandwidthGbps, diskSpeed, storageType); } public NodeResources withDiskGb(double diskGb) { + ensureSpecified(); if (diskGb == this.diskGb) return this; return new NodeResources(vcpu, memoryGb, diskGb, bandwidthGbps, diskSpeed, storageType); } public NodeResources withBandwidthGbps(double bandwidthGbps) { + ensureSpecified(); if (bandwidthGbps == this.bandwidthGbps) return this; return new NodeResources(vcpu, memoryGb, diskGb, bandwidthGbps, diskSpeed, storageType); } public NodeResources with(DiskSpeed diskSpeed) { + ensureSpecified(); if (diskSpeed == this.diskSpeed) return this; return new NodeResources(vcpu, memoryGb, diskGb, bandwidthGbps, diskSpeed, storageType); } public NodeResources with(StorageType storageType) { + ensureSpecified(); if (storageType == this.storageType) return this; return new NodeResources(vcpu, memoryGb, 
diskGb, bandwidthGbps, diskSpeed, storageType); } /** Returns this with disk speed and storage type set to any */ public NodeResources justNumbers() { + if (isUnspecified()) return unspecified(); return with(NodeResources.DiskSpeed.any).with(StorageType.any); } /** Returns this with all numbers set to 0 */ public NodeResources justNonNumbers() { + if (isUnspecified()) return unspecified(); return withVcpu(0).withMemoryGb(0).withDiskGb(0).withBandwidthGbps(0); } public NodeResources subtract(NodeResources other) { + ensureSpecified(); + other.ensureSpecified(); if ( ! this.isInterchangeableWith(other)) throw new IllegalArgumentException(this + " and " + other + " are not interchangeable"); return new NodeResources(vcpu - other.vcpu, @@ -176,6 +187,7 @@ public class NodeResources { } public NodeResources add(NodeResources other) { + ensureSpecified(); if ( ! this.isInterchangeableWith(other)) throw new IllegalArgumentException(this + " and " + other + " are not interchangeable"); return new NodeResources(vcpu + other.vcpu, @@ -187,6 +199,8 @@ public class NodeResources { } private boolean isInterchangeableWith(NodeResources other) { + ensureSpecified(); + other.ensureSpecified(); if (this.diskSpeed != DiskSpeed.any && other.diskSpeed != DiskSpeed.any && this.diskSpeed != other.diskSpeed) return false; if (this.storageType != StorageType.any && other.storageType != StorageType.any && this.storageType != other.storageType) @@ -248,6 +262,8 @@ public class NodeResources { /** Returns true if all the resources of this are the same or larger than the given resources */ public boolean satisfies(NodeResources other) { + ensureSpecified(); + other.ensureSpecified(); if (this.vcpu < other.vcpu) return false; if (this.memoryGb < other.memoryGb) return false; if (this.diskGb < other.diskGb) return false; @@ -276,9 +292,14 @@ public class NodeResources { return true; } - public static NodeResources unspecified() { return zero; } + public static NodeResources unspecified() { 
return unspecified; } - public boolean isUnspecified() { return this.equals(zero); } + public boolean isUnspecified() { return this == unspecified; } + + private void ensureSpecified() { + if (isUnspecified()) + throw new IllegalStateException("Cannot perform this on unspecified resources"); + } // Returns squared euclidean distance of the relevant numerical values of two node resources public double distanceTo(NodeResources other) { diff --git a/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/ConfigProxyRpcServer.java b/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/ConfigProxyRpcServer.java index c7f6530f81c..158df654439 100644 --- a/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/ConfigProxyRpcServer.java +++ b/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/ConfigProxyRpcServer.java @@ -30,10 +30,10 @@ import java.util.logging.Logger; * * @author hmusum */ -public class ConfigProxyRpcServer implements Runnable, TargetWatcher, RpcServer { +public class ConfigProxyRpcServer implements Runnable, TargetWatcher { private final static Logger log = Logger.getLogger(ConfigProxyRpcServer.class.getName()); - private static final int TRACELEVEL = 6; + static final int TRACELEVEL = 6; private final Spec spec; private final Supervisor supervisor; @@ -66,7 +66,7 @@ public class ConfigProxyRpcServer implements Runnable, TargetWatcher, RpcServer } catch (InterruptedException e) { throw new RuntimeException(e); } - supervisor.transport().shutdown(); + supervisor.transport().shutdown().join(); } Spec getSpec() { @@ -79,10 +79,6 @@ public class ConfigProxyRpcServer implements Runnable, TargetWatcher, RpcServer this::ping) .methodDesc("ping") .returnDesc(0, "ret code", "return code, 0 is OK")); - supervisor.addMethod(new Method("printStatistics", "", "s", - this::printStatistics) - .methodDesc("printStatistics") - .returnDesc(0, "statistics", "Statistics for server")); supervisor.addMethod(new Method("listCachedConfig", "", "S", 
this::listCachedConfig) .methodDesc("list cached configs)") @@ -145,26 +141,6 @@ public class ConfigProxyRpcServer implements Runnable, TargetWatcher, RpcServer }); } - /** - * Returns a String with statistics data for the server. - * - * @param req a Request - */ - private void printStatistics(Request req) { - dispatchRpcRequest(req, () -> { - StringBuilder sb = new StringBuilder(); - sb.append("\nDelayed responses queue size: "); - sb.append(proxyServer.delayedResponses().size()); - sb.append("\nContents: "); - for (DelayedResponse delayed : proxyServer.delayedResponses().responses()) { - sb.append(delayed.getRequest().toString()).append("\n"); - } - - req.returnValues().add(new StringValue(sb.toString())); - req.returnRequest(); - }); - } - private void listCachedConfig(Request req) { dispatchRpcRequest(req, () -> listCachedConfig(req, false)); } @@ -201,7 +177,7 @@ public class ConfigProxyRpcServer implements Runnable, TargetWatcher, RpcServer private void invalidateCache(Request req) { dispatchRpcRequest(req, () -> { - proxyServer.getMemoryCache().clear(); + proxyServer.memoryCache().clear(); String[] s = new String[2]; s[0] = "0"; s[1] = "success"; @@ -237,7 +213,7 @@ public class ConfigProxyRpcServer implements Runnable, TargetWatcher, RpcServer private void dumpCache(Request req) { dispatchRpcRequest(req, () -> { - final MemoryCache memoryCache = proxyServer.getMemoryCache(); + final MemoryCache memoryCache = proxyServer.memoryCache(); req.returnValues().add(new StringValue(memoryCache.dumpCacheToDisk(req.parameters().get(0).asString(), memoryCache))); req.returnRequest(); }); @@ -269,12 +245,13 @@ public class ConfigProxyRpcServer implements Runnable, TargetWatcher, RpcServer * @param request a Request */ private void getConfigImpl(JRTServerConfigRequest request) { + ResponseHandler responseHandler = new ResponseHandler(); request.getRequestTrace().trace(TRACELEVEL, "Config proxy getConfig()"); log.log(Level.FINE, () ->"getConfig: " + 
request.getShortDescription() + ",config checksums=" + request.getRequestConfigChecksums()); if (!request.validateParameters()) { // Error code is set in verifyParameters if parameters are not OK. log.log(Level.WARNING, "Parameters for request " + request + " did not validate: " + request.errorCode() + " : " + request.errorMessage()); - returnErrorResponse(request, request.errorCode(), "Parameters for request " + request.getShortDescription() + " did not validate: " + request.errorMessage()); + responseHandler.returnErrorResponse(request, request.errorCode(), "Parameters for request " + request.getShortDescription() + " did not validate: " + request.errorMessage()); return; } try { @@ -282,13 +259,13 @@ public class ConfigProxyRpcServer implements Runnable, TargetWatcher, RpcServer if (config == null) { log.log(Level.FINEST, () -> "No config received yet for " + request.getShortDescription() + ", not sending response"); } else if (ProxyServer.configOrGenerationHasChanged(config, request)) { - returnOkResponse(request, config); + responseHandler.returnOkResponse(request, config); } else { log.log(Level.FINEST, () -> "No new config for " + request.getShortDescription() + ", not sending response"); } } catch (Exception e) { e.printStackTrace(); - returnErrorResponse(request, com.yahoo.vespa.config.ErrorCode.INTERNAL_ERROR, e.getMessage()); + responseHandler.returnErrorResponse(request, com.yahoo.vespa.config.ErrorCode.INTERNAL_ERROR, e.getMessage()); } } @@ -302,7 +279,7 @@ public class ConfigProxyRpcServer implements Runnable, TargetWatcher, RpcServer private void listCachedConfig(Request req, boolean full) { String[] ret; - MemoryCache cache = proxyServer.getMemoryCache(); + MemoryCache cache = proxyServer.memoryCache(); ret = new String[cache.size()]; int i = 0; for (RawConfig config : cache.values()) { @@ -348,29 +325,4 @@ public class ConfigProxyRpcServer implements Runnable, TargetWatcher, RpcServer // requesting this config? 
} - public void returnOkResponse(JRTServerConfigRequest request, RawConfig config) { - request.getRequestTrace().trace(TRACELEVEL, "Config proxy returnOkResponse()"); - request.addOkResponse(config.getPayload(), - config.getGeneration(), - config.applyOnRestart(), - config.getPayloadChecksums()); - log.log(Level.FINE, () -> "Return response: " + request.getShortDescription() + ",config checksums=" + config.getPayloadChecksums() + - ",generation=" + config.getGeneration()); - log.log(Level.FINEST, () -> "Config payload in response for " + request.getShortDescription() + ":" + config.getPayload()); - - - // TODO Catch exception for now, since the request might have been returned in CheckDelayedResponse - // TODO Move logic so that all requests are returned in CheckDelayedResponse - try { - request.getRequest().returnRequest(); - } catch (IllegalStateException e) { - log.log(Level.FINE, () -> "Something bad happened when sending response for '" + request.getShortDescription() + "':" + e.getMessage()); - } - } - - public void returnErrorResponse(JRTServerConfigRequest request, int errorCode, String message) { - request.getRequestTrace().trace(TRACELEVEL, "Config proxy returnErrorResponse()"); - request.addErrorResponse(errorCode, message); - request.getRequest().returnRequest(); - } } diff --git a/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/ConfigSourceClient.java b/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/ConfigSourceClient.java index 6e5fe2d3fd8..dae732e56ec 100644 --- a/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/ConfigSourceClient.java +++ b/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/ConfigSourceClient.java @@ -16,7 +16,7 @@ interface ConfigSourceClient { RawConfig getConfig(RawConfig input, JRTServerConfigRequest request); - void cancel(); + void shutdown(); void shutdownSourceConnections(); @@ -26,4 +26,6 @@ interface ConfigSourceClient { DelayedResponses delayedResponses(); + MemoryCache memoryCache(); + } diff 
--git a/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/DelayedResponseHandler.java b/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/DelayedResponseHandler.java index f77bd4b9138..0e8ebe0d9c9 100644 --- a/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/DelayedResponseHandler.java +++ b/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/DelayedResponseHandler.java @@ -6,10 +6,12 @@ import com.yahoo.vespa.config.RawConfig; import com.yahoo.vespa.config.protocol.JRTServerConfigRequest; import com.yahoo.yolean.Exceptions; -import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; import java.util.logging.Level; import java.util.logging.Logger; +import static com.yahoo.protect.Process.logAndDie; + /** * The run method of this class is executed periodically to return delayed responses * (requests use long polling, so config proxy needs to return a response when they time out). @@ -22,12 +24,13 @@ public class DelayedResponseHandler implements Runnable { private final DelayedResponses delayedResponses; private final MemoryCache memoryCache; - private final RpcServer rpcServer; + private final ResponseHandler responseHandler; + private final AtomicLong sentResponses = new AtomicLong(); - DelayedResponseHandler(DelayedResponses delayedResponses, MemoryCache memoryCache, RpcServer rpcServer) { + DelayedResponseHandler(DelayedResponses delayedResponses, MemoryCache memoryCache, ResponseHandler responseHandler) { this.delayedResponses = delayedResponses; this.memoryCache = memoryCache; - this.rpcServer = rpcServer; + this.responseHandler = responseHandler; } @Override @@ -41,25 +44,27 @@ public class DelayedResponseHandler implements Runnable { log.log(Level.FINEST, () -> "Running DelayedResponseHandler. There are " + delayedResponses.size() + " delayed responses. 
First one is " + delayedResponses.responses().peek()); DelayedResponse response; - AtomicInteger i = new AtomicInteger(0); while ((response = delayedResponses.responses().poll()) != null) { JRTServerConfigRequest request = response.getRequest(); ConfigCacheKey cacheKey = new ConfigCacheKey(request.getConfigKey(), request.getRequestDefMd5()); RawConfig config = memoryCache.get(cacheKey); if (config != null) { - rpcServer.returnOkResponse(request, config); - i.incrementAndGet(); + responseHandler.returnOkResponse(request, config); + sentResponses.incrementAndGet(); } else { log.log(Level.WARNING, "Timed out (timeout " + request.getTimeout() + ") getting config " + request.getConfigKey() + ", will retry"); } } - log.log(Level.FINEST, () -> "Finished running DelayedResponseHandler. " + i.get() + " delayed responses sent in " + - (System.currentTimeMillis() - start) + " ms"); + log.log(Level.FINEST, () -> "Finished running DelayedResponseHandler. " + sentResponses.get() + + " delayed responses sent in " + (System.currentTimeMillis() - start) + " ms"); } catch (Exception e) { // To avoid thread throwing exception and executor never running this again log.log(Level.WARNING, "Got exception in DelayedResponseHandler: " + Exceptions.toMessageString(e)); } catch (Throwable e) { - com.yahoo.protect.Process.logAndDie("Got error in DelayedResponseHandler, exiting: " + Exceptions.toMessageString(e)); + logAndDie("Got error in DelayedResponseHandler, exiting: " + Exceptions.toMessageString(e)); } } + + public long sentResponses() { return sentResponses.get(); } + } diff --git a/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/MemoryCacheConfigClient.java b/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/MemoryCacheConfigClient.java index 6e90ad16f50..f1be03f07d4 100644 --- a/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/MemoryCacheConfigClient.java +++ b/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/MemoryCacheConfigClient.java @@ -1,12 +1,14 @@ 
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.vespa.config.proxy; -import java.util.logging.Level; -import com.yahoo.vespa.config.*; +import com.yahoo.vespa.config.ConfigCacheKey; +import com.yahoo.vespa.config.ConfigKey; +import com.yahoo.vespa.config.RawConfig; import com.yahoo.vespa.config.protocol.JRTServerConfigRequest; import java.util.Collections; import java.util.List; +import java.util.logging.Level; import java.util.logging.Logger; /** @@ -44,7 +46,7 @@ class MemoryCacheConfigClient implements ConfigSourceClient { } @Override - public void cancel() {} + public void shutdown() {} @Override public void shutdownSourceConnections() {} @@ -64,4 +66,7 @@ class MemoryCacheConfigClient implements ConfigSourceClient { return delayedResponses; } + @Override + public MemoryCache memoryCache() { return cache; } + } diff --git a/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/ProxyServer.java b/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/ProxyServer.java index d063c45a3f7..0ae02e4c17b 100644 --- a/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/ProxyServer.java +++ b/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/ProxyServer.java @@ -1,27 +1,27 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.vespa.config.proxy; +import com.yahoo.concurrent.DaemonThreadFactory; import com.yahoo.config.subscription.ConfigSourceSet; import com.yahoo.jrt.Spec; import com.yahoo.jrt.Supervisor; import com.yahoo.jrt.Transport; -import java.util.logging.Level; import com.yahoo.log.LogSetup; import com.yahoo.log.event.Event; -import com.yahoo.concurrent.DaemonThreadFactory; import com.yahoo.vespa.config.RawConfig; import com.yahoo.vespa.config.protocol.JRTServerConfigRequest; import com.yahoo.vespa.config.proxy.filedistribution.FileDistributionAndUrlDownload; import com.yahoo.yolean.system.CatchSignals; import java.util.List; +import java.util.Objects; import java.util.concurrent.Callable; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.logging.Level; import java.util.logging.Logger; import static com.yahoo.vespa.config.proxy.Mode.ModeName.DEFAULT; @@ -40,27 +40,24 @@ public class ProxyServer implements Runnable { private static final int JRT_TRANSPORT_THREADS = 4; static final String DEFAULT_PROXY_CONFIG_SOURCES = "tcp/localhost:19070"; - private final static Logger log = Logger.getLogger(ProxyServer.class.getName()); + private static final Logger log = Logger.getLogger(ProxyServer.class.getName()); + private final AtomicBoolean signalCaught = new AtomicBoolean(false); private final Supervisor supervisor; private final ConfigProxyRpcServer rpcServer; - private ConfigSourceSet configSource; - - private volatile ConfigSourceClient configClient; - - private final MemoryCache memoryCache; private final FileDistributionAndUrlDownload fileDistributionAndUrlDownload; + private ConfigSourceSet configSource; + private volatile ConfigSourceClient configClient; private volatile Mode mode = new Mode(DEFAULT); - ProxyServer(Spec spec, ConfigSourceSet 
source, MemoryCache memoryCache, ConfigSourceClient configClient) { - this.configSource = source; - supervisor = new Supervisor(new Transport("proxy-server", JRT_TRANSPORT_THREADS)).setDropEmptyBuffers(true); + ProxyServer(Spec spec, ConfigSourceSet source, ConfigSourceClient configClient) { + this.configSource = Objects.requireNonNull(source); log.log(Level.FINE, () -> "Using config source '" + source); - this.memoryCache = memoryCache; + this.supervisor = new Supervisor(new Transport("proxy-server", JRT_TRANSPORT_THREADS)).setDropEmptyBuffers(true); this.rpcServer = createRpcServer(spec); - this.configClient = (configClient == null) ? createRpcClient(rpcServer, source, memoryCache) : configClient; + this.configClient = Objects.requireNonNull(configClient); this.fileDistributionAndUrlDownload = new FileDistributionAndUrlDownload(supervisor, source); } @@ -97,12 +94,12 @@ public class ProxyServer implements Runnable { switch (newMode.getMode()) { case MEMORYCACHE: configClient.shutdownSourceConnections(); - configClient = new MemoryCacheConfigClient(memoryCache); + configClient = new MemoryCacheConfigClient(configClient.memoryCache()); this.mode = new Mode(modeName); break; case DEFAULT: flush(); - configClient = createRpcClient(rpcServer, configSource, memoryCache); + configClient = createRpcClient(configSource); this.mode = new Mode(modeName); break; default: @@ -115,8 +112,8 @@ public class ProxyServer implements Runnable { return (spec == null) ? 
null : new ConfigProxyRpcServer(this, supervisor, spec); // TODO: Try to avoid first argument being 'this' } - private static RpcConfigSourceClient createRpcClient(RpcServer rpcServer, ConfigSourceSet source, MemoryCache memoryCache) { - return new RpcConfigSourceClient(rpcServer, source, memoryCache); + private static RpcConfigSourceClient createRpcClient(ConfigSourceSet source) { + return new RpcConfigSourceClient(new ResponseHandler(), source); } private void setupSignalHandler() { @@ -159,7 +156,7 @@ public class ProxyServer implements Runnable { Event.started("configproxy"); ConfigSourceSet configSources = new ConfigSourceSet(properties.configSources); - ProxyServer proxyServer = new ProxyServer(new Spec(null, port), configSources, new MemoryCache(), null); + ProxyServer proxyServer = new ProxyServer(new Spec(null, port), configSources, createRpcClient(configSources)); // catch termination and interrupt signal proxyServer.setupSignalHandler(); Thread proxyserverThread = threadFactory.newThread(proxyServer); @@ -169,7 +166,8 @@ public class ProxyServer implements Runnable { } static Properties getSystemProperties() { - final String[] inputConfigSources = System.getProperty("proxyconfigsources", DEFAULT_PROXY_CONFIG_SOURCES).split(","); + String[] inputConfigSources = System.getProperty("proxyconfigsources", + DEFAULT_PROXY_CONFIG_SOURCES).split(","); return new Properties(inputConfigSources); } @@ -184,15 +182,15 @@ public class ProxyServer implements Runnable { // Cancels all config instances and flushes the cache. When this method returns, // the cache will not be updated again before someone calls getConfig(). 
private synchronized void flush() { - memoryCache.clear(); - configClient.cancel(); + configClient.memoryCache().clear(); + configClient.shutdown(); } void stop() { Event.stopping("configproxy", "shutdown rpcServer"); if (rpcServer != null) rpcServer.shutdown(); Event.stopping("configproxy", "cancel configClient"); - if (configClient != null) configClient.cancel(); + configClient.shutdown(); Event.stopping("configproxy", "flush"); flush(); Event.stopping("configproxy", "close fileDistribution"); @@ -200,8 +198,8 @@ public class ProxyServer implements Runnable { Event.stopping("configproxy", "stop complete"); } - MemoryCache getMemoryCache() { - return memoryCache; + MemoryCache memoryCache() { + return configClient.memoryCache(); } String getActiveSourceConnection() { @@ -215,7 +213,7 @@ public class ProxyServer implements Runnable { void updateSourceConnections(List<String> sources) { configSource = new ConfigSourceSet(sources); flush(); - configClient = createRpcClient(rpcServer, configSource, memoryCache); + configClient = createRpcClient(configSource); } DelayedResponses delayedResponses() { diff --git a/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/ResponseHandler.java b/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/ResponseHandler.java new file mode 100644 index 00000000000..c9cfbdd3e16 --- /dev/null +++ b/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/ResponseHandler.java @@ -0,0 +1,63 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+package com.yahoo.vespa.config.proxy; + +import com.yahoo.vespa.config.RawConfig; +import com.yahoo.vespa.config.protocol.JRTServerConfigRequest; + +import java.util.Optional; +import java.util.concurrent.atomic.AtomicLong; +import java.util.logging.Level; +import java.util.logging.Logger; + +import static com.yahoo.vespa.config.proxy.ConfigProxyRpcServer.TRACELEVEL; + +/** + * Returns ok or error responses for config requests handled by the config proxy. + * + * @author hmusum + */ +public class ResponseHandler { + + private final Optional<AtomicLong> sentResponses; + + public ResponseHandler() { + this(false); + } + + // For testing only + ResponseHandler(boolean trackResponses) { + sentResponses = trackResponses ? Optional.of(new AtomicLong()) : Optional.empty(); + } + + private final static Logger log = Logger.getLogger(ResponseHandler.class.getName()); + + public void returnOkResponse(JRTServerConfigRequest request, RawConfig config) { + request.getRequestTrace().trace(TRACELEVEL, "Config proxy returnOkResponse()"); + request.addOkResponse(config.getPayload(), + config.getGeneration(), + config.applyOnRestart(), + config.getPayloadChecksums()); + log.log(Level.FINE, () -> "Return response: " + request.getShortDescription() + ",config checksums=" + config.getPayloadChecksums() + + ",generation=" + config.getGeneration()); + log.log(Level.FINEST, () -> "Config payload in response for " + request.getShortDescription() + ":" + config.getPayload()); + + + // TODO Catch exception for now, since the request might have been returned in CheckDelayedResponse + // TODO Move logic so that all requests are returned in CheckDelayedResponse + try { + request.getRequest().returnRequest(); + } catch (IllegalStateException e) { + log.log(Level.FINE, () -> "Something bad happened when sending response for '" + request.getShortDescription() + "':" + e.getMessage()); + } + sentResponses.ifPresent(AtomicLong::getAndIncrement); + } + + public void returnErrorResponse(JRTServerConfigRequest
request, int errorCode, String message) { + request.getRequestTrace().trace(TRACELEVEL, "Config proxy returnErrorResponse()"); + request.addErrorResponse(errorCode, message); + request.getRequest().returnRequest(); + } + + public long sentResponses() { return sentResponses.map(AtomicLong::get).orElse(0L); } + +} diff --git a/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/RpcConfigSourceClient.java b/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/RpcConfigSourceClient.java index 5df7b1fc021..56fdae477b2 100644 --- a/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/RpcConfigSourceClient.java +++ b/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/RpcConfigSourceClient.java @@ -4,7 +4,7 @@ package com.yahoo.vespa.config.proxy; import com.yahoo.concurrent.DaemonThreadFactory; import com.yahoo.config.ConfigurationRuntimeException; import com.yahoo.config.subscription.ConfigSourceSet; -import com.yahoo.config.subscription.impl.JRTConfigRequester; +import com.yahoo.config.subscription.impl.JrtConfigRequesters; import com.yahoo.jrt.Request; import com.yahoo.jrt.Spec; import com.yahoo.jrt.Supervisor; @@ -17,6 +17,7 @@ import com.yahoo.vespa.config.TimingValues; import com.yahoo.vespa.config.protocol.JRTServerConfigRequest; import java.util.ArrayList; +import java.util.Collection; import java.util.List; import java.util.Map; import java.util.Optional; @@ -43,29 +44,30 @@ class RpcConfigSourceClient implements ConfigSourceClient, Runnable { private final Supervisor supervisor = new Supervisor(new Transport("config-source-client")); - private final RpcServer rpcServer; + private final ResponseHandler responseHandler; private final ConfigSourceSet configSourceSet; - private final Map<ConfigCacheKey, Subscriber> activeSubscribers = new ConcurrentHashMap<>(); + private final Object subscribersLock = new Object(); + private final Map<ConfigCacheKey, Subscriber> subscribers = new ConcurrentHashMap<>(); private final MemoryCache memoryCache; private 
final DelayedResponses delayedResponses; private final ScheduledExecutorService nextConfigScheduler = Executors.newScheduledThreadPool(1, new DaemonThreadFactory("next config")); private final ScheduledFuture<?> nextConfigFuture; - private final JRTConfigRequester requester; + private final JrtConfigRequesters requesters; // Scheduled executor that periodically checks for requests that have timed out and response should be returned to clients private final ScheduledExecutorService delayedResponsesScheduler = Executors.newScheduledThreadPool(1, new DaemonThreadFactory("delayed responses")); private final ScheduledFuture<?> delayedResponsesFuture; - RpcConfigSourceClient(RpcServer rpcServer, ConfigSourceSet configSourceSet, MemoryCache memoryCache) { - this.rpcServer = rpcServer; + RpcConfigSourceClient(ResponseHandler responseHandler, ConfigSourceSet configSourceSet) { + this.responseHandler = responseHandler; this.configSourceSet = configSourceSet; - this.memoryCache = memoryCache; + this.memoryCache = new MemoryCache(); this.delayedResponses = new DelayedResponses(); checkConfigSources(); nextConfigFuture = nextConfigScheduler.scheduleAtFixedRate(this, 0, 10, MILLISECONDS); - this.requester = JRTConfigRequester.create(configSourceSet, timingValues); - DelayedResponseHandler command = new DelayedResponseHandler(delayedResponses, memoryCache, rpcServer); + this.requesters = new JrtConfigRequesters(); + DelayedResponseHandler command = new DelayedResponseHandler(delayedResponses, memoryCache, responseHandler); this.delayedResponsesFuture = delayedResponsesScheduler.scheduleAtFixedRate(command, 5, 1, SECONDS); } @@ -139,22 +141,29 @@ class RpcConfigSourceClient implements ConfigSourceClient, Runnable { } private void subscribeToConfig(RawConfig input, ConfigCacheKey configCacheKey) { - if (activeSubscribers.containsKey(configCacheKey)) return; + synchronized (subscribersLock) { + if (subscribers.containsKey(configCacheKey)) return; - log.log(Level.FINE, () -> "Could 
not find good config in cache, creating subscriber for: " + configCacheKey); - var subscriber = new Subscriber(input, configSourceSet, timingValues, requester); - try { - subscriber.subscribe(); - activeSubscribers.put(configCacheKey, subscriber); - } catch (ConfigurationRuntimeException e) { - log.log(Level.INFO, "Subscribe for '" + configCacheKey + "' failed, closing subscriber"); - subscriber.cancel(); + log.log(Level.FINE, () -> "Could not find good config in cache, creating subscriber for: " + configCacheKey); + var subscriber = new Subscriber(input, timingValues, requesters + .getRequester(configSourceSet, timingValues)); + try { + subscriber.subscribe(); + subscribers.put(configCacheKey, subscriber); + } catch (ConfigurationRuntimeException e) { + log.log(Level.INFO, "Subscribe for '" + configCacheKey + "' failed, closing subscriber"); + subscriber.cancel(); + } } } @Override public void run() { - activeSubscribers.values().forEach(subscriber -> { + Collection<Subscriber> s; + synchronized (subscribersLock) { + s = List.copyOf(subscribers.values()); + } + s.forEach(subscriber -> { if (!subscriber.isClosed()) { Optional<RawConfig> config = subscriber.nextGeneration(); config.ifPresent(this::updateWithNewConfig); @@ -163,7 +172,7 @@ class RpcConfigSourceClient implements ConfigSourceClient, Runnable { } @Override - public void cancel() { + public void shutdown() { log.log(Level.FINE, "shutdownSourceConnections"); shutdownSourceConnections(); log.log(Level.FINE, "delayedResponsesFuture.cancel"); @@ -180,19 +189,21 @@ class RpcConfigSourceClient implements ConfigSourceClient, Runnable { @Override public void shutdownSourceConnections() { log.log(Level.FINE, "Subscriber::cancel"); - activeSubscribers.values().forEach(Subscriber::cancel); - activeSubscribers.clear(); + synchronized (subscribers) { + subscribers.values().forEach(Subscriber::cancel); + subscribers.clear(); + } log.log(Level.FINE, "nextConfigFuture.cancel"); nextConfigFuture.cancel(true); 
log.log(Level.FINE, "nextConfigScheduler.shutdownNow"); nextConfigScheduler.shutdownNow(); log.log(Level.FINE, "requester.close"); - requester.close(); + requesters.close(); } @Override public String getActiveSourceConnection() { - return requester.getConnectionPool().getCurrent().getAddress(); + return requesters.getRequester(configSourceSet, timingValues).getConnectionPool().getCurrent().getAddress(); } @Override @@ -230,7 +241,7 @@ class RpcConfigSourceClient implements ConfigSourceClient, Runnable { log.log(Level.FINE, () -> "Call returnOkResponse for " + key + "," + generation); if (config.getPayload().getData().getByteLength() == 0) log.log(Level.WARNING, () -> "Call returnOkResponse for " + key + "," + generation + " with empty config"); - rpcServer.returnOkResponse(request, config); + responseHandler.returnOkResponse(request, config); } else { log.log(Level.INFO, "Could not remove " + key + " from delayedResponses queue, already removed"); } @@ -243,9 +254,10 @@ class RpcConfigSourceClient implements ConfigSourceClient, Runnable { } @Override - public DelayedResponses delayedResponses() { - return delayedResponses; - } + public DelayedResponses delayedResponses() { return delayedResponses; } + + @Override + public MemoryCache memoryCache() { return memoryCache; } private void updateWithNewConfig(RawConfig newConfig) { log.log(Level.FINE, () -> "config to be returned for '" + newConfig.getKey() + diff --git a/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/Subscriber.java b/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/Subscriber.java index 70ff4456f6c..b407c0e7e76 100644 --- a/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/Subscriber.java +++ b/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/Subscriber.java @@ -1,7 +1,6 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.vespa.config.proxy; -import com.yahoo.config.subscription.ConfigSourceSet; import com.yahoo.config.subscription.impl.GenericConfigHandle; import com.yahoo.config.subscription.impl.GenericConfigSubscriber; import com.yahoo.config.subscription.impl.JRTConfigRequester; @@ -10,7 +9,6 @@ import com.yahoo.vespa.config.RawConfig; import com.yahoo.vespa.config.TimingValues; import com.yahoo.yolean.Exceptions; -import java.util.Map; import java.util.Optional; import java.util.logging.Level; import java.util.logging.Logger; @@ -23,22 +21,20 @@ public class Subscriber { private final static Logger log = Logger.getLogger(Subscriber.class.getName()); private final RawConfig config; - private final ConfigSourceSet configSourceSet; private final TimingValues timingValues; private final GenericConfigSubscriber subscriber; private GenericConfigHandle handle; - Subscriber(RawConfig config, ConfigSourceSet configSourceSet, TimingValues timingValues, JRTConfigRequester requester) { + Subscriber(RawConfig config, TimingValues timingValues, JRTConfigRequester requester) { this.config = config; - this.configSourceSet = configSourceSet; this.timingValues = timingValues; - this.subscriber = new GenericConfigSubscriber(Map.of(configSourceSet, requester)); + this.subscriber = new GenericConfigSubscriber(requester); } void subscribe() { ConfigKey<?> key = config.getKey(); handle = subscriber.subscribe(new ConfigKey<>(key.getName(), key.getConfigId(), key.getNamespace()), - config.getDefContent(), configSourceSet, timingValues); + config.getDefContent(), timingValues); } public Optional<RawConfig> nextGeneration() { @@ -58,14 +54,8 @@ public class Subscriber { return Optional.empty(); } - public void cancel() { - if (subscriber != null) { - subscriber.close(); - } - } + public void cancel() { subscriber.close(); } - boolean isClosed() { - return subscriber.isClosed(); - } + boolean isClosed() { return subscriber.isClosed(); } } diff --git 
a/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/filedistribution/FileDistributionAndUrlDownload.java b/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/filedistribution/FileDistributionAndUrlDownload.java index f2f52dca9fa..68570722117 100644 --- a/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/filedistribution/FileDistributionAndUrlDownload.java +++ b/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/filedistribution/FileDistributionAndUrlDownload.java @@ -4,7 +4,7 @@ package com.yahoo.vespa.config.proxy.filedistribution; import com.yahoo.concurrent.DaemonThreadFactory; import com.yahoo.config.subscription.ConfigSourceSet; import com.yahoo.jrt.Supervisor; -import com.yahoo.vespa.config.JRTConnectionPool; +import com.yahoo.vespa.filedistribution.FileDistributionConnectionPool; import com.yahoo.vespa.filedistribution.FileDownloader; import java.time.Duration; @@ -20,15 +20,14 @@ import java.util.concurrent.TimeUnit; public class FileDistributionAndUrlDownload { private static final Duration delay = Duration.ofMinutes(1); + private final FileDistributionRpcServer fileDistributionRpcServer; private final UrlDownloadRpcServer urlDownloadRpcServer; private final ScheduledExecutorService cleanupExecutor = new ScheduledThreadPoolExecutor(1, new DaemonThreadFactory("file references and downloads cleanup")); public FileDistributionAndUrlDownload(Supervisor supervisor, ConfigSourceSet source) { - fileDistributionRpcServer = - new FileDistributionRpcServer(supervisor, - new FileDownloader(new JRTConnectionPool(source, supervisor), supervisor, Duration.ofMinutes(5))); + fileDistributionRpcServer = new FileDistributionRpcServer(supervisor, createDownloader(supervisor, source)); urlDownloadRpcServer = new UrlDownloadRpcServer(supervisor); cleanupExecutor.scheduleAtFixedRate(new CachedFilesMaintainer(), delay.toSeconds(), delay.toSeconds(), TimeUnit.SECONDS); } @@ -45,4 +44,10 @@ public class FileDistributionAndUrlDownload { } } + private 
FileDownloader createDownloader(Supervisor supervisor, ConfigSourceSet source) { + return new FileDownloader(new FileDistributionConnectionPool(source, supervisor), + supervisor, + Duration.ofMinutes(5)); + } + } diff --git a/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/filedistribution/FileDistributionRpcServer.java b/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/filedistribution/FileDistributionRpcServer.java index 8b9d1f34154..d748ebb46e5 100644 --- a/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/filedistribution/FileDistributionRpcServer.java +++ b/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/filedistribution/FileDistributionRpcServer.java @@ -9,7 +9,9 @@ import com.yahoo.jrt.Request; import com.yahoo.jrt.StringArray; import com.yahoo.jrt.StringValue; import com.yahoo.jrt.Supervisor; +import com.yahoo.net.HostName; import com.yahoo.vespa.filedistribution.FileDownloader; +import com.yahoo.vespa.filedistribution.FileReferenceDownload; import java.io.File; import java.util.Map; @@ -101,7 +103,7 @@ class FileDistributionRpcServer { private void downloadFile(Request req) { FileReference fileReference = new FileReference(req.parameters().get(0).asString()); log.log(Level.FINE, () -> "getFile() called for file reference '" + fileReference.value() + "'"); - Optional<File> file = downloader.getFile(fileReference); + Optional<File> file = downloader.getFile(new FileReferenceDownload(fileReference, HostName.getLocalhost())); if (file.isPresent()) { new RequestTracker().trackRequest(file.get().getParentFile()); req.returnValues().add(new StringValue(file.get().getAbsolutePath())); diff --git a/config-proxy/src/test/java/com/yahoo/vespa/config/proxy/ConfigProxyRpcServerTest.java b/config-proxy/src/test/java/com/yahoo/vespa/config/proxy/ConfigProxyRpcServerTest.java index 1bcf8d5d8be..691bc6c43a7 100644 --- a/config-proxy/src/test/java/com/yahoo/vespa/config/proxy/ConfigProxyRpcServerTest.java +++ 
b/config-proxy/src/test/java/com/yahoo/vespa/config/proxy/ConfigProxyRpcServerTest.java @@ -92,7 +92,7 @@ public class ConfigProxyRpcServerTest { assertThat(ret.length, is(0)); final RawConfig config = ProxyServerTest.fooConfig; - server.proxyServer().getMemoryCache().update(config); + server.proxyServer().memoryCache().update(config); req = new Request("listCachedConfig"); client.invoke(req); assertFalse(req.errorMessage(), req.isError()); @@ -119,7 +119,7 @@ public class ConfigProxyRpcServerTest { assertThat(ret.length, is(0)); final RawConfig config = ProxyServerTest.fooConfig; - server.proxyServer().getMemoryCache().update(config); + server.proxyServer().memoryCache().update(config); req = new Request("listCachedConfigFull"); client.invoke(req); assertFalse(req.errorMessage(), req.isError()); @@ -133,7 +133,7 @@ public class ConfigProxyRpcServerTest { } /** - * Tests printStatistics RPC command + * Tests listSourceConnections RPC command */ @Test public void testRpcMethodListSourceConnections() throws ListenFailedException { @@ -151,20 +151,6 @@ public class ConfigProxyRpcServerTest { } /** - * Tests printStatistics RPC command - */ - @Test - public void testRpcMethodPrintStatistics() { - Request req = new Request("printStatistics"); - client.invoke(req); - assertFalse(req.errorMessage(), req.isError()); - assertThat(req.returnValues().size(), is(1)); - assertThat(req.returnValues().get(0).asString(), is("\n" + - "Delayed responses queue size: 0\n" + - "Contents: ")); - } - - /** * Tests invalidateCache RPC command */ @Test @@ -275,7 +261,7 @@ public class ConfigProxyRpcServerTest { } private static ProxyServer createTestServer(ConfigSourceSet source) { - return new ProxyServer(null, source, new MemoryCache(), null); + return new ProxyServer(null, source, new RpcConfigSourceClient(new ResponseHandler(), source)); } private static class TestServer implements AutoCloseable { diff --git 
a/config-proxy/src/test/java/com/yahoo/vespa/config/proxy/DelayedResponseHandlerTest.java b/config-proxy/src/test/java/com/yahoo/vespa/config/proxy/DelayedResponseHandlerTest.java index c2a0282fd05..8a668b34fd0 100644 --- a/config-proxy/src/test/java/com/yahoo/vespa/config/proxy/DelayedResponseHandlerTest.java +++ b/config-proxy/src/test/java/com/yahoo/vespa/config/proxy/DelayedResponseHandlerTest.java @@ -6,8 +6,7 @@ import org.junit.Rule; import org.junit.Test; import org.junit.rules.TemporaryFolder; -import static org.hamcrest.CoreMatchers.is; -import static org.junit.Assert.assertThat; +import static org.junit.Assert.assertEquals; /** * @author hmusum @@ -29,16 +28,15 @@ public class DelayedResponseHandlerTest { public void basic() { ConfigTester tester = new ConfigTester(); DelayedResponses delayedResponses = new DelayedResponses(); - final MockRpcServer mockRpcServer = new MockRpcServer(); - final MemoryCache memoryCache = new MemoryCache(); + MemoryCache memoryCache = new MemoryCache(); memoryCache.update(ConfigTester.fooConfig); - final DelayedResponseHandler delayedResponseHandler = new DelayedResponseHandler(delayedResponses, memoryCache, mockRpcServer); + DelayedResponseHandler delayedResponseHandler = new DelayedResponseHandler(delayedResponses, memoryCache, new ResponseHandler()); delayedResponses.add(new DelayedResponse(tester.createRequest(ProxyServerTest.fooConfig, 0))); delayedResponses.add(new DelayedResponse(tester.createRequest(ProxyServerTest.fooConfig, 1200000))); // should not be returned yet delayedResponses.add(new DelayedResponse(tester.createRequest(ProxyServerTest.errorConfig, 0))); // will not give a config when resolving delayedResponseHandler.checkDelayedResponses(); - assertThat(mockRpcServer.responses, is(1L)); + assertEquals(1, delayedResponseHandler.sentResponses()); } } diff --git a/config-proxy/src/test/java/com/yahoo/vespa/config/proxy/MemoryCacheConfigClientTest.java 
b/config-proxy/src/test/java/com/yahoo/vespa/config/proxy/MemoryCacheConfigClientTest.java index 51d0b983764..0b590aea789 100644 --- a/config-proxy/src/test/java/com/yahoo/vespa/config/proxy/MemoryCacheConfigClientTest.java +++ b/config-proxy/src/test/java/com/yahoo/vespa/config/proxy/MemoryCacheConfigClientTest.java @@ -16,9 +16,8 @@ public class MemoryCacheConfigClientTest { @Test public void basic() { - MemoryCache cache = new MemoryCache(); - cache.update(ConfigTester.fooConfig); - MemoryCacheConfigClient client = new MemoryCacheConfigClient(cache); + MemoryCacheConfigClient client = new MemoryCacheConfigClient(new MemoryCache()); + client.memoryCache().update(ConfigTester.fooConfig); assertThat(client.getConfig(ConfigTester.fooConfig, null), is(ConfigTester.fooConfig)); assertNull(client.getConfig(ConfigTester.barConfig, null)); diff --git a/config-proxy/src/test/java/com/yahoo/vespa/config/proxy/MockConfigSourceClient.java b/config-proxy/src/test/java/com/yahoo/vespa/config/proxy/MockConfigSourceClient.java index c0efc1cb355..d0724b9dbd0 100644 --- a/config-proxy/src/test/java/com/yahoo/vespa/config/proxy/MockConfigSourceClient.java +++ b/config-proxy/src/test/java/com/yahoo/vespa/config/proxy/MockConfigSourceClient.java @@ -18,9 +18,9 @@ public class MockConfigSourceClient implements ConfigSourceClient{ private final MemoryCache memoryCache; private final DelayedResponses delayedResponses = new DelayedResponses(); - MockConfigSourceClient(MockConfigSource configSource, MemoryCache memoryCache) { + MockConfigSourceClient(MockConfigSource configSource) { this.configSource = configSource; - this.memoryCache = memoryCache; + this.memoryCache = new MemoryCache(); } @Override @@ -35,7 +35,7 @@ public class MockConfigSourceClient implements ConfigSourceClient{ } @Override - public void cancel() { + public void shutdown() { configSource.clear(); } @@ -56,4 +56,7 @@ public class MockConfigSourceClient implements ConfigSourceClient{ @Override public DelayedResponses 
delayedResponses() { return delayedResponses; } + @Override + public MemoryCache memoryCache() { return memoryCache; } + } diff --git a/config-proxy/src/test/java/com/yahoo/vespa/config/proxy/MockRpcServer.java b/config-proxy/src/test/java/com/yahoo/vespa/config/proxy/MockRpcServer.java deleted file mode 100644 index 56fcca191de..00000000000 --- a/config-proxy/src/test/java/com/yahoo/vespa/config/proxy/MockRpcServer.java +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -package com.yahoo.vespa.config.proxy; - -import com.yahoo.vespa.config.RawConfig; -import com.yahoo.vespa.config.protocol.JRTServerConfigRequest; - -/** - * @author hmusum - */ -public class MockRpcServer implements RpcServer { - - volatile long responses = 0; - volatile long errorResponses = 0; - - public void returnOkResponse(JRTServerConfigRequest request, RawConfig config) { - responses++; - } - - public void returnErrorResponse(JRTServerConfigRequest request, int errorCode, String message) { - responses++; - errorResponses++; - } -} diff --git a/config-proxy/src/test/java/com/yahoo/vespa/config/proxy/ProxyServerTest.java b/config-proxy/src/test/java/com/yahoo/vespa/config/proxy/ProxyServerTest.java index cdda2bf6e77..15de93b748f 100644 --- a/config-proxy/src/test/java/com/yahoo/vespa/config/proxy/ProxyServerTest.java +++ b/config-proxy/src/test/java/com/yahoo/vespa/config/proxy/ProxyServerTest.java @@ -2,7 +2,10 @@ package com.yahoo.vespa.config.proxy; import com.yahoo.config.subscription.ConfigSourceSet; -import com.yahoo.vespa.config.*; +import com.yahoo.vespa.config.ConfigCacheKey; +import com.yahoo.vespa.config.ConfigKey; +import com.yahoo.vespa.config.ErrorCode; +import com.yahoo.vespa.config.RawConfig; import com.yahoo.vespa.config.protocol.JRTServerConfigRequest; import com.yahoo.vespa.config.protocol.Payload; import org.junit.After; @@ -25,9 +28,8 @@ import static org.junit.Assert.assertTrue; */ 
public class ProxyServerTest { - private final MemoryCache memoryCache = new MemoryCache(); private final MockConfigSource source = new MockConfigSource(); - private final MockConfigSourceClient client = new MockConfigSourceClient(source, memoryCache); + private final ConfigSourceClient client = new MockConfigSourceClient(source); private ProxyServer proxy; static final RawConfig fooConfig = ConfigTester.fooConfig; @@ -46,7 +48,7 @@ public class ProxyServerTest { source.clear(); source.put(fooConfig.getKey(), createConfigWithNextConfigGeneration(fooConfig, 0)); source.put(errorConfigKey, createConfigWithNextConfigGeneration(fooConfig, ErrorCode.UNKNOWN_DEFINITION)); - proxy = createTestServer(source, client, memoryCache); + proxy = createTestServer(source, client); } @After @@ -57,10 +59,10 @@ public class ProxyServerTest { @Test public void basic() { assertTrue(proxy.getMode().isDefault()); - assertThat(proxy.getMemoryCache().size(), is(0)); + assertThat(proxy.memoryCache().size(), is(0)); ConfigTester tester = new ConfigTester(); - final MemoryCache memoryCache = proxy.getMemoryCache(); + MemoryCache memoryCache = proxy.memoryCache(); assertEquals(0, memoryCache.size()); RawConfig res = proxy.resolveConfig(tester.createRequest(fooConfig)); assertNotNull(res); @@ -74,7 +76,7 @@ public class ProxyServerTest { */ @Test public void testModeSwitch() { - ProxyServer proxy = createTestServer(source, client, new MemoryCache()); + ProxyServer proxy = createTestServer(source, client); assertTrue(proxy.getMode().isDefault()); for (String mode : Mode.modes()) { @@ -109,7 +111,7 @@ public class ProxyServerTest { @Test public void testGetConfigAndCaching() { ConfigTester tester = new ConfigTester(); - final MemoryCache memoryCache = proxy.getMemoryCache(); + MemoryCache memoryCache = proxy.memoryCache(); assertEquals(0, memoryCache.size()); RawConfig res = proxy.resolveConfig(tester.createRequest(fooConfig)); assertNotNull(res); @@ -134,14 +136,14 @@ public class 
ProxyServerTest { // Simulate an error response source.put(fooConfig.getKey(), createConfigWithNextConfigGeneration(fooConfig, ErrorCode.INTERNAL_ERROR)); - final MemoryCache cacheManager = proxy.getMemoryCache(); - assertEquals(0, cacheManager.size()); + MemoryCache memoryCache = proxy.memoryCache(); + assertEquals(0, memoryCache.size()); RawConfig res = proxy.resolveConfig(tester.createRequest(fooConfig)); assertNotNull(res); assertNotNull(res.getPayload()); assertTrue(res.isError()); - assertEquals(0, cacheManager.size()); + assertEquals(0, memoryCache.size()); // Put a version of the same config into backend without error and see that it now works (i.e. we are // not getting a cached response (of the error in the previous request) @@ -152,12 +154,12 @@ public class ProxyServerTest { assertNotNull(res); assertNotNull(res.getPayload().getData()); assertThat(res.getPayload().toString(), is(ConfigTester.fooPayload.toString())); - assertEquals(1, cacheManager.size()); + assertEquals(1, memoryCache.size()); JRTServerConfigRequest newRequestBasedOnResponse = tester.createRequest(res); RawConfig res2 = proxy.resolveConfig(newRequestBasedOnResponse); assertFalse(ProxyServer.configOrGenerationHasChanged(res2, newRequestBasedOnResponse)); - assertEquals(1, cacheManager.size()); + assertEquals(1, memoryCache.size()); } /** @@ -169,7 +171,7 @@ public class ProxyServerTest { @Test public void testNoCachingOfEmptyConfig() { ConfigTester tester = new ConfigTester(); - MemoryCache cache = proxy.getMemoryCache(); + MemoryCache cache = proxy.memoryCache(); assertEquals(0, cache.size()); RawConfig res = proxy.resolveConfig(tester.createRequest(fooConfig)); @@ -222,10 +224,8 @@ public class ProxyServerTest { assertThat(properties.configSources[0], is(ProxyServer.DEFAULT_PROXY_CONFIG_SOURCES)); } - private static ProxyServer createTestServer(ConfigSourceSet source, - ConfigSourceClient configSourceClient, - MemoryCache memoryCache) { - return new ProxyServer(null, source, 
memoryCache, configSourceClient); + private static ProxyServer createTestServer(ConfigSourceSet source, ConfigSourceClient configSourceClient) { + return new ProxyServer(null, source, configSourceClient); } static RawConfig createConfigWithNextConfigGeneration(RawConfig config, int errorCode) { diff --git a/config-proxy/src/test/java/com/yahoo/vespa/config/proxy/RpcConfigSourceClientTest.java b/config-proxy/src/test/java/com/yahoo/vespa/config/proxy/RpcConfigSourceClientTest.java index 372c8c41c99..ada98f4b30e 100644 --- a/config-proxy/src/test/java/com/yahoo/vespa/config/proxy/RpcConfigSourceClientTest.java +++ b/config-proxy/src/test/java/com/yahoo/vespa/config/proxy/RpcConfigSourceClientTest.java @@ -17,7 +17,7 @@ import static org.junit.Assert.assertEquals; */ public class RpcConfigSourceClientTest { - private MockRpcServer rpcServer; + private ResponseHandler responseHandler; private RpcConfigSourceClient rpcConfigSourceClient; @Rule @@ -26,8 +26,8 @@ public class RpcConfigSourceClientTest { @Before public void setup() { - rpcServer = new MockRpcServer(); - rpcConfigSourceClient = new RpcConfigSourceClient(rpcServer, new MockConfigSource(), new MemoryCache()); + responseHandler = new ResponseHandler(true); + rpcConfigSourceClient = new RpcConfigSourceClient(responseHandler, new MockConfigSource()); } @Test @@ -90,7 +90,7 @@ public class RpcConfigSourceClientTest { } private void assertSentResponses(int expected) { - assertEquals(expected, rpcServer.responses); + assertEquals(expected, responseHandler.sentResponses()); } private void simulateClientRequestingConfig(RawConfig config) { diff --git a/config/abi-spec.json b/config/abi-spec.json index fa016fd91da..844835ae1c5 100644 --- a/config/abi-spec.json +++ b/config/abi-spec.json @@ -212,21 +212,18 @@ "public boolean nextGeneration(long)", "protected void throwIfExceptionSet(com.yahoo.config.subscription.impl.ConfigSubscription)", "public void close()", - "protected void closeRequesters()", "public 
java.lang.String toString()", "public java.lang.Thread startConfigThread(java.lang.Runnable)", "protected com.yahoo.config.subscription.ConfigSubscriber$State state()", "public void reload(long)", "public com.yahoo.config.subscription.ConfigSource getSource()", - "public java.util.Map requesters()", "public boolean isClosed()", "public com.yahoo.config.subscription.ConfigHandle subscribe(com.yahoo.config.subscription.ConfigSubscriber$SingleSubscriber, java.lang.Class, java.lang.String)", "public long getGeneration()", "protected void finalize()" ], "fields": [ - "protected final java.util.List subscriptionHandles", - "protected java.util.Map requesters" + "protected final java.util.List subscriptionHandles" ] }, "com.yahoo.config.subscription.ConfigURI": { diff --git a/config/src/main/java/com/yahoo/config/subscription/ConfigSubscriber.java b/config/src/main/java/com/yahoo/config/subscription/ConfigSubscriber.java index 07132c460f9..01008f0a8a2 100644 --- a/config/src/main/java/com/yahoo/config/subscription/ConfigSubscriber.java +++ b/config/src/main/java/com/yahoo/config/subscription/ConfigSubscriber.java @@ -5,15 +5,13 @@ import com.yahoo.config.ConfigInstance; import com.yahoo.config.ConfigurationRuntimeException; import com.yahoo.config.subscription.impl.ConfigSubscription; import com.yahoo.config.subscription.impl.JRTConfigRequester; +import com.yahoo.config.subscription.impl.JrtConfigRequesters; import com.yahoo.vespa.config.ConfigKey; import com.yahoo.vespa.config.TimingValues; import com.yahoo.yolean.Exceptions; -import java.util.HashMap; import java.util.List; -import java.util.Map; import java.util.concurrent.CopyOnWriteArrayList; - import java.util.logging.Level; import java.util.logging.Logger; @@ -40,6 +38,7 @@ public class ConfigSubscriber implements AutoCloseable { private final ConfigSource source; private final Object monitor = new Object(); private final Throwable stackTraceAtConstruction; // TODO Remove once finalizer is gone + private final 
JrtConfigRequesters requesters = new JrtConfigRequesters(); /** The last complete config generation received by this */ private long generation = -1; @@ -52,11 +51,6 @@ public class ConfigSubscriber implements AutoCloseable { private boolean applyOnRestart = false; /** - * Reuse requesters for equal source sets, limit number if many subscriptions. - */ - protected Map<ConfigSourceSet, JRTConfigRequester> requesters = new HashMap<>(); - - /** * The states of the subscriber. Affects the validity of calling certain methods. * */ @@ -114,8 +108,8 @@ public class ConfigSubscriber implements AutoCloseable { // for testing <T extends ConfigInstance> ConfigHandle<T> subscribe(Class<T> configClass, String configId, ConfigSource source, TimingValues timingValues) { checkStateBeforeSubscribe(); - final ConfigKey<T> configKey = new ConfigKey<>(configClass, configId); - ConfigSubscription<T> sub = ConfigSubscription.get(configKey, this, source, timingValues); + ConfigKey<T> configKey = new ConfigKey<>(configClass, configId); + ConfigSubscription<T> sub = ConfigSubscription.get(configKey, requesters, source, timingValues); ConfigHandle<T> handle = new ConfigHandle<>(sub); subscribeAndHandleErrors(sub, configKey, handle, timingValues); return handle; @@ -375,19 +369,10 @@ public class ConfigSubscriber implements AutoCloseable { for (ConfigHandle<? extends ConfigInstance> h : subscriptionHandles) { h.subscription().close(); } - closeRequesters(); + requesters.close(); log.log(FINE, () -> "Config subscriber has been closed."); } - /** - * Closes all open requesters - */ - protected void closeRequesters() { - for (JRTConfigRequester requester : requesters.values()) { - requester.close(); - } - } - @Override public String toString() { StringBuilder sb = new StringBuilder(); @@ -442,14 +427,6 @@ public class ConfigSubscriber implements AutoCloseable { return source; } - /** - * Implementation detail, do not use. 
- * @return requesters - */ - public Map<ConfigSourceSet, JRTConfigRequester> requesters() { - return requesters; - } - public boolean isClosed() { synchronized (monitor) { return state == State.CLOSED; diff --git a/config/src/main/java/com/yahoo/config/subscription/impl/ConfigSetSubscription.java b/config/src/main/java/com/yahoo/config/subscription/impl/ConfigSetSubscription.java index be71f230934..85bb1cd4ca7 100644 --- a/config/src/main/java/com/yahoo/config/subscription/impl/ConfigSetSubscription.java +++ b/config/src/main/java/com/yahoo/config/subscription/impl/ConfigSetSubscription.java @@ -4,7 +4,6 @@ package com.yahoo.config.subscription.impl; import com.yahoo.config.ConfigInstance; import com.yahoo.config.subscription.ConfigSet; import com.yahoo.config.subscription.ConfigSource; -import com.yahoo.config.subscription.ConfigSubscriber; import com.yahoo.vespa.config.ConfigKey; import java.lang.reflect.Constructor; @@ -19,8 +18,8 @@ public class ConfigSetSubscription<T extends ConfigInstance> extends ConfigSubsc private final ConfigSet set; private final ConfigKey<T> subKey; - ConfigSetSubscription(ConfigKey<T> key, ConfigSubscriber subscriber, ConfigSource cset) { - super(key, subscriber); + ConfigSetSubscription(ConfigKey<T> key, ConfigSource cset) { + super(key); if (!(cset instanceof ConfigSet)) throw new IllegalArgumentException("Source is not a ConfigSet: " + cset); this.set = (ConfigSet) cset; subKey = new ConfigKey<>(configClass, key.getConfigId()); diff --git a/config/src/main/java/com/yahoo/config/subscription/impl/ConfigSubscription.java b/config/src/main/java/com/yahoo/config/subscription/impl/ConfigSubscription.java index a3265671d50..f8a45a11b70 100644 --- a/config/src/main/java/com/yahoo/config/subscription/impl/ConfigSubscription.java +++ b/config/src/main/java/com/yahoo/config/subscription/impl/ConfigSubscription.java @@ -31,7 +31,6 @@ import static com.yahoo.vespa.config.PayloadChecksum.Type.MD5; public abstract class ConfigSubscription<T 
extends ConfigInstance> { protected static final Logger log = Logger.getLogger(ConfigSubscription.class.getName()); - protected final ConfigSubscriber subscriber; private final AtomicReference<ConfigState<T>> config = new AtomicReference<>(); protected final ConfigKey<T> key; protected final Class<T> configClass; @@ -100,12 +99,10 @@ public abstract class ConfigSubscription<T extends ConfigInstance> { * Initializes one subscription * * @param key a {@link ConfigKey} - * @param subscriber the subscriber for this subscription */ - ConfigSubscription(ConfigKey<T> key, ConfigSubscriber subscriber) { + ConfigSubscription(ConfigKey<T> key) { this.key = key; this.configClass = key.getConfigClass(); - this.subscriber = subscriber; this.config.set(new ConfigState<>()); getConfigState().getChecksums().removeChecksumsOfType(MD5); // TODO: Temporary until we don't use md5 anymore } @@ -114,23 +111,24 @@ public abstract class ConfigSubscription<T extends ConfigInstance> { * Correct type of ConfigSubscription instance based on type of source or form of config id * * @param key a {@link ConfigKey} - * @param subscriber the subscriber for this subscription * @return a subclass of a ConfigsSubscription */ - public static <T extends ConfigInstance> ConfigSubscription<T> get(ConfigKey<T> key, ConfigSubscriber subscriber, + public static <T extends ConfigInstance> ConfigSubscription<T> get(ConfigKey<T> key, JrtConfigRequesters requesters, ConfigSource source, TimingValues timingValues) { String configId = key.getConfigId(); - if (source instanceof RawSource || configId.startsWith("raw:")) return getRawSub(key, subscriber, source); - if (source instanceof FileSource || configId.startsWith("file:")) return getFileSub(key, subscriber, source); - if (source instanceof DirSource || configId.startsWith("dir:")) return getDirFileSub(key, subscriber, source); - if (source instanceof JarSource || configId.startsWith("jar:")) return getJarSub(key, subscriber, source); - if (source instanceof 
ConfigSet) return new ConfigSetSubscription<>(key, subscriber, source); - if (source instanceof ConfigSourceSet) return new JRTConfigSubscription<>(key, subscriber, source, timingValues); + if (source instanceof RawSource || configId.startsWith("raw:")) return getRawSub(key, source); + if (source instanceof FileSource || configId.startsWith("file:")) return getFileSub(key, source); + if (source instanceof DirSource || configId.startsWith("dir:")) return getDirFileSub(key, source); + if (source instanceof JarSource || configId.startsWith("jar:")) return getJarSub(key, source); + if (source instanceof ConfigSet) return new ConfigSetSubscription<>(key, source); + if (source instanceof ConfigSourceSet) { + JRTConfigRequester requester = requesters.getRequester((ConfigSourceSet) source, timingValues); + return new JRTConfigSubscription<>(key, requester, timingValues); + } throw new IllegalArgumentException("Unknown source type: " + source); } - private static <T extends ConfigInstance> JarConfigSubscription<T> getJarSub( - ConfigKey<T> key, ConfigSubscriber subscriber, ConfigSource source) { + private static <T extends ConfigInstance> JarConfigSubscription<T> getJarSub(ConfigKey<T> key, ConfigSource source) { String jarName; String path = "config/"; if (source instanceof JarSource) { @@ -141,29 +139,24 @@ public abstract class ConfigSubscription<T extends ConfigInstance> { jarName = key.getConfigId().replace("jar:", "").replaceFirst("\\!/.*", ""); if (key.getConfigId().contains("!/")) path = key.getConfigId().replaceFirst(".*\\!/", ""); } - return new JarConfigSubscription<>(key, subscriber, jarName, path); + return new JarConfigSubscription<>(key, jarName, path); } - private static <T extends ConfigInstance> ConfigSubscription<T> getFileSub( - ConfigKey<T> key, ConfigSubscriber subscriber, ConfigSource source) { + private static <T extends ConfigInstance> ConfigSubscription<T> getFileSub(ConfigKey<T> key, ConfigSource source) { File file = ((source instanceof 
FileSource)) ? ((FileSource) source).getFile() : new File(key.getConfigId().replace("file:", "")); - return new FileConfigSubscription<>(key, subscriber, file); + return new FileConfigSubscription<>(key, file); } - private static <T extends ConfigInstance> ConfigSubscription<T> getRawSub(ConfigKey<T> key, - ConfigSubscriber subscriber, - ConfigSource source) { + private static <T extends ConfigInstance> ConfigSubscription<T> getRawSub(ConfigKey<T> key, ConfigSource source) { String payload = ((source instanceof RawSource) ? ((RawSource) source).payload : key.getConfigId().replace("raw:", "")); - return new RawConfigSubscription<>(key, subscriber, payload); + return new RawConfigSubscription<>(key, payload); } - private static <T extends ConfigInstance> ConfigSubscription<T> getDirFileSub(ConfigKey<T> key, - ConfigSubscriber subscriber, - ConfigSource source) { + private static <T extends ConfigInstance> ConfigSubscription<T> getDirFileSub(ConfigKey<T> key, ConfigSource source) { String dir = key.getConfigId().replace("dir:", ""); if (source instanceof DirSource) { dir = ((DirSource) source).getDir().toString(); @@ -174,7 +167,7 @@ public abstract class ConfigSubscription<T extends ConfigInstance> { if (!file.exists()) { throw new IllegalArgumentException("Could not find a config file for '" + key.getName() + "' in '" + dir + "'"); } - return new FileConfigSubscription<>(key, subscriber, file); + return new FileConfigSubscription<>(key, file); } @SuppressWarnings("unchecked") @@ -182,8 +175,7 @@ public abstract class ConfigSubscription<T extends ConfigInstance> { public boolean equals(Object o) { if (o instanceof ConfigSubscription) { ConfigSubscription<T> other = (ConfigSubscription<T>) o; - return key.equals(other.key) && - subscriber.equals(other.subscriber); + return key.equals(other.key); } return false; } @@ -329,9 +321,7 @@ public abstract class ConfigSubscription<T extends ConfigInstance> { state = State.CLOSED; } - State getState() { - return state; - } + 
public boolean isClosed() { return state == State.CLOSED; } /** * Returns the file name corresponding to the given key's defName. diff --git a/config/src/main/java/com/yahoo/config/subscription/impl/FileConfigSubscription.java b/config/src/main/java/com/yahoo/config/subscription/impl/FileConfigSubscription.java index 3282bc81e55..5311b91c31f 100644 --- a/config/src/main/java/com/yahoo/config/subscription/impl/FileConfigSubscription.java +++ b/config/src/main/java/com/yahoo/config/subscription/impl/FileConfigSubscription.java @@ -5,7 +5,6 @@ import com.yahoo.config.ConfigInstance; import com.yahoo.config.ConfigurationRuntimeException; import com.yahoo.config.subscription.CfgConfigPayloadBuilder; import com.yahoo.config.subscription.ConfigInterruptedException; -import com.yahoo.config.subscription.ConfigSubscriber; import com.yahoo.io.IOUtils; import com.yahoo.vespa.config.ConfigKey; import com.yahoo.vespa.config.ConfigPayload; @@ -26,8 +25,8 @@ public class FileConfigSubscription<T extends ConfigInstance> extends ConfigSubs final File file; long ts; - FileConfigSubscription(ConfigKey<T> key, ConfigSubscriber subscriber, File f) { - super(key, subscriber); + FileConfigSubscription(ConfigKey<T> key, File f) { + super(key); setGeneration(0L); file = f; if (!file.exists() && !file.isFile()) diff --git a/config/src/main/java/com/yahoo/config/subscription/impl/GenericConfigSubscriber.java b/config/src/main/java/com/yahoo/config/subscription/impl/GenericConfigSubscriber.java index 7fb2c59d832..e382bab576e 100644 --- a/config/src/main/java/com/yahoo/config/subscription/impl/GenericConfigSubscriber.java +++ b/config/src/main/java/com/yahoo/config/subscription/impl/GenericConfigSubscriber.java @@ -1,35 +1,34 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.config.subscription.impl; -import java.util.List; -import java.util.Map; - import com.yahoo.config.ConfigInstance; import com.yahoo.config.subscription.ConfigHandle; -import com.yahoo.config.subscription.ConfigSource; -import com.yahoo.config.subscription.ConfigSourceSet; import com.yahoo.config.subscription.ConfigSubscriber; import com.yahoo.vespa.config.ConfigKey; import com.yahoo.vespa.config.RawConfig; import com.yahoo.vespa.config.TimingValues; +import java.util.List; + /** - * A subscriber that can subscribe without the class. Used by configproxy. + * A subscriber that can subscribe without the class. Used by config proxy. * * @author Vegard Havdal */ public class GenericConfigSubscriber extends ConfigSubscriber { + private final JRTConfigRequester requester; + /** * Constructs a new subscriber using the given pool of requesters (JRTConfigRequester holds 1 connection which in - * turn is subject to failover across the elems in the source set.) + * turn is subject to failover across the elements in the source set.) * The behaviour is undefined if the map key is different from the source set the requester was built with. 
* See also {@link JRTConfigRequester#JRTConfigRequester(com.yahoo.vespa.config.ConnectionPool, com.yahoo.vespa.config.TimingValues)} * - * @param requesters a map from config source set to config requester + * @param requester a config requester */ - public GenericConfigSubscriber(Map<ConfigSourceSet, JRTConfigRequester> requesters) { - this.requesters = requesters; + public GenericConfigSubscriber(JRTConfigRequester requester) { + this.requester = requester; } /** @@ -37,13 +36,12 @@ public class GenericConfigSubscriber extends ConfigSubscriber { * * @param key the {@link ConfigKey to subscribe to} * @param defContent the config definition content for the config to subscribe to - * @param source the config source to use * @param timingValues {@link TimingValues} * @return generic handle */ - public GenericConfigHandle subscribe(ConfigKey<RawConfig> key, List<String> defContent, ConfigSource source, TimingValues timingValues) { + public GenericConfigHandle subscribe(ConfigKey<RawConfig> key, List<String> defContent, TimingValues timingValues) { checkStateBeforeSubscribe(); - GenericJRTConfigSubscription sub = new GenericJRTConfigSubscription(key, defContent, this, source, timingValues); + GenericJRTConfigSubscription sub = new GenericJRTConfigSubscription(key, defContent, requester, timingValues); GenericConfigHandle handle = new GenericConfigHandle(sub); subscribeAndHandleErrors(sub, key, handle, timingValues); return handle; diff --git a/config/src/main/java/com/yahoo/config/subscription/impl/GenericJRTConfigSubscription.java b/config/src/main/java/com/yahoo/config/subscription/impl/GenericJRTConfigSubscription.java index 354489ea946..43f7a1fc168 100644 --- a/config/src/main/java/com/yahoo/config/subscription/impl/GenericJRTConfigSubscription.java +++ b/config/src/main/java/com/yahoo/config/subscription/impl/GenericJRTConfigSubscription.java @@ -1,8 +1,6 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.config.subscription.impl; -import com.yahoo.config.subscription.ConfigSource; -import com.yahoo.config.subscription.ConfigSubscriber; import com.yahoo.vespa.config.ConfigKey; import com.yahoo.vespa.config.RawConfig; import com.yahoo.vespa.config.TimingValues; @@ -23,10 +21,11 @@ public class GenericJRTConfigSubscription extends JRTConfigSubscription<RawConfi private final List<String> defContent; - public GenericJRTConfigSubscription(ConfigKey<RawConfig> key, List<String> defContent, ConfigSubscriber subscriber, - ConfigSource source, TimingValues timingValues) - { - super(key, subscriber, source, timingValues); + public GenericJRTConfigSubscription(ConfigKey<RawConfig> key, + List<String> defContent, + JRTConfigRequester requester, + TimingValues timingValues) { + super(key, requester, timingValues); this.defContent = defContent; } @@ -84,4 +83,5 @@ public class GenericJRTConfigSubscription extends JRTConfigSubscription<RawConfi public DefContent getDefContent() { return (DefContent.fromList(defContent)); } + } diff --git a/config/src/main/java/com/yahoo/config/subscription/impl/JRTConfigRequester.java b/config/src/main/java/com/yahoo/config/subscription/impl/JRTConfigRequester.java index 4c1d7b39755..fdfaf8b72fd 100644 --- a/config/src/main/java/com/yahoo/config/subscription/impl/JRTConfigRequester.java +++ b/config/src/main/java/com/yahoo/config/subscription/impl/JRTConfigRequester.java @@ -29,7 +29,7 @@ import static java.util.logging.Level.SEVERE; import static java.util.logging.Level.WARNING; /** - * This class fetches config payload using JRT, and acts as the callback target. + * Requests configs using RPC, and acts as the callback target. * It uses the {@link JRTConfigSubscription} and {@link JRTClientConfigRequest} * as context, and puts the request objects on a queue on the subscription, * for handling by the user thread. 
@@ -53,8 +53,9 @@ public class JRTConfigRequester implements RequestWaiter { private final ConnectionPool connectionPool; private final ConfigSourceSet configSourceSet; - private Instant noApplicationWarningLogged = Instant.MIN; + private Instant timeForLastLogWarning; private int failures = 0; + private volatile boolean closed = false; /** * Returns a new requester @@ -68,6 +69,8 @@ public class JRTConfigRequester implements RequestWaiter { this.scheduler = scheduler; this.connectionPool = connectionPool; this.timingValues = timingValues; + // Adjust so that we wait 1 second with logging warning in case there are some errors just when starting up + timeForLastLogWarning = Instant.now().minus(delayBetweenWarnings.plus(Duration.ofSeconds(5))); } /** @@ -93,14 +96,15 @@ public class JRTConfigRequester implements RequestWaiter { private <T extends ConfigInstance> void doRequest(JRTConfigSubscription<T> sub, JRTClientConfigRequest req) { Connection connection = connectionPool.getCurrent(); - req.getRequest().setContext(new RequestContext(sub, req, connection)); + Request request = req.getRequest(); + request.setContext(new RequestContext(sub, req, connection)); if (!req.validateParameters()) throw new ConfigurationRuntimeException("Error in parameters for config request: " + req); double jrtClientTimeout = getClientTimeout(req); log.log(FINE, () -> "Requesting config for " + sub + " on connection " + connection + " with client timeout " + jrtClientTimeout + (log.isLoggable(FINEST) ? (",defcontent=" + req.getDefContent().asString()) : "")); - connection.invokeAsync(req.getRequest(), jrtClientTimeout, this); + connection.invokeAsync(request, jrtClientTimeout, this); } @SuppressWarnings("unchecked") @@ -124,7 +128,7 @@ public class JRTConfigRequester implements RequestWaiter { } private void doHandle(JRTConfigSubscription<ConfigInstance> sub, JRTClientConfigRequest jrtReq, Connection connection) { - if (subscriptionIsClosed(sub)) return; // Avoid error messages etc. 
after closing + if (sub.isClosed()) return; // Avoid error messages etc. after closing boolean validResponse = jrtReq.validateResponse(); log.log(FINE, () -> "Request callback " + (validResponse ? "valid" : "invalid") + ". Req: " + jrtReq + "\nSpec: " + connection); @@ -145,12 +149,7 @@ public class JRTConfigRequester implements RequestWaiter { break; case ErrorCode.APPLICATION_NOT_LOADED: case ErrorCode.UNKNOWN_VESPA_VERSION: - if (noApplicationWarningLogged.isBefore(Instant.now().minus(delayBetweenWarnings))) { - log.log(WARNING, "Request callback failed: " + ErrorCode.getName(jrtReq.errorCode()) + - ". Connection spec: " + connection.getAddress() + - ", error message: " + jrtReq.errorMessage()); - noApplicationWarningLogged = Instant.now(); - } + logWarning(jrtReq, connection); break; default: log.log(WARNING, "Request callback failed. Req: " + jrtReq + "\nSpec: " + connection.getAddress() + @@ -159,6 +158,15 @@ public class JRTConfigRequester implements RequestWaiter { } } + private void logWarning(JRTClientConfigRequest jrtReq, Connection connection) { + if ( ! closed && timeForLastLogWarning.isBefore(Instant.now().minus(delayBetweenWarnings))) { + log.log(WARNING, "Request callback failed: " + ErrorCode.getName(jrtReq.errorCode()) + + ". 
Connection spec: " + connection.getAddress() + + ", error message: " + jrtReq.errorMessage()); + timeForLastLogWarning = Instant.now(); + } + } + private void handleFailedRequest(JRTClientConfigRequest jrtReq, JRTConfigSubscription<ConfigInstance> sub, Connection connection) { logError(jrtReq, connection); @@ -190,7 +198,6 @@ public class JRTConfigRequester implements RequestWaiter { private void handleOKRequest(JRTClientConfigRequest jrtReq, JRTConfigSubscription<ConfigInstance> sub) { failures = 0; - noApplicationWarningLogged = Instant.MIN; sub.setLastCallBackOKTS(Instant.now()); log.log(FINE, () -> "OK response received in handleOkRequest: " + jrtReq); if (jrtReq.hasUpdatedGeneration()) { @@ -199,10 +206,6 @@ public class JRTConfigRequester implements RequestWaiter { scheduleNextRequest(jrtReq, sub, calculateSuccessDelay(), calculateSuccessTimeout()); } - private boolean subscriptionIsClosed(JRTConfigSubscription<ConfigInstance> sub) { - return sub.getState() == ConfigSubscription.State.CLOSED; - } - private long calculateSuccessTimeout() { return timingValues.getPlusMinusFractionRandom(timingValues.getSuccessTimeout(), randomFraction); } @@ -237,8 +240,7 @@ public class JRTConfigRequester implements RequestWaiter { } public void close() { - // Fake that we have logged to avoid printing warnings after this - noApplicationWarningLogged = Instant.now(); + closed = true; if (configSourceSet != null) { managedPool.release(configSourceSet); } diff --git a/config/src/main/java/com/yahoo/config/subscription/impl/JRTConfigSubscription.java b/config/src/main/java/com/yahoo/config/subscription/impl/JRTConfigSubscription.java index c6ea79ddbcd..0b98e9cd1b2 100644 --- a/config/src/main/java/com/yahoo/config/subscription/impl/JRTConfigSubscription.java +++ b/config/src/main/java/com/yahoo/config/subscription/impl/JRTConfigSubscription.java @@ -4,9 +4,6 @@ package com.yahoo.config.subscription.impl; import com.yahoo.config.ConfigInstance; import 
com.yahoo.config.ConfigurationRuntimeException; import com.yahoo.config.subscription.ConfigInterruptedException; -import com.yahoo.config.subscription.ConfigSource; -import com.yahoo.config.subscription.ConfigSourceSet; -import com.yahoo.config.subscription.ConfigSubscriber; import com.yahoo.vespa.config.ConfigKey; import com.yahoo.vespa.config.ConfigPayload; import com.yahoo.vespa.config.TimingValues; @@ -25,14 +22,14 @@ import static java.util.logging.Level.FINE; import static java.util.logging.Level.INFO; /** - * A config subscription for a config instance, gets config using Vespa RPC from a config source + * A config subscription for a config instance, gets config using RPC from a config source * (config proxy or config server). * * @author vegardh */ public class JRTConfigSubscription<T extends ConfigInstance> extends ConfigSubscription<T> { - private JRTConfigRequester requester; + private final JRTConfigRequester requester; private final TimingValues timingValues; // Last time we got an OK JRT callback @@ -43,14 +40,11 @@ public class JRTConfigSubscription<T extends ConfigInstance> extends ConfigSubsc * but has not yet been handled. 
*/ private BlockingQueue<JRTClientConfigRequest> reqQueue = new LinkedBlockingQueue<>(); - private ConfigSourceSet sources; - public JRTConfigSubscription(ConfigKey<T> key, ConfigSubscriber subscriber, ConfigSource source, TimingValues timingValues) { - super(key, subscriber); + public JRTConfigSubscription(ConfigKey<T> key, JRTConfigRequester requester, TimingValues timingValues) { + super(key); this.timingValues = timingValues; - if (source instanceof ConfigSourceSet) { - this.sources = (ConfigSourceSet) source; - } + this.requester = requester; } @Override @@ -149,7 +143,6 @@ public class JRTConfigSubscription<T extends ConfigInstance> extends ConfigSubsc @Override public boolean subscribe(long timeout) { lastOK = Instant.now(); - requester = getRequester(); requester.request(this); JRTClientConfigRequest req = reqQueue.peek(); while (req == null && (Instant.now().isBefore(lastOK.plus(Duration.ofMillis(timeout))))) { @@ -163,15 +156,6 @@ public class JRTConfigSubscription<T extends ConfigInstance> extends ConfigSubsc return req != null; } - private JRTConfigRequester getRequester() { - JRTConfigRequester requester = subscriber.requesters().get(sources); - if (requester == null) { - requester = JRTConfigRequester.create(sources, timingValues); - subscriber.requesters().put(sources, requester); - } - return requester; - } - @Override @SuppressWarnings("serial") public void close() { diff --git a/config/src/main/java/com/yahoo/config/subscription/impl/JarConfigSubscription.java b/config/src/main/java/com/yahoo/config/subscription/impl/JarConfigSubscription.java index 095ebc59c11..a75e1d0b976 100644 --- a/config/src/main/java/com/yahoo/config/subscription/impl/JarConfigSubscription.java +++ b/config/src/main/java/com/yahoo/config/subscription/impl/JarConfigSubscription.java @@ -5,7 +5,6 @@ import com.yahoo.config.ConfigInstance; import com.yahoo.config.ConfigurationRuntimeException; import com.yahoo.config.subscription.CfgConfigPayloadBuilder; import 
com.yahoo.config.subscription.ConfigInterruptedException; -import com.yahoo.config.subscription.ConfigSubscriber; import com.yahoo.io.IOUtils; import com.yahoo.vespa.config.ConfigKey; import com.yahoo.vespa.config.ConfigPayload; @@ -32,8 +31,8 @@ public class JarConfigSubscription<T extends ConfigInstance> extends ConfigSubsc private ZipEntry zipEntry = null; // jar:configs/app.jar!/configs/ - JarConfigSubscription(ConfigKey<T> key, ConfigSubscriber subscriber, String jarName, String path) { - super(key, subscriber); + JarConfigSubscription(ConfigKey<T> key, String jarName, String path) { + super(key); this.jarName = jarName; this.path = path; } diff --git a/config/src/main/java/com/yahoo/config/subscription/impl/JrtConfigRequesters.java b/config/src/main/java/com/yahoo/config/subscription/impl/JrtConfigRequesters.java new file mode 100644 index 00000000000..1e9612272d5 --- /dev/null +++ b/config/src/main/java/com/yahoo/config/subscription/impl/JrtConfigRequesters.java @@ -0,0 +1,38 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +package com.yahoo.config.subscription.impl; + +import com.yahoo.config.subscription.ConfigSourceSet; +import com.yahoo.vespa.config.TimingValues; + +import java.util.HashMap; +import java.util.Map; + +/** + * Keeps track of requesters per config subscriber + * + * @author hmusum + */ +public class JrtConfigRequesters { + + /** + * Reuse requesters for equal source sets, limit number if many subscriptions. 
+ */ + protected Map<ConfigSourceSet, JRTConfigRequester> requesters = new HashMap<>(); + + public JRTConfigRequester getRequester(ConfigSourceSet source, TimingValues timingValues) { + JRTConfigRequester requester = requesters.get(source); + if (requester == null) { + requester = JRTConfigRequester.create(source, timingValues); + requesters.put(source, requester); + } + return requester; + } + + /** + * Closes all open requesters + */ + public void close() { + requesters.values().forEach(JRTConfigRequester::close); + } + +} diff --git a/config/src/main/java/com/yahoo/config/subscription/impl/RawConfigSubscription.java b/config/src/main/java/com/yahoo/config/subscription/impl/RawConfigSubscription.java index acc1b183bb7..22939c375ae 100644 --- a/config/src/main/java/com/yahoo/config/subscription/impl/RawConfigSubscription.java +++ b/config/src/main/java/com/yahoo/config/subscription/impl/RawConfigSubscription.java @@ -4,7 +4,6 @@ package com.yahoo.config.subscription.impl; import com.yahoo.config.ConfigInstance; import com.yahoo.config.subscription.CfgConfigPayloadBuilder; import com.yahoo.config.subscription.ConfigInterruptedException; -import com.yahoo.config.subscription.ConfigSubscriber; import com.yahoo.vespa.config.ConfigKey; import com.yahoo.vespa.config.ConfigPayload; import com.yahoo.vespa.config.PayloadChecksums; @@ -23,9 +22,9 @@ public class RawConfigSubscription<T extends ConfigInstance> extends ConfigSubsc final String inputPayload; String payload; - RawConfigSubscription(ConfigKey<T> key, ConfigSubscriber subscriber, String pl) { - super(key, subscriber); - this.inputPayload = pl; + RawConfigSubscription(ConfigKey<T> key, String payload) { + super(key); + this.inputPayload = payload; } @Override diff --git a/config/src/main/java/com/yahoo/vespa/config/JRTConnection.java b/config/src/main/java/com/yahoo/vespa/config/JRTConnection.java index c1fc50f6a82..2d7c96febd6 100644 --- a/config/src/main/java/com/yahoo/vespa/config/JRTConnection.java +++ 
b/config/src/main/java/com/yahoo/vespa/config/JRTConnection.java @@ -18,7 +18,7 @@ import java.util.logging.Logger; * @author hmusum */ public class JRTConnection implements Connection { - private final static Logger logger = Logger.getLogger(JRTConnection.class.getPackage().getName()); + private final static Logger logger = Logger.getLogger(JRTConnection.class.getName()); private final String address; private final Supervisor supervisor; diff --git a/config/src/test/java/com/yahoo/config/subscription/ConfigInstancePayloadTest.java b/config/src/test/java/com/yahoo/config/subscription/ConfigInstancePayloadTest.java index 983113ab2bf..3dc0f7188e3 100644 --- a/config/src/test/java/com/yahoo/config/subscription/ConfigInstancePayloadTest.java +++ b/config/src/test/java/com/yahoo/config/subscription/ConfigInstancePayloadTest.java @@ -71,13 +71,10 @@ public class ConfigInstancePayloadTest { intArr(310).intArr(311)). rootStruct(new RootStruct.Builder(). - inner0(new RootStruct.Inner0.Builder(). - index(11)). + inner0(b -> b.index(11)). inner1(new RootStruct.Inner1.Builder(). index(12)). - innerArr(new RootStruct.InnerArr.Builder(). - boolVal(true). - stringVal("deep")). + innerArr(b -> b.boolVal(true).stringVal("deep")). innerArr(new RootStruct.InnerArr.Builder(). boolVal(false). stringVal("blue a=\"escaped\""))). @@ -89,32 +86,29 @@ public class ConfigInstancePayloadTest { enumval(Myarray.Enumval.INNER). refval(":parent:"). fileVal("file0"). - anotherarray(new Myarray.Anotherarray.Builder(). - foo(7)). + anotherarray(b -> b.foo(7)). myStruct(new Myarray.MyStruct.Builder(). a(1). b(2))). - myarray(new Myarray.Builder(). + myarray(b -> b. intval(5). enumval(Myarray.Enumval.INNER). refval(":parent:"). fileVal("file1"). - anotherarray(new Myarray.Anotherarray.Builder(). - foo(1). - foo(2)). - myStruct(new Myarray.MyStruct.Builder(). - a(-1). - b(-2))). + anotherarray(bb -> bb.foo(1).foo(2)). + myStruct(bb -> bb. + a(-1). + b(-2))). 
myStructMap("one", new MyStructMap.Builder(). myInt(1). myString("bull"). myIntDef(2). myStringDef("bear"). - anotherMap("anotherOne", new MyStructMap.AnotherMap.Builder(). - anInt(3). - anIntDef(4))); + anotherMap("anotherOne", b -> b. + anInt(3). + anIntDef(4))); } @Test diff --git a/config/src/test/java/com/yahoo/config/subscription/ConfigInstanceUtilTest.java b/config/src/test/java/com/yahoo/config/subscription/ConfigInstanceUtilTest.java index dabfbffef98..aaf6782a6ff 100644 --- a/config/src/test/java/com/yahoo/config/subscription/ConfigInstanceUtilTest.java +++ b/config/src/test/java/com/yahoo/config/subscription/ConfigInstanceUtilTest.java @@ -32,15 +32,12 @@ public class ConfigInstanceUtilTest { .int_val(-1) .intarr(0) .doublearr(0.0) - .basicStruct(new FunctionTestConfig.BasicStruct.Builder() - .bar(-1) - .intArr(0)) - .myarray(new FunctionTestConfig.Myarray.Builder() + .basicStruct(b -> b.bar(-1).intArr(0)) + .myarray(b -> b .intval(-1) .refval("") .fileVal("") - .myStruct(new FunctionTestConfig.Myarray.MyStruct.Builder() - .a(0) + .myStruct(bb -> bb.a(0) )); ConfigInstanceUtil.setValues(destination, source); @@ -105,47 +102,34 @@ public class ConfigInstanceUtilTest { refarr(Arrays.asList(":parent:", ":parent", "parent:")). // test collection based setter fileArr("bin"). - basicStruct(new FunctionTestConfig.BasicStruct.Builder(). + basicStruct(b -> b. foo("basicFoo"). bar(3). intArr(310).intArr(311)). - rootStruct(new FunctionTestConfig.RootStruct.Builder(). - inner0(new FunctionTestConfig.RootStruct.Inner0.Builder(). - index(11)). - inner1(new FunctionTestConfig.RootStruct.Inner1.Builder(). - index(12)). - innerArr(new FunctionTestConfig.RootStruct.InnerArr.Builder(). - boolVal(true). - stringVal("deep")). - innerArr(new FunctionTestConfig.RootStruct.InnerArr.Builder(). - boolVal(false). - stringVal("blue a=\"escaped\""))). - - myarray(new FunctionTestConfig.Myarray.Builder(). + rootStruct(b -> b. + inner0(bb -> bb.index(11)). 
+ inner1(bb -> bb.index(12)). + innerArr(bb -> bb.boolVal(true).stringVal("deep")). + innerArr(bb -> bb.boolVal(false).stringVal("blue a=\"escaped\""))). + + myarray(b -> b. intval(-5). stringval("baah"). stringval("yikes"). enumval(Myarray.Enumval.INNER). refval(":parent:"). fileVal("file0"). - anotherarray(new FunctionTestConfig.Myarray.Anotherarray.Builder(). - foo(7)). - myStruct(new FunctionTestConfig.Myarray.MyStruct.Builder(). - a(1). - b(2))). + anotherarray(bb -> bb.foo(7)). + myStruct(bb -> bb.a(1).b(2))). - myarray(new FunctionTestConfig.Myarray.Builder(). + myarray(b -> b. intval(5). enumval(Myarray.Enumval.INNER). refval(":parent:"). fileVal("file1"). - anotherarray(new FunctionTestConfig.Myarray.Anotherarray.Builder(). - foo(1). - foo(2)). - myStruct(new FunctionTestConfig.Myarray.MyStruct.Builder(). - a(-1). - b(-2))); + anotherarray(bb -> bb.foo(1).foo(2)). + myStruct(bb -> bb.a(-1).b(-2))); } diff --git a/config/src/test/java/com/yahoo/config/subscription/ConfigSetSubscriptionTest.java b/config/src/test/java/com/yahoo/config/subscription/ConfigSetSubscriptionTest.java index 0eb51e734c8..346368ee7d9 100644 --- a/config/src/test/java/com/yahoo/config/subscription/ConfigSetSubscriptionTest.java +++ b/config/src/test/java/com/yahoo/config/subscription/ConfigSetSubscriptionTest.java @@ -1,35 +1,39 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.config.subscription; -import static org.junit.Assert.*; - +import com.yahoo.config.subscription.impl.ConfigSubscription; +import com.yahoo.config.subscription.impl.JrtConfigRequesters; import com.yahoo.foo.AppConfig; import com.yahoo.foo.SimpletypesConfig; import com.yahoo.foo.StringConfig; -import com.yahoo.config.subscription.impl.ConfigSubscription; import com.yahoo.vespa.config.ConfigKey; import com.yahoo.vespa.config.TimingValues; import org.junit.Test; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotEquals; +import static org.junit.Assert.assertTrue; + public class ConfigSetSubscriptionTest { @Test public void testConfigSubscription() { - ConfigSubscriber subscriber = new ConfigSubscriber(); ConfigSet configSet = new ConfigSet(); AppConfig.Builder a0builder = new AppConfig.Builder().message("A message, 0").times(88); configSet.addBuilder("app/0", a0builder); AppConfig.Builder a1builder = new AppConfig.Builder().message("A message, 1").times(89); configSet.addBuilder("app/1", a1builder); + JrtConfigRequesters requesters = new JrtConfigRequesters(); ConfigSubscription<AppConfig> c1 = ConfigSubscription.get( new ConfigKey<>(AppConfig.class, "app/0"), - subscriber, + requesters, configSet, new TimingValues()); ConfigSubscription<AppConfig> c2 = ConfigSubscription.get( new ConfigKey<>(AppConfig.class, "app/1"), - subscriber, + requesters, configSet, new TimingValues()); @@ -39,14 +43,13 @@ public class ConfigSetSubscriptionTest { @Test(expected = IllegalArgumentException.class) public void testUnknownKey() { - ConfigSubscriber subscriber = new ConfigSubscriber(); ConfigSet configSet = new ConfigSet(); AppConfig.Builder a0builder = new AppConfig.Builder().message("A message, 0").times(88); configSet.addBuilder("app/0", a0builder); ConfigSubscription.get( new ConfigKey<>(SimpletypesConfig.class, "simpletypes/1"), - subscriber, + new JrtConfigRequesters(), configSet, 
new TimingValues()); } diff --git a/config/src/test/java/com/yahoo/config/subscription/ConfigSubscriptionTest.java b/config/src/test/java/com/yahoo/config/subscription/ConfigSubscriptionTest.java index ea73a6cbef1..1b0bc858361 100644 --- a/config/src/test/java/com/yahoo/config/subscription/ConfigSubscriptionTest.java +++ b/config/src/test/java/com/yahoo/config/subscription/ConfigSubscriptionTest.java @@ -3,19 +3,22 @@ package com.yahoo.config.subscription; import com.yahoo.config.ConfigInstance; import com.yahoo.config.ConfigurationRuntimeException; -import com.yahoo.foo.SimpletypesConfig; -import com.yahoo.foo.AppConfig; import com.yahoo.config.subscription.impl.ConfigSubscription; +import com.yahoo.config.subscription.impl.JrtConfigRequesters; +import com.yahoo.foo.AppConfig; +import com.yahoo.foo.SimpletypesConfig; import com.yahoo.vespa.config.ConfigKey; import com.yahoo.vespa.config.TimingValues; - -import org.junit.Ignore; import org.junit.Test; import java.util.Collections; import java.util.List; -import static org.junit.Assert.*; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; /** * @author hmusum @@ -26,13 +29,11 @@ public class ConfigSubscriptionTest { @Test public void testEquals() { ConfigSubscriber sub = new ConfigSubscriber(); - final String payload = "boolval true"; - ConfigSubscription<SimpletypesConfig> a = ConfigSubscription.get(new ConfigKey<>(SimpletypesConfig.class, "test"), - sub, new RawSource(payload), new TimingValues()); - ConfigSubscription<SimpletypesConfig> b = ConfigSubscription.get(new ConfigKey<>(SimpletypesConfig.class, "test"), - sub, new RawSource(payload), new TimingValues()); - ConfigSubscription<SimpletypesConfig> c = ConfigSubscription.get(new ConfigKey<>(SimpletypesConfig.class, "test2"), - sub, new RawSource(payload), new TimingValues()); + + 
JrtConfigRequesters requesters = new JrtConfigRequesters(); + ConfigSubscription<SimpletypesConfig> a = createSubscription(requesters, "test"); + ConfigSubscription<SimpletypesConfig> b = createSubscription(requesters, "test"); + ConfigSubscription<SimpletypesConfig> c = createSubscription(requesters, "test2"); assertEquals(b, a); assertEquals(a, a); assertEquals(b, b); @@ -40,21 +41,21 @@ public class ConfigSubscriptionTest { assertNotEquals(c, a); assertNotEquals(c, b); - ConfigSubscriber subscriber = new ConfigSubscriber(); ConfigSet configSet = new ConfigSet(); AppConfig.Builder a0builder = new AppConfig.Builder().message("A message, 0").times(88); configSet.addBuilder("app/0", a0builder); AppConfig.Builder a1builder = new AppConfig.Builder().message("A message, 1").times(89); configSet.addBuilder("app/1", a1builder); + ConfigSubscription<AppConfig> c1 = ConfigSubscription.get( new ConfigKey<>(AppConfig.class, "app/0"), - subscriber, + requesters, configSet, new TimingValues()); ConfigSubscription<AppConfig> c2 = ConfigSubscription.get( new ConfigKey<>(AppConfig.class, "app/1"), - subscriber, + requesters, configSet, new TimingValues()); @@ -68,16 +69,13 @@ public class ConfigSubscriptionTest { ConfigSubscriber sub = new ConfigSubscriber(); ConfigHandle<SimpletypesConfig> handle = sub.subscribe(SimpletypesConfig.class, "raw:boolval true", 10000); assertNotNull(handle); - sub.nextConfig(false); + assertTrue(sub.nextConfig(false)); assertTrue(handle.getConfig().boolval()); - //assertTrue(sub.getSource() instanceof RawSource); sub.close(); } - // Test that subscription is closed and subscriptionHandles is empty if we get an exception - // (only the last is possible to test right now). 
+ // Test that exception is thrown if subscribe fails and that subscription is closed if we close the subscriber @Test - @Ignore public void testSubscribeWithException() { TestConfigSubscriber sub = new TestConfigSubscriber(); ConfigSourceSet configSourceSet = new ConfigSourceSet(Collections.singletonList("tcp/localhost:99999")); @@ -85,10 +83,16 @@ public class ConfigSubscriptionTest { sub.subscribe(SimpletypesConfig.class, "configid", configSourceSet, new TimingValues().setSubscribeTimeout(100)); fail(); } catch (ConfigurationRuntimeException e) { - assertEquals(0, sub.getSubscriptionHandles().size()); + sub.close(); + assertTrue(sub.getSubscriptionHandles().get(0).subscription().isClosed()); } } + private ConfigSubscription<SimpletypesConfig> createSubscription(JrtConfigRequesters requesters, String configId) { + return ConfigSubscription.get(new ConfigKey<>(SimpletypesConfig.class, configId), + requesters, new RawSource("boolval true"), new TimingValues()); + } + private static class TestConfigSubscriber extends ConfigSubscriber { List<ConfigHandle<? 
extends ConfigInstance>> getSubscriptionHandles() { return subscriptionHandles; diff --git a/config/src/test/java/com/yahoo/config/subscription/GenericConfigSubscriberTest.java b/config/src/test/java/com/yahoo/config/subscription/GenericConfigSubscriberTest.java index 4616630557e..fc922cc3b07 100644 --- a/config/src/test/java/com/yahoo/config/subscription/GenericConfigSubscriberTest.java +++ b/config/src/test/java/com/yahoo/config/subscription/GenericConfigSubscriberTest.java @@ -6,15 +6,15 @@ import com.yahoo.config.subscription.impl.GenericConfigSubscriber; import com.yahoo.config.subscription.impl.JRTConfigRequester; import com.yahoo.config.subscription.impl.JRTConfigRequesterTest; import com.yahoo.config.subscription.impl.MockConnection; +import com.yahoo.jrt.Supervisor; +import com.yahoo.jrt.Transport; import com.yahoo.vespa.config.ConfigKey; +import com.yahoo.vespa.config.JRTConnectionPool; import com.yahoo.vespa.config.TimingValues; import com.yahoo.vespa.config.protocol.CompressionType; import org.junit.Test; -import java.util.HashMap; -import java.util.LinkedHashMap; import java.util.List; -import java.util.Map; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; @@ -31,14 +31,11 @@ public class GenericConfigSubscriberTest { @Test public void testSubscribeGeneric() throws InterruptedException { - Map<ConfigSourceSet, JRTConfigRequester> requesters = new HashMap<>(); - ConfigSourceSet sourceSet = new ConfigSourceSet("blabla"); - requesters.put(sourceSet, new JRTConfigRequester(new MockConnection(), tv)); - GenericConfigSubscriber sub = new GenericConfigSubscriber(requesters); + JRTConfigRequester requester = new JRTConfigRequester(new MockConnection(), tv); + GenericConfigSubscriber sub = new GenericConfigSubscriber(requester); final List<String> defContent = List.of("myVal int"); GenericConfigHandle handle = sub.subscribe(new ConfigKey<>("simpletypes", "id", "config"), defContent, - sourceSet, tv); 
assertTrue(sub.nextConfig(false)); assertTrue(handle.isChanged()); @@ -60,23 +57,6 @@ public class GenericConfigSubscriberTest { return handle.getRawConfig().getPayload().withCompression(CompressionType.UNCOMPRESSED).toString(); } - @Test - public void testGenericRequesterPooling() { - ConfigSourceSet source1 = new ConfigSourceSet("tcp/foo:78"); - ConfigSourceSet source2 = new ConfigSourceSet("tcp/bar:79"); - JRTConfigRequester req1 = JRTConfigRequester.create(source1, tv); - JRTConfigRequester req2 = JRTConfigRequester.create(source2, tv); - Map<ConfigSourceSet, JRTConfigRequester> requesters = new LinkedHashMap<>(); - requesters.put(source1, req1); - requesters.put(source2, req2); - GenericConfigSubscriber sub = new GenericConfigSubscriber(requesters); - assertEquals(sub.requesters().get(source1).getConnectionPool().getCurrent().getAddress(), "tcp/foo:78"); - assertEquals(sub.requesters().get(source2).getConnectionPool().getCurrent().getAddress(), "tcp/bar:79"); - for (JRTConfigRequester requester : requesters.values()) { - requester.close(); - } - } - @Test(expected=UnsupportedOperationException.class) public void testOverriddenSubscribeInvalid1() { createSubscriber().subscribe(null, null); @@ -93,9 +73,7 @@ public class GenericConfigSubscriberTest { } private GenericConfigSubscriber createSubscriber() { - return new GenericConfigSubscriber(Map.of( - new ConfigSourceSet("blabla"), - new JRTConfigRequester(new MockConnection(), JRTConfigRequesterTest.getTestTimingValues()))); + return new GenericConfigSubscriber(new JRTConfigRequester(new JRTConnectionPool(new ConfigSourceSet("foo"), new Supervisor(new Transport())), tv)); } } diff --git a/config/src/test/java/com/yahoo/config/subscription/impl/FileConfigSubscriptionTest.java b/config/src/test/java/com/yahoo/config/subscription/impl/FileConfigSubscriptionTest.java index 3eb532d8f7f..74af35e39dc 100644 --- a/config/src/test/java/com/yahoo/config/subscription/impl/FileConfigSubscriptionTest.java +++ 
b/config/src/test/java/com/yahoo/config/subscription/impl/FileConfigSubscriptionTest.java @@ -1,11 +1,9 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.config.subscription.impl; +import com.yahoo.config.subscription.DirSource; import com.yahoo.foo.SimpletypesConfig; import com.yahoo.foo.TestReferenceConfig; -import com.yahoo.config.subscription.ConfigSubscriber; -import com.yahoo.config.subscription.DirSource; -import com.yahoo.config.subscription.FileSource; import com.yahoo.vespa.config.ConfigKey; import com.yahoo.vespa.config.TimingValues; import org.junit.Before; @@ -43,10 +41,8 @@ public class FileConfigSubscriptionTest { @Test public void require_that_new_config_is_detected_in_time() throws IOException, InterruptedException { writeConfig("intval", "23"); - ConfigSubscriber subscriber = new ConfigSubscriber(new FileSource(TEST_TYPES_FILE)); ConfigSubscription<SimpletypesConfig> sub = new FileConfigSubscription<>( new ConfigKey<>(SimpletypesConfig.class, ""), - subscriber, TEST_TYPES_FILE); assertTrue(sub.nextConfig(1000)); assertThat(sub.getConfigState().getConfig().intval(), is(23)); @@ -59,10 +55,8 @@ public class FileConfigSubscriptionTest { @Test public void require_that_new_config_is_detected_on_reload() throws IOException { writeConfig("intval", "23"); - ConfigSubscriber subscriber = new ConfigSubscriber(new FileSource(TEST_TYPES_FILE)); ConfigSubscription<SimpletypesConfig> sub = new FileConfigSubscription<>( new ConfigKey<>(SimpletypesConfig.class, ""), - subscriber, TEST_TYPES_FILE); assertTrue(sub.nextConfig(1000)); assertThat(sub.getConfigState().getConfig().intval(), is(23)); @@ -103,8 +97,10 @@ public class FileConfigSubscriptionTest { final String cfgDir = "src/test/resources/configs/foo"; final String cfgId = "dir:" + cfgDir; final ConfigKey<TestReferenceConfig> key = new ConfigKey<>(TestReferenceConfig.class, cfgId); - ConfigSubscriber subscriber = new 
ConfigSubscriber(); - ConfigSubscription<TestReferenceConfig> sub = ConfigSubscription.get(key, subscriber, new DirSource(new File(cfgDir)), new TimingValues()); + ConfigSubscription<TestReferenceConfig> sub = ConfigSubscription.get(key, + new JrtConfigRequesters(), + new DirSource(new File(cfgDir)), + new TimingValues()); assertTrue(sub.nextConfig(1000)); assertThat(sub.getConfigState().getConfig().configId(), is(cfgId)); } @@ -113,10 +109,8 @@ public class FileConfigSubscriptionTest { public void require_that_bad_file_throws_exception() throws IOException { // A little trick to ensure that we can create the subscriber, but that we get an error when reading. writeConfig("intval", "23"); - ConfigSubscriber subscriber = new ConfigSubscriber(new FileSource(TEST_TYPES_FILE)); ConfigSubscription<SimpletypesConfig> sub = new FileConfigSubscription<>( new ConfigKey<>(SimpletypesConfig.class, ""), - subscriber, TEST_TYPES_FILE); sub.reload(1); Files.delete(TEST_TYPES_FILE.toPath()); // delete file so the below statement throws exception diff --git a/config/src/test/java/com/yahoo/config/subscription/impl/JRTConfigRequesterTest.java b/config/src/test/java/com/yahoo/config/subscription/impl/JRTConfigRequesterTest.java index 62a25fadf25..dca0c2d0018 100644 --- a/config/src/test/java/com/yahoo/config/subscription/impl/JRTConfigRequesterTest.java +++ b/config/src/test/java/com/yahoo/config/subscription/impl/JRTConfigRequesterTest.java @@ -2,7 +2,6 @@ package com.yahoo.config.subscription.impl; import com.yahoo.config.subscription.ConfigSourceSet; -import com.yahoo.config.subscription.ConfigSubscriber; import com.yahoo.foo.SimpletypesConfig; import com.yahoo.jrt.Request; import com.yahoo.vespa.config.ConfigKey; @@ -51,12 +50,11 @@ public class JRTConfigRequesterTest { @Test public void testFirstRequestAfterSubscribing() { - ConfigSubscriber subscriber = new ConfigSubscriber(); - final TimingValues timingValues = getTestTimingValues(); - JRTConfigSubscription<SimpletypesConfig> 
sub = createSubscription(subscriber, timingValues); - - final MockConnection connection = new MockConnection(); + TimingValues timingValues = getTestTimingValues(); + MockConnection connection = new MockConnection(); JRTConfigRequester requester = new JRTConfigRequester(connection, timingValues); + JRTConfigSubscription<SimpletypesConfig> sub = createSubscription(requester, timingValues); + assertEquals(requester.getConnectionPool(), connection); requester.request(sub); final Request request = connection.getRequest(); @@ -70,25 +68,24 @@ public class JRTConfigRequesterTest { @Test public void testFatalError() { - ConfigSubscriber subscriber = new ConfigSubscriber(); final TimingValues timingValues = getTestTimingValues(); final MockConnection connection = new MockConnection(new ErrorResponseHandler()); JRTConfigRequester requester = new JRTConfigRequester(connection, timingValues); - requester.request(createSubscription(subscriber, timingValues)); + requester.request(createSubscription(requester, timingValues)); waitUntilResponse(connection); assertEquals(1, requester.getFailures()); } @Test public void testFatalErrorSubscribed() { - ConfigSubscriber subscriber = new ConfigSubscriber(); - final TimingValues timingValues = getTestTimingValues(); - JRTConfigSubscription<SimpletypesConfig> sub = createSubscription(subscriber, timingValues); + TimingValues timingValues = getTestTimingValues(); + MockConnection connection = new MockConnection(new ErrorResponseHandler()); + JRTConfigRequester requester = new JRTConfigRequester(connection, timingValues); + + JRTConfigSubscription<SimpletypesConfig> sub = createSubscription(requester, timingValues); sub.setConfig(1L, false, config(), PayloadChecksums.empty()); - final MockConnection connection = new MockConnection(new ErrorResponseHandler()); - JRTConfigRequester requester = new JRTConfigRequester(connection, timingValues); requester.request(sub); waitUntilResponse(connection); assertEquals(1, requester.getFailures()); @@ 
-96,25 +93,23 @@ public class JRTConfigRequesterTest { @Test public void testTransientError() { - ConfigSubscriber subscriber = new ConfigSubscriber(); - final TimingValues timingValues = getTestTimingValues(); + TimingValues timingValues = getTestTimingValues(); - final MockConnection connection = new MockConnection(new ErrorResponseHandler(com.yahoo.jrt.ErrorCode.TIMEOUT)); + MockConnection connection = new MockConnection(new ErrorResponseHandler(com.yahoo.jrt.ErrorCode.TIMEOUT)); JRTConfigRequester requester = new JRTConfigRequester(connection, timingValues); - requester.request(createSubscription(subscriber, timingValues)); + requester.request(createSubscription(requester, timingValues)); waitUntilResponse(connection); assertEquals(1, requester.getFailures()); } @Test public void testTransientErrorSubscribed() { - ConfigSubscriber subscriber = new ConfigSubscriber(); - final TimingValues timingValues = getTestTimingValues(); - JRTConfigSubscription<SimpletypesConfig> sub = createSubscription(subscriber, timingValues); + TimingValues timingValues = getTestTimingValues(); + MockConnection connection = new MockConnection(new ErrorResponseHandler(com.yahoo.jrt.ErrorCode.TIMEOUT)); + JRTConfigRequester requester = new JRTConfigRequester(connection, timingValues); + JRTConfigSubscription<SimpletypesConfig> sub = createSubscription(requester, timingValues); sub.setConfig(1L, false, config(), PayloadChecksums.empty()); - final MockConnection connection = new MockConnection(new ErrorResponseHandler(com.yahoo.jrt.ErrorCode.TIMEOUT)); - JRTConfigRequester requester = new JRTConfigRequester(connection, timingValues); requester.request(sub); waitUntilResponse(connection); assertEquals(1, requester.getFailures()); @@ -122,13 +117,12 @@ public class JRTConfigRequesterTest { @Test public void testUnknownConfigDefinitionError() { - ConfigSubscriber subscriber = new ConfigSubscriber(); - final TimingValues timingValues = getTestTimingValues(); - 
JRTConfigSubscription<SimpletypesConfig> sub = createSubscription(subscriber, timingValues); + TimingValues timingValues = getTestTimingValues(); + MockConnection connection = new MockConnection(new ErrorResponseHandler(ErrorCode.UNKNOWN_DEFINITION)); + JRTConfigRequester requester = new JRTConfigRequester(connection, timingValues); + JRTConfigSubscription<SimpletypesConfig> sub = createSubscription(requester, timingValues); sub.setConfig(1L, false, config(), PayloadChecksums.empty()); - final MockConnection connection = new MockConnection(new ErrorResponseHandler(ErrorCode.UNKNOWN_DEFINITION)); - JRTConfigRequester requester = new JRTConfigRequester(connection, timingValues); assertEquals(requester.getConnectionPool(), connection); requester.request(sub); waitUntilResponse(connection); @@ -137,13 +131,12 @@ public class JRTConfigRequesterTest { @Test public void testClosedSubscription() { - ConfigSubscriber subscriber = new ConfigSubscriber(); - final TimingValues timingValues = getTestTimingValues(); - JRTConfigSubscription<SimpletypesConfig> sub = createSubscription(subscriber, timingValues); + TimingValues timingValues = getTestTimingValues(); + MockConnection connection = new MockConnection(new MockConnection.OKResponseHandler()); + JRTConfigRequester requester = new JRTConfigRequester(connection, timingValues); + JRTConfigSubscription<SimpletypesConfig> sub = createSubscription(requester, timingValues); sub.close(); - final MockConnection connection = new MockConnection(new MockConnection.OKResponseHandler()); - JRTConfigRequester requester = new JRTConfigRequester(connection, timingValues); requester.request(sub); assertEquals(1, connection.getNumberOfRequests()); // Check that no further request was sent? 
@@ -157,16 +150,14 @@ public class JRTConfigRequesterTest { @Test public void testTimeout() { - ConfigSubscriber subscriber = new ConfigSubscriber(); - final TimingValues timingValues = getTestTimingValues(); - JRTConfigSubscription<SimpletypesConfig> sub = createSubscription(subscriber, timingValues); + TimingValues timingValues = getTestTimingValues(); + MockConnection connection = new MockConnection(new DelayedResponseHandler(timingValues.getSubscribeTimeout()), + 2); // fake that we have more than one source + JRTConfigRequester requester = new JRTConfigRequester(connection, timingValues); + JRTConfigSubscription<SimpletypesConfig> sub = createSubscription(requester, timingValues); sub.close(); - final MockConnection connection = new MockConnection( - new DelayedResponseHandler(timingValues.getSubscribeTimeout()), - 2); // fake that we have more than one source - JRTConfigRequester requester = new JRTConfigRequester(connection, timingValues); - requester.request(createSubscription(subscriber, timingValues)); + requester.request(createSubscription(requester, timingValues)); // Check that no further request was sent? 
try { Thread.sleep(timingValues.getFixedDelay()*2); @@ -175,9 +166,10 @@ public class JRTConfigRequesterTest { } } - private JRTConfigSubscription<SimpletypesConfig> createSubscription(ConfigSubscriber subscriber, TimingValues timingValues) { - return new JRTConfigSubscription<>( - new ConfigKey<>(SimpletypesConfig.class, "testid"), subscriber, null, timingValues); + private JRTConfigSubscription<SimpletypesConfig> createSubscription(JRTConfigRequester requester, TimingValues timingValues) { + return new JRTConfigSubscription<>(new ConfigKey<>(SimpletypesConfig.class, "testid"), + requester, + timingValues); } private SimpletypesConfig config() { diff --git a/config/src/test/java/com/yahoo/vespa/config/protocol/JRTConfigRequestFactoryTest.java b/config/src/test/java/com/yahoo/vespa/config/protocol/JRTConfigRequestFactoryTest.java index 76af5610bc0..14183aa087a 100644 --- a/config/src/test/java/com/yahoo/vespa/config/protocol/JRTConfigRequestFactoryTest.java +++ b/config/src/test/java/com/yahoo/vespa/config/protocol/JRTConfigRequestFactoryTest.java @@ -1,11 +1,12 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.vespa.config.protocol; -import com.yahoo.foo.FunctionTestConfig; -import com.yahoo.config.subscription.ConfigSet; -import com.yahoo.config.subscription.ConfigSubscriber; +import com.yahoo.config.subscription.ConfigSourceSet; +import com.yahoo.config.subscription.impl.JRTConfigRequester; import com.yahoo.config.subscription.impl.JRTConfigSubscription; +import com.yahoo.foo.FunctionTestConfig; import com.yahoo.vespa.config.ConfigKey; +import com.yahoo.vespa.config.JRTConnectionPool; import com.yahoo.vespa.config.RawConfig; import com.yahoo.vespa.config.TimingValues; import org.junit.Test; @@ -42,11 +43,13 @@ public class JRTConfigRequestFactoryTest { @Test public void testCreateFromSub() { - ConfigSubscriber subscriber = new ConfigSubscriber(); Class<FunctionTestConfig> clazz = FunctionTestConfig.class; final String configId = "foo"; - JRTConfigSubscription<FunctionTestConfig> sub = new JRTConfigSubscription<>( - new ConfigKey<>(clazz, configId), subscriber, new ConfigSet(), new TimingValues()); + TimingValues timingValues = new TimingValues(); + JRTConfigSubscription<FunctionTestConfig> sub = + new JRTConfigSubscription<>(new ConfigKey<>(clazz, configId), + new JRTConfigRequester(new JRTConnectionPool(new ConfigSourceSet("tcp/localhost:12345")), timingValues), + timingValues); JRTClientConfigRequest request = JRTConfigRequestFactory.createFromSub(sub); assertThat(request.getVespaVersion().get(), is(defaultVespaVersion)); diff --git a/config/src/test/java/com/yahoo/vespa/config/protocol/JRTConfigRequestV3Test.java b/config/src/test/java/com/yahoo/vespa/config/protocol/JRTConfigRequestV3Test.java index 5a29e494d81..dabd87e1eec 100644 --- a/config/src/test/java/com/yahoo/vespa/config/protocol/JRTConfigRequestV3Test.java +++ b/config/src/test/java/com/yahoo/vespa/config/protocol/JRTConfigRequestV3Test.java @@ -1,10 +1,7 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.vespa.config.protocol; -import com.yahoo.config.subscription.ConfigSet; import com.yahoo.config.subscription.ConfigSourceSet; -import com.yahoo.config.subscription.ConfigSubscriber; -import com.yahoo.config.subscription.impl.GenericConfigSubscriber; import com.yahoo.config.subscription.impl.JRTConfigRequester; import com.yahoo.config.subscription.impl.JRTConfigSubscription; import com.yahoo.config.subscription.impl.MockConnection; @@ -17,6 +14,7 @@ import com.yahoo.test.ManualClock; import com.yahoo.vespa.config.ConfigKey; import com.yahoo.vespa.config.ConfigPayload; import com.yahoo.vespa.config.ErrorCode; +import com.yahoo.vespa.config.JRTConnectionPool; import com.yahoo.vespa.config.PayloadChecksums; import com.yahoo.vespa.config.RawConfig; import com.yahoo.vespa.config.TimingValues; @@ -24,7 +22,6 @@ import com.yahoo.vespa.config.util.ConfigUtils; import org.junit.Before; import org.junit.Test; -import java.util.Collections; import java.util.List; import java.util.Optional; @@ -191,8 +188,11 @@ public class JRTConfigRequestV3Test { @Test public void created_from_subscription() { - ConfigSubscriber subscriber = new ConfigSubscriber(); - JRTConfigSubscription<SimpletypesConfig> sub = new JRTConfigSubscription<>(new ConfigKey<>(SimpletypesConfig.class, configId), subscriber, new ConfigSet(), new TimingValues()); + TimingValues timingValues = new TimingValues(); + JRTConfigSubscription<SimpletypesConfig> sub = + new JRTConfigSubscription<>(new ConfigKey<>(SimpletypesConfig.class, configId), + new JRTConfigRequester(new JRTConnectionPool(new ConfigSourceSet("tcp/localhost:985")), timingValues), + timingValues); JRTClientConfigRequest request = createReq(sub, Trace.createNew(9)); assertThat(request.getConfigKey().getName(), is(SimpletypesConfig.CONFIG_DEF_NAME)); JRTServerConfigRequest serverRequest = createReq(request.getRequest()); @@ -209,9 +209,10 @@ public class JRTConfigRequestV3Test { } }); - ConfigSourceSet src = new ConfigSourceSet(); - 
ConfigSubscriber subscriber = new GenericConfigSubscriber(Collections.singletonMap(src, new JRTConfigRequester(connection, new TimingValues()))); - JRTConfigSubscription<SimpletypesConfig> sub = new JRTConfigSubscription<>(new ConfigKey<>(SimpletypesConfig.class, configId), subscriber, src, new TimingValues()); + TimingValues timingValues = new TimingValues(); + JRTConfigSubscription<SimpletypesConfig> sub = new JRTConfigSubscription<>(new ConfigKey<>(SimpletypesConfig.class, configId), + new JRTConfigRequester(connection, timingValues), + timingValues); sub.subscribe(120_0000); assertTrue(sub.nextConfig(120_0000)); sub.close(); diff --git a/config/src/vespa/config/common/configvalue.cpp b/config/src/vespa/config/common/configvalue.cpp index d770c126d38..da1cbfc792c 100644 --- a/config/src/vespa/config/common/configvalue.cpp +++ b/config/src/vespa/config/common/configvalue.cpp @@ -55,7 +55,7 @@ ConfigValue::getLegacyFormat() const return lines; } -const vespalib::string +vespalib::string ConfigValue::asJson() const { if (_payload) { const vespalib::slime::Inspector & payload(_payload->getSlimePayload()); diff --git a/config/src/vespa/config/common/configvalue.h b/config/src/vespa/config/common/configvalue.h index 553a609b9db..bf4c320c061 100644 --- a/config/src/vespa/config/common/configvalue.h +++ b/config/src/vespa/config/common/configvalue.h @@ -35,8 +35,8 @@ public: const vespalib::string & getLine(int i) const { return _lines.at(i); } const std::vector<vespalib::string> & getLines() const { return _lines; } std::vector<vespalib::string> getLegacyFormat() const; - const vespalib::string asJson() const; - const vespalib::string getXxhash64() const { return _xxhash64; } + vespalib::string asJson() const; + const vespalib::string& getXxhash64() const { return _xxhash64; } void serializeV1(::vespalib::slime::Cursor & cursor) const; void serializeV2(::vespalib::slime::Cursor & cursor) const; diff --git a/config/src/vespa/config/common/configvalue.hpp 
b/config/src/vespa/config/common/configvalue.hpp index 3bf149f17b1..665ce69c7a5 100644 --- a/config/src/vespa/config/common/configvalue.hpp +++ b/config/src/vespa/config/common/configvalue.hpp @@ -8,7 +8,7 @@ ConfigValue::newInstance() const { if (_payload) { const vespalib::slime::Inspector & payload(_payload->getSlimePayload()); - return std::unique_ptr<ConfigType>(new ConfigType(config::ConfigPayload(payload))); + return std::unique_ptr<ConfigType>(new ConfigType(::config::ConfigPayload(payload))); } else { return std::unique_ptr<ConfigType>(new ConfigType(*this)); } diff --git a/config/src/vespa/config/frt/frtconfigagent.cpp b/config/src/vespa/config/frt/frtconfigagent.cpp index 2b66e806270..827ef75251b 100644 --- a/config/src/vespa/config/frt/frtconfigagent.cpp +++ b/config/src/vespa/config/frt/frtconfigagent.cpp @@ -71,7 +71,7 @@ FRTConfigAgent::handleUpdatedGeneration(const ConfigKey & key, const ConfigState if (LOG_WOULD_LOG(spam)) { LOG(spam, "updating holder for key %s,", key.toString().c_str()); } - _holder->handle(ConfigUpdate::UP(new ConfigUpdate(_latest, changed, newState.generation))); + _holder->handle(std::make_unique<ConfigUpdate>(_latest, changed, newState.generation)); _numConfigured++; } diff --git a/config/src/vespa/config/helper/configfetcher.cpp b/config/src/vespa/config/helper/configfetcher.cpp index 7a6f806c6ff..d85308bbcbb 100644 --- a/config/src/vespa/config/helper/configfetcher.cpp +++ b/config/src/vespa/config/helper/configfetcher.cpp @@ -9,9 +9,11 @@ LOG_SETUP(".config.helper.configfetcher"); namespace config { +VESPA_THREAD_STACK_TAG(config_fetcher_thread); + ConfigFetcher::ConfigFetcher(const IConfigContext::SP & context) : _poller(context), - _thread(std::make_unique<vespalib::Thread>(_poller)), + _thread(std::make_unique<vespalib::Thread>(_poller, config_fetcher_thread)), _closed(false), _started(false) { @@ -19,7 +21,7 @@ ConfigFetcher::ConfigFetcher(const IConfigContext::SP & context) ConfigFetcher::ConfigFetcher(const 
SourceSpec & spec) : _poller(std::make_shared<ConfigContext>(spec)), - _thread(std::make_unique<vespalib::Thread>(_poller)), + _thread(std::make_unique<vespalib::Thread>(_poller, config_fetcher_thread)), _closed(false), _started(false) { diff --git a/config/src/vespa/config/retriever/simpleconfigurer.cpp b/config/src/vespa/config/retriever/simpleconfigurer.cpp index 74022cfd6a3..5059b9997f5 100644 --- a/config/src/vespa/config/retriever/simpleconfigurer.cpp +++ b/config/src/vespa/config/retriever/simpleconfigurer.cpp @@ -8,10 +8,12 @@ LOG_SETUP(".config.retriever.simpleconfigurer"); namespace config { +VESPA_THREAD_STACK_TAG(simple_configurer_thread); + SimpleConfigurer::SimpleConfigurer(SimpleConfigRetriever::UP retriever, SimpleConfigurable * const configurable) : _retriever(std::move(retriever)), _configurable(configurable), - _thread(*this), + _thread(*this, simple_configurer_thread), _started(false) { assert(_retriever); diff --git a/config/src/vespa/config/subscription/configsubscription.cpp b/config/src/vespa/config/subscription/configsubscription.cpp index 4aaa30323c7..9790541906b 100644 --- a/config/src/vespa/config/subscription/configsubscription.cpp +++ b/config/src/vespa/config/subscription/configsubscription.cpp @@ -31,7 +31,11 @@ ConfigSubscription::nextUpdate(int64_t generation, std::chrono::milliseconds tim if (_closed || !_holder->poll()) { return false; } + auto old = std::move(_next); _next = _holder->provide(); + if (old) { + _next->merge(*old); + } if (isGenerationNewer(_next->getGeneration(), generation)) { return true; } @@ -98,7 +102,7 @@ ConfigSubscription::flip() _current = std::move(_next); _lastGenerationChanged = _current->getGeneration(); } else { - _current.reset(new ConfigUpdate(_current->getValue(), false, _next->getGeneration())); + _current = std::make_unique<ConfigUpdate>(_current->getValue(), false, _next->getGeneration()); } _isChanged = change; } diff --git a/configdefinitions/abi-spec.json b/configdefinitions/abi-spec.json 
new file mode 100644 index 00000000000..9e26dfeeb6e --- /dev/null +++ b/configdefinitions/abi-spec.json @@ -0,0 +1 @@ +{}
\ No newline at end of file diff --git a/configdefinitions/pom.xml b/configdefinitions/pom.xml index d618ba151bf..78aa8b59222 100644 --- a/configdefinitions/pom.xml +++ b/configdefinitions/pom.xml @@ -60,6 +60,10 @@ <updateReleaseInfo>true</updateReleaseInfo> </configuration> </plugin> + <plugin> + <groupId>com.yahoo.vespa</groupId> + <artifactId>abi-check-plugin</artifactId> + </plugin> </plugins> </build> </project> diff --git a/configdefinitions/src/vespa/summary.def b/configdefinitions/src/vespa/summary.def index 5169df7d72e..9b231d86c38 100644 --- a/configdefinitions/src/vespa/summary.def +++ b/configdefinitions/src/vespa/summary.def @@ -2,6 +2,9 @@ namespace=vespa.config.search defaultsummaryid int default=-1 + +usev8geopositions bool default=false + classes[].id int classes[].name string classes[].omitsummaryfeatures bool default=false diff --git a/configgen/src/main/java/com/yahoo/config/codegen/BuilderGenerator.java b/configgen/src/main/java/com/yahoo/config/codegen/BuilderGenerator.java index 548bd14ccb3..937d04b35bd 100644 --- a/configgen/src/main/java/com/yahoo/config/codegen/BuilderGenerator.java +++ b/configgen/src/main/java/com/yahoo/config/codegen/BuilderGenerator.java @@ -142,7 +142,19 @@ public class BuilderGenerator { return "public Builder " + n.getName() + "(" + builderType(n) + " " + INTERNAL_PREFIX + "builder) {\n" + // " " + n.getName() + " = " + INTERNAL_PREFIX + "builder;\n" + // " return this;\n" + // + "}\n" + // + "/**\n" + // + " * Make a new builder and run the supplied function on it before adding it to the list\n" + // + " * @param __func lambda that modifies the given builder\n" + // + " * @return this builder\n" + // + " */\n" + // + "public Builder " + n.getName() + "(java.util.function.Consumer<" + builderType(n) + "> __func) {\n" + // + " " + builderType(n) + " __inner = new " + builderType(n) +"();\n" + // + " __func.accept(__inner);\n" + // + " " + n.getName() + " = __inner;\n" + // + " return this;\n" + // "}"; + } private 
static String innerArraySetters(InnerCNode n) { @@ -157,6 +169,18 @@ public class BuilderGenerator { "}\n" + // "\n" + // "/**\n" + // + " * Make a new builder and run the supplied function on it before adding it to the list\n" + // + " * @param __func lambda that modifies the given builder\n" + // + " * @return this builder\n" + // + " */\n" + // + "public Builder " + n.getName() + "(java.util.function.Consumer<" + builderType(n) + "> __func) {\n" + // + " " + builderType(n) + " __inner = new " + builderType(n) +"();\n" + // + " __func.accept(__inner);\n" + // + " " + n.getName() + ".add(__inner);\n" + // + " return this;\n" + // + "}\n" + // + "\n" + // + "/**\n" + // " * Set the given list as this builder's list of " + nodeClass(n) + " builders\n" + // " * @param __builders a list of builders\n" + // " * @return this builder\n" + // @@ -195,8 +219,7 @@ public class BuilderGenerator { } private static String innerMapSetters(CNode n) { - return "public Builder " + n.getName() + "(String " + INTERNAL_PREFIX + "key, " + builderType(n) + " " + INTERNAL_PREFIX - + "value) {\n" + // + String r = "public Builder " + n.getName() + "(String " + INTERNAL_PREFIX + "key, " + builderType(n) + " " + INTERNAL_PREFIX + "value) {\n" + // " " + n.getName() + ".put(" + INTERNAL_PREFIX + "key, " + INTERNAL_PREFIX + "value);\n" + // " return this;\n" + // "}\n" + // @@ -205,6 +228,22 @@ public class BuilderGenerator { " " + n.getName() + ".putAll(" + INTERNAL_PREFIX + "values);\n" + // " return this;\n" + // "}"; + if (n instanceof InnerCNode) { + r = r + + "\n\n" + // + "/**\n" + // + " * Make a new builder and run the supplied function on it before using it as the value\n" + // + " * @param __func lambda that modifies the given builder\n" + // + " * @return this builder\n" + // + " */\n" + // + "public Builder " + n.getName() + "(String __key, java.util.function.Consumer<" + builderType(n) + "> __func) {\n" + // + " " + builderType(n) + " __inner = new " + builderType(n) +"();\n" + 
// + " __func.accept(__inner);\n" + // + " " + n.getName() + ".put(__key, __inner);\n" + // + " return this;\n" + // + "}"; + } + return r; } private static String privateLeafMapSetter(CNode n) { diff --git a/configgen/src/test/java/com/yahoo/config/codegen/JavaClassBuilderTest.java b/configgen/src/test/java/com/yahoo/config/codegen/JavaClassBuilderTest.java index f9b36974e15..cce2d66b5f0 100644 --- a/configgen/src/test/java/com/yahoo/config/codegen/JavaClassBuilderTest.java +++ b/configgen/src/test/java/com/yahoo/config/codegen/JavaClassBuilderTest.java @@ -116,6 +116,7 @@ public class JavaClassBuilderTest { JavaClassBuilder builder = new JavaClassBuilder(root, parser.getNormalizedDefinition(), null, null); String[] configClassLines = builder.getConfigClass("AllfeaturesConfig").split("\n"); + for (var line : configClassLines) { System.out.println(line); } for (int i = 0; i < referenceClassLines.size(); i++) { if (configClassLines.length <= i) fail("Missing lines i generated config class. 
First missing line:\n" + referenceClassLines.get(i)); diff --git a/configgen/src/test/resources/allfeatures.reference b/configgen/src/test/resources/allfeatures.reference index e0b6176efa2..7698ea6727c 100644 --- a/configgen/src/test/resources/allfeatures.reference +++ b/configgen/src/test/resources/allfeatures.reference @@ -565,11 +565,33 @@ public final class AllfeaturesConfig extends ConfigInstance { basic_struct = __builder; return this; } + /** + * Make a new builder and run the supplied function on it before adding it to the list + * @param __func lambda that modifies the given builder + * @return this builder + */ + public Builder basic_struct(java.util.function.Consumer<Basic_struct.Builder> __func) { + Basic_struct.Builder __inner = new Basic_struct.Builder(); + __func.accept(__inner); + basic_struct = __inner; + return this; + } public Builder struct_of_struct(Struct_of_struct.Builder __builder) { struct_of_struct = __builder; return this; } + /** + * Make a new builder and run the supplied function on it before adding it to the list + * @param __func lambda that modifies the given builder + * @return this builder + */ + public Builder struct_of_struct(java.util.function.Consumer<Struct_of_struct.Builder> __func) { + Struct_of_struct.Builder __inner = new Struct_of_struct.Builder(); + __func.accept(__inner); + struct_of_struct = __inner; + return this; + } /** * Add the given builder to this builder's list of MyArray builders @@ -582,6 +604,18 @@ public final class AllfeaturesConfig extends ConfigInstance { } /** + * Make a new builder and run the supplied function on it before adding it to the list + * @param __func lambda that modifies the given builder + * @return this builder + */ + public Builder myArray(java.util.function.Consumer<MyArray.Builder> __func) { + MyArray.Builder __inner = new MyArray.Builder(); + __func.accept(__inner); + myArray.add(__inner); + return this; + } + + /** * Set the given list as this builder's list of MyArray builders * 
@param __builders a list of builders * @return this builder @@ -601,6 +635,18 @@ public final class AllfeaturesConfig extends ConfigInstance { return this; } + /** + * Make a new builder and run the supplied function on it before using it as the value + * @param __func lambda that modifies the given builder + * @return this builder + */ + public Builder myMap(String __key, java.util.function.Consumer<MyMap.Builder> __func) { + MyMap.Builder __inner = new MyMap.Builder(); + __func.accept(__inner); + myMap.put(__key, __inner); + return this; + } + private boolean _applyOnRestart = false; @java.lang.Override @@ -1305,11 +1351,33 @@ public final class AllfeaturesConfig extends ConfigInstance { inner0 = __builder; return this; } + /** + * Make a new builder and run the supplied function on it before adding it to the list + * @param __func lambda that modifies the given builder + * @return this builder + */ + public Builder inner0(java.util.function.Consumer<Inner0.Builder> __func) { + Inner0.Builder __inner = new Inner0.Builder(); + __func.accept(__inner); + inner0 = __inner; + return this; + } public Builder inner1(Inner1.Builder __builder) { inner1 = __builder; return this; } + /** + * Make a new builder and run the supplied function on it before adding it to the list + * @param __func lambda that modifies the given builder + * @return this builder + */ + public Builder inner1(java.util.function.Consumer<Inner1.Builder> __func) { + Inner1.Builder __inner = new Inner1.Builder(); + __func.accept(__inner); + inner1 = __inner; + return this; + } public Struct_of_struct build() { return new Struct_of_struct(this); @@ -1616,6 +1684,18 @@ public final class AllfeaturesConfig extends ConfigInstance { } /** + * Make a new builder and run the supplied function on it before adding it to the list + * @param __func lambda that modifies the given builder + * @return this builder + */ + public Builder anotherArray(java.util.function.Consumer<AnotherArray.Builder> __func) { + 
AnotherArray.Builder __inner = new AnotherArray.Builder(); + __func.accept(__inner); + anotherArray.add(__inner); + return this; + } + + /** * Set the given list as this builder's list of AnotherArray builders * @param __builders a list of builders * @return this builder @@ -1914,6 +1994,18 @@ public final class AllfeaturesConfig extends ConfigInstance { } /** + * Make a new builder and run the supplied function on it before adding it to the list + * @param __func lambda that modifies the given builder + * @return this builder + */ + public Builder anotherArray(java.util.function.Consumer<AnotherArray.Builder> __func) { + AnotherArray.Builder __inner = new AnotherArray.Builder(); + __func.accept(__inner); + anotherArray.add(__inner); + return this; + } + + /** * Set the given list as this builder's list of AnotherArray builders * @param __builders a list of builders * @return this builder diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/ApplicationRepository.java b/configserver/src/main/java/com/yahoo/vespa/config/server/ApplicationRepository.java index 00dc1f4d065..69098ea0030 100644 --- a/configserver/src/main/java/com/yahoo/vespa/config/server/ApplicationRepository.java +++ b/configserver/src/main/java/com/yahoo/vespa/config/server/ApplicationRepository.java @@ -1081,7 +1081,7 @@ public class ApplicationRepository implements com.yahoo.config.provision.Deploye public double getQuotaUsageRate(ApplicationId applicationId) { var application = getApplication(applicationId); return application.getModel().provisioned().all().values().stream() - .map(Capacity::maxResources) + .map(Capacity::maxResources)// TODO: This may be unspecified -> 0 .mapToDouble(resources -> resources.nodes() * resources.nodeResources().cost()) .sum(); } diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java b/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java index ea823e5befb..1b55d17fd36 100644 
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java +++ b/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java @@ -200,6 +200,8 @@ public class ModelContextImpl implements ModelContext { private final boolean asyncApplyBucketDiff; private final boolean ignoreThreadStackSizes; private final boolean unorderedMergeChaining; + private final boolean useV8GeoPositions; + private final boolean useV8DocManagerCfg; public FeatureFlags(FlagSource source, ApplicationId appId) { this.defaultTermwiseLimit = flagValue(source, appId, Flags.DEFAULT_TERM_WISE_LIMIT); @@ -238,6 +240,8 @@ public class ModelContextImpl implements ModelContext { this.asyncApplyBucketDiff = flagValue(source, appId, Flags.ASYNC_APPLY_BUCKET_DIFF); this.ignoreThreadStackSizes = flagValue(source, appId, Flags.IGNORE_THREAD_STACK_SIZES); this.unorderedMergeChaining = flagValue(source, appId, Flags.UNORDERED_MERGE_CHAINING); + this.useV8GeoPositions = flagValue(source, appId, Flags.USE_V8_GEO_POSITIONS); + this.useV8DocManagerCfg = flagValue(source, appId, Flags.USE_V8_DOC_MANAGER_CFG); } @Override public double defaultTermwiseLimit() { return defaultTermwiseLimit; } @@ -278,6 +282,8 @@ public class ModelContextImpl implements ModelContext { @Override public boolean asyncApplyBucketDiff() { return asyncApplyBucketDiff; } @Override public boolean ignoreThreadStackSizes() { return ignoreThreadStackSizes; } @Override public boolean unorderedMergeChaining() { return unorderedMergeChaining; } + @Override public boolean useV8GeoPositions() { return useV8GeoPositions; } + @Override public boolean useV8DocManagerCfg() { return useV8DocManagerCfg; } private static <V> V flagValue(FlagSource source, ApplicationId appId, UnboundFlag<? 
extends V, ?, ?> flag) { return flag.bindTo(source) diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileDistributionFactory.java b/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileDistributionFactory.java index 3e3c7066927..1027cc6a237 100644 --- a/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileDistributionFactory.java +++ b/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileDistributionFactory.java @@ -32,7 +32,7 @@ public class FileDistributionFactory implements AutoCloseable { } public FileDistribution createFileDistribution() { - return new FileDistributionImpl(getFileReferencesDir(), supervisor); + return new FileDistributionImpl(supervisor); } public AddFileInterface createFileManager(File applicationDir) { diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileDistributionImpl.java b/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileDistributionImpl.java index 605f5924e68..abb8a3e8487 100644 --- a/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileDistributionImpl.java +++ b/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileDistributionImpl.java @@ -10,7 +10,6 @@ import com.yahoo.jrt.StringArray; import com.yahoo.jrt.Supervisor; import com.yahoo.jrt.Target; -import java.io.File; import java.util.Set; import java.util.logging.Level; import java.util.logging.Logger; @@ -24,26 +23,21 @@ public class FileDistributionImpl implements FileDistribution, RequestWaiter { private final static double rpcTimeout = 1.0; private final Supervisor supervisor; - private final File fileReferencesDir; - public FileDistributionImpl(File fileReferencesDir, Supervisor supervisor) { - this.fileReferencesDir = fileReferencesDir; + public FileDistributionImpl(Supervisor supervisor) { this.supervisor = supervisor; } + /** + * Notifies client 
which file references it should start downloading. It's OK if the call does not succeed, + * as this is just a hint to the client to start downloading. Currently the only client is the config server + * + * @param hostName host which should be notified about file references to download + * @param port port which should be used when notifying + * @param fileReferences set of file references to start downloading + */ @Override public void startDownload(String hostName, int port, Set<FileReference> fileReferences) { - startDownloadingFileReferences(hostName, port, fileReferences); - } - - @Override - public File getFileReferencesDir() { - return fileReferencesDir; - } - - // Notifies client which file references it should start downloading. It's OK if the call does not succeed, - // as this is just a hint to the client to start downloading. Currently the only client is the config server - private void startDownloadingFileReferences(String hostName, int port, Set<FileReference> fileReferences) { Target target = supervisor.connect(new Spec(hostName, port)); Request request = new Request("filedistribution.setFileReferencesToDownload"); request.setContext(target); @@ -52,7 +46,6 @@ public class FileDistributionImpl implements FileDistribution, RequestWaiter { target.invokeAsync(request, rpcTimeout, this); } - @Override public void handleRequestDone(Request req) { Target target = (Target) req.getContext(); diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileServer.java b/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileServer.java index f4801c5a7ea..81cd1dd9738 100644 --- a/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileServer.java +++ b/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileServer.java @@ -12,7 +12,6 @@ import com.yahoo.jrt.StringValue; import com.yahoo.jrt.Supervisor; import com.yahoo.jrt.Transport; import 
com.yahoo.vespa.config.ConnectionPool; -import com.yahoo.vespa.config.JRTConnectionPool; import com.yahoo.vespa.defaults.Defaults; import com.yahoo.vespa.filedistribution.CompressedFileReference; import com.yahoo.vespa.filedistribution.EmptyFileReferenceData; @@ -22,8 +21,6 @@ import com.yahoo.vespa.filedistribution.FileReferenceData; import com.yahoo.vespa.filedistribution.FileReferenceDownload; import com.yahoo.vespa.filedistribution.LazyFileReferenceData; import com.yahoo.vespa.filedistribution.LazyTemporaryStorageFileReferenceData; -import com.yahoo.vespa.flags.FlagSource; -import com.yahoo.vespa.flags.Flags; import com.yahoo.yolean.Exceptions; import java.io.File; @@ -40,6 +37,7 @@ import java.util.logging.Logger; import static com.yahoo.vespa.config.server.filedistribution.FileDistributionUtil.getOtherConfigServersInCluster; public class FileServer { + private static final Logger log = Logger.getLogger(FileServer.class.getName()); private final FileDirectory root; @@ -77,15 +75,14 @@ public class FileServer { @SuppressWarnings("WeakerAccess") // Created by dependency injection @Inject - public FileServer(ConfigserverConfig configserverConfig, FlagSource flagSource) { + public FileServer(ConfigserverConfig configserverConfig) { this(new File(Defaults.getDefaults().underVespaHome(configserverConfig.fileReferencesDir())), - createFileDownloader(getOtherConfigServersInCluster(configserverConfig), - Flags.USE_FILE_DISTRIBUTION_CONNECTION_POOL.bindTo(flagSource).value())); + createFileDownloader(getOtherConfigServersInCluster(configserverConfig))); } // For testing only public FileServer(File rootDir) { - this(rootDir, createFileDownloader(List.of(), true)); + this(rootDir, createFileDownloader(List.of())); } public FileServer(File rootDir, FileDownloader fileDownloader) { @@ -103,7 +100,7 @@ public class FileServer { try { return root.getFile(reference).exists(); } catch (IllegalArgumentException e) { - log.log(Level.FINE, () -> "Failed locating file reference '" 
+ reference + "' with error " + e.toString()); + log.log(Level.FINE, () -> "Failed locating " + reference + ": " + e.getMessage()); } return false; } @@ -121,7 +118,7 @@ public class FileServer { private void serveFile(FileReference reference, Receiver target) { File file = root.getFile(reference); - log.log(Level.FINE, () -> "Start serving reference '" + reference.value() + "' with file '" + file.getAbsolutePath() + "'"); + log.log(Level.FINE, () -> "Start serving " + reference + " with file '" + file.getAbsolutePath() + "'"); boolean success = false; String errorDescription = "OK"; FileReferenceData fileData = EmptyFileReferenceData.empty(reference, file.getName()); @@ -129,15 +126,16 @@ public class FileServer { fileData = readFileReferenceData(reference); success = true; } catch (IOException e) { - errorDescription = "For file reference '" + reference.value() + "': failed reading file '" + file.getAbsolutePath() + "'"; + errorDescription = "For" + reference.value() + ": failed reading file '" + file.getAbsolutePath() + "'"; log.warning(errorDescription + " for sending to '" + target.toString() + "'. " + e.toString()); + fileData.close(); } try { target.receive(fileData, new ReplayStatus(success ? 0 : 1, success ? 
"OK" : errorDescription)); - log.log(Level.FINE, () -> "Done serving file reference '" + reference.value() + "' with file '" + file.getAbsolutePath() + "'"); + log.log(Level.FINE, () -> "Done serving " + reference.value() + " with file '" + file.getAbsolutePath() + "'"); } catch (Exception e) { - log.log(Level.WARNING, "Failed serving file reference '" + reference.value() + "': " + Exceptions.toMessageString(e)); + log.log(Level.WARNING, "Failed serving " + reference + ": " + Exceptions.toMessageString(e)); } finally { fileData.close(); } @@ -157,19 +155,19 @@ public class FileServer { public void serveFile(String fileReference, boolean downloadFromOtherSourceIfNotFound, Request request, Receiver receiver) { if (executor instanceof ThreadPoolExecutor) - log.log(Level.FINE, () -> "Active threads is now " + ((ThreadPoolExecutor) executor).getActiveCount()); + log.log(Level.FINE, () -> "Active threads: " + ((ThreadPoolExecutor) executor).getActiveCount()); executor.execute(() -> serveFileInternal(fileReference, downloadFromOtherSourceIfNotFound, request, receiver)); } private void serveFileInternal(String fileReference, boolean downloadFromOtherSourceIfNotFound, Request request, Receiver receiver) { - log.log(Level.FINE, () -> "Received request for reference '" + fileReference + "' from " + request.target()); + log.log(Level.FINE, () -> "Received request for file reference '" + fileReference + "' from " + request.target()); boolean fileExists; try { String client = request.target().toString(); FileReferenceDownload fileReferenceDownload = new FileReferenceDownload(new FileReference(fileReference), - downloadFromOtherSourceIfNotFound, - client); + client, + downloadFromOtherSourceIfNotFound); fileExists = hasFileDownloadIfNeeded(fileReferenceDownload); if (fileExists) startFileServing(fileReference, receiver); } catch (IllegalArgumentException e) { @@ -192,9 +190,15 @@ public class FileServer { if (fileReferenceDownload.downloadFromOtherSourceIfNotFound()) { 
log.log(Level.FINE, "File not found, downloading from another source"); // Create new FileReferenceDownload with downloadFromOtherSourceIfNotFound set to false - // to avoid config servers requesting a file reference perpetually, e.g. for a file that does not exist anymore - FileReferenceDownload newDownload = new FileReferenceDownload(fileReference, false, fileReferenceDownload.client()); - return downloader.getFile(newDownload).isPresent(); + // to avoid config servers requesting a file reference perpetually, e.g. for a file that + // does not exist anymore + FileReferenceDownload newDownload = new FileReferenceDownload(fileReference, + fileReferenceDownload.client(), + false); + boolean fileExists = downloader.getFile(newDownload).isPresent(); + if ( ! fileExists) + log.log(Level.WARNING, "Failed downloading '" + fileReferenceDownload + "'"); + return fileExists; } else { log.log(Level.FINE, "File not found, will not download from another source, since request came from another config server"); return false; @@ -208,22 +212,19 @@ public class FileServer { executor.shutdown(); } - private static FileDownloader createFileDownloader(List<String> configServers, boolean useFileDistributionConnectionPool) { + private static FileDownloader createFileDownloader(List<String> configServers) { Supervisor supervisor = new Supervisor(new Transport("filedistribution-pool")).setDropEmptyBuffers(true); return new FileDownloader(configServers.isEmpty() ? 
FileDownloader.emptyConnectionPool() - : createConnectionPool(configServers, supervisor, useFileDistributionConnectionPool), + : createConnectionPool(configServers, supervisor), supervisor); } - private static ConnectionPool createConnectionPool(List<String> configServers, Supervisor supervisor, boolean useFileDistributionConnectionPool) { + private static ConnectionPool createConnectionPool(List<String> configServers, Supervisor supervisor) { ConfigSourceSet configSourceSet = new ConfigSourceSet(configServers); - if (configServers.size() == 0) return FileDownloader.emptyConnectionPool(); - return useFileDistributionConnectionPool - ? new FileDistributionConnectionPool(configSourceSet, supervisor) - : new JRTConnectionPool(configSourceSet, supervisor); + return new FileDistributionConnectionPool(configSourceSet, supervisor); } } diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/MockFileDistribution.java b/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/MockFileDistribution.java deleted file mode 100644 index 99889f38bb9..00000000000 --- a/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/MockFileDistribution.java +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
-package com.yahoo.vespa.config.server.filedistribution; - -import com.yahoo.config.FileReference; -import com.yahoo.config.model.api.FileDistribution; - -import java.io.File; -import java.util.Set; - -/** - * @author Ulf Lilleengen - */ -public class MockFileDistribution implements FileDistribution { - private final File fileReferencesDir; - - MockFileDistribution(File fileReferencesDir) { - this.fileReferencesDir = fileReferencesDir; - } - - @Override - public void startDownload(String hostName, int port, Set<FileReference> fileReferences) {} - - @Override - public File getFileReferencesDir() { return fileReferencesDir; } - -} diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/http/v1/RoutingStatusApiHandler.java b/configserver/src/main/java/com/yahoo/vespa/config/server/http/v1/RoutingStatusApiHandler.java new file mode 100644 index 00000000000..43ed16ab21c --- /dev/null +++ b/configserver/src/main/java/com/yahoo/vespa/config/server/http/v1/RoutingStatusApiHandler.java @@ -0,0 +1,294 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+package com.yahoo.vespa.config.server.http.v1; + +import com.google.inject.Inject; +import com.yahoo.config.provision.ApplicationId; +import com.yahoo.jdisc.http.HttpRequest; +import com.yahoo.path.Path; +import com.yahoo.restapi.RestApi; +import com.yahoo.restapi.RestApiException; +import com.yahoo.restapi.RestApiRequestHandler; +import com.yahoo.restapi.SlimeJsonResponse; +import com.yahoo.slime.Cursor; +import com.yahoo.slime.Slime; +import com.yahoo.slime.SlimeUtils; +import com.yahoo.vespa.curator.Curator; +import com.yahoo.vespa.curator.transaction.CuratorOperations; +import com.yahoo.vespa.curator.transaction.CuratorTransaction; +import com.yahoo.yolean.Exceptions; + +import java.time.Clock; +import java.time.Instant; +import java.util.Arrays; +import java.util.List; +import java.util.Locale; +import java.util.Objects; +import java.util.Optional; +import java.util.Set; +import java.util.logging.Level; +import java.util.logging.Logger; +import java.util.stream.Collectors; + +/** + * This implements the /routing/v1/status REST API on the config server, providing explicit control over the routing + * status of a deployment or zone (all deployments). The routing status manipulated by this is only respected by the + * shared routing layer. 
+ * + * @author bjorncs + * @author mpolden + */ +public class RoutingStatusApiHandler extends RestApiRequestHandler<RoutingStatusApiHandler> { + + private static final Logger log = Logger.getLogger(RoutingStatusApiHandler.class.getName()); + + private static final Path ROUTING_ROOT = Path.fromString("/routing/v1/"); + private static final Path DEPLOYMENT_STATUS_ROOT = ROUTING_ROOT.append("status"); + private static final Path ZONE_STATUS = ROUTING_ROOT.append("zone-inactive"); + + private final Curator curator; + private final Clock clock; + + @Inject + public RoutingStatusApiHandler(Context context, Curator curator) { + this(context, curator, Clock.systemUTC()); + } + + RoutingStatusApiHandler(Context context, Curator curator, Clock clock) { + super(context, RoutingStatusApiHandler::createRestApiDefinition); + this.curator = Objects.requireNonNull(curator); + this.clock = Objects.requireNonNull(clock); + + curator.create(DEPLOYMENT_STATUS_ROOT); + } + + private static RestApi createRestApiDefinition(RoutingStatusApiHandler self) { + return RestApi.builder() + .addRoute(RestApi.route("/routing/v1/status") + .get(self::listInactiveDeployments)) + .addRoute(RestApi.route("/routing/v1/status/zone") + .get(self::zoneStatus) + .put(self::changeZoneStatus) + .delete(self::changeZoneStatus)) + .addRoute(RestApi.route("/routing/v1/status/{upstreamName}") + .get(self::getDeploymentStatus) + .put(self::changeDeploymentStatus)) + .build(); + } + + /** Get upstream of all deployments with status OUT */ + private SlimeJsonResponse listInactiveDeployments(RestApi.RequestContext context) { + List<String> inactiveDeployments = curator.getChildren(DEPLOYMENT_STATUS_ROOT).stream() + .filter(upstreamName -> deploymentStatus(upstreamName).status() == RoutingStatus.out) + .sorted() + .collect(Collectors.toUnmodifiableList()); + Slime slime = new Slime(); + Cursor rootArray = slime.setArray(); + inactiveDeployments.forEach(rootArray::addString); + return new SlimeJsonResponse(slime); + 
} + + /** Get the routing status of a deployment */ + private SlimeJsonResponse getDeploymentStatus(RestApi.RequestContext context) { + String upstreamName = upstreamName(context); + DeploymentRoutingStatus deploymentRoutingStatus = deploymentStatus(upstreamName); + // If the entire zone is out, we always return OUT regardless of the actual routing status + if (zoneStatus() == RoutingStatus.out) { + String reason = String.format("Rotation is OUT because the zone is OUT (actual deployment status is %s)", + deploymentRoutingStatus.status().name().toUpperCase(Locale.ENGLISH)); + deploymentRoutingStatus = new DeploymentRoutingStatus(RoutingStatus.out, "operator", reason, + clock.instant()); + } + return new SlimeJsonResponse(toSlime(deploymentRoutingStatus)); + } + + /** Change routing status of a deployment */ + private SlimeJsonResponse changeDeploymentStatus(RestApi.RequestContext context) { + Set<String> upstreamNames = upstreamNames(context); + ApplicationId instance = instance(context); + RestApi.RequestContext.RequestContent requestContent = context.requestContentOrThrow(); + Slime requestBody = Exceptions.uncheck(() -> SlimeUtils.jsonToSlime(requestContent.content().readAllBytes())); + DeploymentRoutingStatus wantedStatus = deploymentRoutingStatusFromSlime(requestBody, clock.instant()); + List<DeploymentRoutingStatus> currentStatuses = upstreamNames.stream() + .map(this::deploymentStatus) + .collect(Collectors.toList()); + DeploymentRoutingStatus currentStatus = currentStatuses.get(0); + // Redeploy application so that a new LbServicesConfig containing the updated status is generated and consumed + // by routing layer. 
This is required to update status of upstreams in application endpoints + log.log(Level.INFO, "Changing routing status of " + instance + " from " + + currentStatus.status() + " to " + wantedStatus.status()); + boolean needsChange = currentStatuses.stream().anyMatch(status -> status.status() != wantedStatus.status()); + if (needsChange) { + changeStatus(upstreamNames, wantedStatus); + } + return new SlimeJsonResponse(toSlime(wantedStatus)); + } + + /** Change routing status of a zone */ + private SlimeJsonResponse changeZoneStatus(RestApi.RequestContext context) { + boolean in = context.request().getMethod() == HttpRequest.Method.DELETE; + log.log(Level.INFO, "Changing routing status of zone from " + zoneStatus() + " to " + + (in ? RoutingStatus.in : RoutingStatus.out)); + if (in) { + curator.delete(ZONE_STATUS); + return new SlimeJsonResponse(toSlime(RoutingStatus.in)); + } else { + curator.create(ZONE_STATUS); + return new SlimeJsonResponse(toSlime(RoutingStatus.out)); + } + } + + /** Read the status for zone */ + private SlimeJsonResponse zoneStatus(RestApi.RequestContext context) { + return new SlimeJsonResponse(toSlime(zoneStatus())); + } + + /** Change the status of one or more upstream names */ + private void changeStatus(Set<String> upstreamNames, DeploymentRoutingStatus newStatus) { + CuratorTransaction transaction = new CuratorTransaction(curator); + for (var upstreamName : upstreamNames) { + Path path = deploymentStatusPath(upstreamName); + if (curator.exists(path)) { + transaction.add(CuratorOperations.delete(path.getAbsolute())); + } + transaction.add(CuratorOperations.create(path.getAbsolute(), toJsonBytes(newStatus))); + } + transaction.commit(); + } + + /** Read the status for a deployment */ + private DeploymentRoutingStatus deploymentStatus(String upstreamName) { + Instant changedAt = clock.instant(); + Path path = deploymentStatusPath(upstreamName); + Optional<byte[]> data = curator.getData(path); + if (data.isEmpty()) { + return new 
DeploymentRoutingStatus(RoutingStatus.in, "", "", changedAt); + } + String agent = ""; + String reason = ""; + RoutingStatus status = RoutingStatus.out; + if (data.get().length > 0) { // Compatibility with old format, where no data is stored + Slime slime = SlimeUtils.jsonToSlime(data.get()); + Cursor root = slime.get(); + status = asRoutingStatus(root.field("status").asString()); + agent = root.field("agent").asString(); + reason = root.field("cause").asString(); + changedAt = Instant.ofEpochSecond(root.field("lastUpdate").asLong()); + } + return new DeploymentRoutingStatus(status, agent, reason, changedAt); + } + + private RoutingStatus zoneStatus() { + return curator.exists(ZONE_STATUS) ? RoutingStatus.out : RoutingStatus.in; + } + + protected Path deploymentStatusPath(String upstreamName) { + return DEPLOYMENT_STATUS_ROOT.append(upstreamName); + } + + private static String upstreamName(RestApi.RequestContext context) { + return upstreamNames(context).iterator().next(); + } + + private static Set<String> upstreamNames(RestApi.RequestContext context) { + Set<String> upstreamNames = Arrays.stream(context.pathParameters().getStringOrThrow("upstreamName") + .split(",")) + .collect(Collectors.toSet()); + if (upstreamNames.isEmpty()) { + throw new RestApiException.BadRequest("At least one upstream name must be specified"); + } + for (var upstreamName : upstreamNames) { + if (upstreamName.contains(" ")) { + throw new RestApiException.BadRequest("Invalid upstream name: '" + upstreamName + "'"); + } + } + return upstreamNames; + } + + private static ApplicationId instance(RestApi.RequestContext context) { + return context.queryParameters().getString("application") + .map(ApplicationId::fromSerializedForm) + .orElseThrow(() -> new RestApiException.BadRequest("Missing application parameter")); + } + + private byte[] toJsonBytes(DeploymentRoutingStatus status) { + return Exceptions.uncheck(() -> SlimeUtils.toJsonBytes(toSlime(status))); + } + + private Slime 
toSlime(DeploymentRoutingStatus status) { + Slime slime = new Slime(); + Cursor root = slime.setObject(); + root.setString("status", asString(status.status())); + root.setString("cause", status.reason()); + root.setString("agent", status.agent()); + root.setLong("lastUpdate", status.changedAt().getEpochSecond()); + return slime; + } + + private static Slime toSlime(RoutingStatus status) { + Slime slime = new Slime(); + Cursor root = slime.setObject(); + root.setString("status", asString(status)); + return slime; + } + + private static RoutingStatus asRoutingStatus(String s) { + switch (s) { + case "IN": return RoutingStatus.in; + case "OUT": return RoutingStatus.out; + } + throw new IllegalArgumentException("Unknown status: '" + s + "'"); + } + + private static String asString(RoutingStatus status) { + switch (status) { + case in: return "IN"; + case out: return "OUT"; + } + throw new IllegalArgumentException("Unknown status: " + status); + } + + private static DeploymentRoutingStatus deploymentRoutingStatusFromSlime(Slime slime, Instant changedAt) { + Cursor root = slime.get(); + return new DeploymentRoutingStatus(asRoutingStatus(root.field("status").asString()), + root.field("agent").asString(), + root.field("cause").asString(), + changedAt); + } + + private static class DeploymentRoutingStatus { + + private final RoutingStatus status; + private final String agent; + private final String reason; + private final Instant changedAt; + + public DeploymentRoutingStatus(RoutingStatus status, String agent, String reason, Instant changedAt) { + this.status = Objects.requireNonNull(status); + this.agent = Objects.requireNonNull(agent); + this.reason = Objects.requireNonNull(reason); + this.changedAt = Objects.requireNonNull(changedAt); + } + + public RoutingStatus status() { + return status; + } + + public String agent() { + return agent; + } + + public String reason() { + return reason; + } + + public Instant changedAt() { + return changedAt; + } + + } + + private enum 
RoutingStatus { + in, out + } + +} diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/ApplicationPackageMaintainer.java b/configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/ApplicationPackageMaintainer.java index 08c300220df..47eabb0347e 100644 --- a/configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/ApplicationPackageMaintainer.java +++ b/configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/ApplicationPackageMaintainer.java @@ -7,21 +7,21 @@ import com.yahoo.config.provision.ApplicationId; import com.yahoo.config.subscription.ConfigSourceSet; import com.yahoo.jrt.Supervisor; import com.yahoo.jrt.Transport; -import com.yahoo.vespa.config.JRTConnectionPool; +import com.yahoo.vespa.config.ConnectionPool; import com.yahoo.vespa.config.server.ApplicationRepository; import com.yahoo.vespa.config.server.session.Session; import com.yahoo.vespa.config.server.session.SessionRepository; import com.yahoo.vespa.config.server.tenant.Tenant; import com.yahoo.vespa.curator.Curator; import com.yahoo.vespa.defaults.Defaults; +import com.yahoo.vespa.filedistribution.FileDistributionConnectionPool; import com.yahoo.vespa.filedistribution.FileDownloader; import com.yahoo.vespa.filedistribution.FileReferenceDownload; -import com.yahoo.vespa.filedistribution.FileDistributionConnectionPool; import com.yahoo.vespa.flags.FlagSource; -import com.yahoo.vespa.flags.Flags; import java.io.File; import java.time.Duration; +import java.util.List; import java.util.logging.Logger; import static com.yahoo.vespa.config.server.filedistribution.FileDistributionUtil.fileReferenceExistsOnDisk; @@ -41,9 +41,8 @@ public class ApplicationPackageMaintainer extends ConfigServerMaintainer { private final ApplicationRepository applicationRepository; private final File downloadDirectory; private final ConfigserverConfig configserverConfig; - private final Supervisor supervisor; - private final boolean 
useFileDistributionConnectionPool; - + private final Supervisor supervisor = new Supervisor(new Transport("filedistribution-pool")).setDropEmptyBuffers(true); + private final FileDownloader fileDownloader; ApplicationPackageMaintainer(ApplicationRepository applicationRepository, Curator curator, @@ -52,9 +51,8 @@ public class ApplicationPackageMaintainer extends ConfigServerMaintainer { super(applicationRepository, curator, flagSource, applicationRepository.clock().instant(), interval, false); this.applicationRepository = applicationRepository; this.configserverConfig = applicationRepository.configserverConfig(); - this.supervisor = new Supervisor(new Transport("filedistribution-pool")).setDropEmptyBuffers(true); this.downloadDirectory = new File(Defaults.getDefaults().underVespaHome(configserverConfig.fileReferencesDir())); - this.useFileDistributionConnectionPool = Flags.USE_FILE_DISTRIBUTION_CONNECTION_POOL.bindTo(flagSource).value(); + this.fileDownloader = createFileDownloader(configserverConfig, downloadDirectory, supervisor); } @Override @@ -64,48 +62,52 @@ public class ApplicationPackageMaintainer extends ConfigServerMaintainer { int attempts = 0; int failures = 0; - try (var fileDownloader = createFileDownloader()) { - for (var applicationId : applicationRepository.listApplications()) { - log.finest(() -> "Verifying application package for " + applicationId); - Session session = applicationRepository.getActiveSession(applicationId); - if (session == null) continue; // App might be deleted after call to listApplications() or not activated yet (bootstrap phase) - - FileReference appFileReference = session.getApplicationPackageReference(); - if (appFileReference != null) { - long sessionId = session.getSessionId(); - attempts++; - if (! 
fileReferenceExistsOnDisk(downloadDirectory, appFileReference)) { - log.fine(() -> "Downloading application package for " + applicationId + " (session " + sessionId + ")"); - - FileReferenceDownload download = new FileReferenceDownload(appFileReference, - false, - this.getClass().getSimpleName()); - if (fileDownloader.getFile(download).isEmpty()) { - failures++; - log.warning("Failed to download application package (" + appFileReference + ")" + - " for " + applicationId + " (session " + sessionId + ")"); - continue; - } + for (var applicationId : applicationRepository.listApplications()) { + log.finest(() -> "Verifying application package for " + applicationId); + Session session = applicationRepository.getActiveSession(applicationId); + if (session == null) + continue; // App might be deleted after call to listApplications() or not activated yet (bootstrap phase) + + FileReference appFileReference = session.getApplicationPackageReference(); + if (appFileReference != null) { + long sessionId = session.getSessionId(); + attempts++; + if (!fileReferenceExistsOnDisk(downloadDirectory, appFileReference)) { + log.fine(() -> "Downloading application package for " + applicationId + " (session " + sessionId + ")"); + + FileReferenceDownload download = new FileReferenceDownload(appFileReference, + this.getClass().getSimpleName(), + false); + if (fileDownloader.getFile(download).isEmpty()) { + failures++; + log.info("Failed downloading application package (" + appFileReference + ")" + + " for " + applicationId + " (session " + + applicationRepository.getActiveSession(applicationId) + ")"); + continue; } - createLocalSessionIfMissing(applicationId, sessionId); } + createLocalSessionIfMissing(applicationId, sessionId); } } return asSuccessFactor(attempts, failures); } - private FileDownloader createFileDownloader() { - ConfigSourceSet configSourceSet = new ConfigSourceSet(getOtherConfigServersInCluster(configserverConfig)); - return new 
FileDownloader(useFileDistributionConnectionPool - ? new FileDistributionConnectionPool(configSourceSet, supervisor) - : new JRTConnectionPool(configSourceSet, supervisor), - supervisor, - downloadDirectory); + private static FileDownloader createFileDownloader(ConfigserverConfig configserverConfig, + File downloadDirectory, + Supervisor supervisor) { + List<String> otherConfigServersInCluster = getOtherConfigServersInCluster(configserverConfig); + ConfigSourceSet configSourceSet = new ConfigSourceSet(otherConfigServersInCluster); + + ConnectionPool connectionPool = (otherConfigServersInCluster.isEmpty()) + ? FileDownloader.emptyConnectionPool() + : new FileDistributionConnectionPool(configSourceSet, supervisor); + return new FileDownloader(connectionPool, supervisor, downloadDirectory, Duration.ofSeconds(30)); } @Override public void awaitShutdown() { supervisor.transport().shutdown().join(); + fileDownloader.close(); super.awaitShutdown(); } diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/rpc/RpcServer.java b/configserver/src/main/java/com/yahoo/vespa/config/server/rpc/RpcServer.java index 6ea32a32dd1..99ffff6403b 100644 --- a/configserver/src/main/java/com/yahoo/vespa/config/server/rpc/RpcServer.java +++ b/configserver/src/main/java/com/yahoo/vespa/config/server/rpc/RpcServer.java @@ -583,8 +583,8 @@ public class RpcServer implements Runnable, ReloadListener, TenantListener { .map(FileReference::new) .forEach(fileReference -> downloader.downloadIfNeeded( new FileReferenceDownload(fileReference, - false, /* downloadFromOtherSourceIfNotFound */ - req.target().toString()))); + req.target().toString(), + false /* downloadFromOtherSourceIfNotFound */))); req.returnValues().add(new Int32Value(0)); }); } diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/session/Session.java b/configserver/src/main/java/com/yahoo/vespa/config/server/session/Session.java index d753bee771c..bb7dedac6d1 100644 --- 
a/configserver/src/main/java/com/yahoo/vespa/config/server/session/Session.java +++ b/configserver/src/main/java/com/yahoo/vespa/config/server/session/Session.java @@ -66,7 +66,7 @@ public abstract class Session implements Comparable<Session> { @Override public String toString() { - return "Session,id=" + sessionId; + return "Session,id=" + sessionId + ",status=" + getStatus(); } public long getActiveSessionAtCreate() { diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionPreparer.java b/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionPreparer.java index ce592c3282a..aaacc9f69e0 100644 --- a/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionPreparer.java +++ b/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionPreparer.java @@ -127,7 +127,8 @@ public class SessionPreparer { AllocatedHosts allocatedHosts = preparation.buildModels(now); preparation.makeResult(allocatedHosts); if ( ! params.isDryRun()) { - preparation.writeStateZK(); + FileReference fileReference = preparation.startDistributionOfApplicationPackage(); + preparation.writeStateZK(fileReference); preparation.writeEndpointCertificateMetadataZK(); preparation.writeContainerEndpointsZK(); } @@ -226,16 +227,18 @@ public class SessionPreparer { } } - Optional<FileReference> distributedApplicationPackage() { + FileReference startDistributionOfApplicationPackage() { FileReference fileReference = fileRegistry.addApplicationPackage(); FileDistribution fileDistribution = fileDistributionFactory.createFileDistribution(); - log.log(Level.FINE, () -> "Distribute application package for " + applicationId + " (" + fileReference + ") to other config servers"); - properties.configServerSpecs().stream() - .filter(spec -> ! 
spec.getHostName().equals(HostName.getLocalhost())) - .forEach(spec -> fileDistribution.startDownload(spec.getHostName(), spec.getConfigServerPort(), Set.of(fileReference))); - - checkTimeout("distributeApplicationPackage"); - return Optional.of(fileReference); + log.log(Level.FINE, () -> "Ask other config servers to download application package for " + + applicationId + " (" + fileReference + ")"); + properties.configServerSpecs() + .stream() + .filter(spec -> !spec.getHostName().equals(HostName.getLocalhost())) + .forEach(spec -> fileDistribution.startDownload(spec.getHostName(), spec.getConfigServerPort(), Set.of(fileReference))); + + checkTimeout("startDistributionOfApplicationPackage"); + return fileReference; } void preprocess() { @@ -261,12 +264,12 @@ public class SessionPreparer { checkTimeout("making result from models"); } - void writeStateZK() { + void writeStateZK(FileReference filereference) { log.log(Level.FINE, "Writing application package state to zookeeper"); writeStateToZooKeeper(sessionZooKeeperClient, preprocessedApplicationPackage, applicationId, - distributedApplicationPackage(), + filereference, dockerImageRepository, vespaVersion, logger, @@ -306,7 +309,7 @@ public class SessionPreparer { private void writeStateToZooKeeper(SessionZooKeeperClient zooKeeperClient, ApplicationPackage applicationPackage, ApplicationId applicationId, - Optional<FileReference> distributedApplicationPackage, + FileReference fileReference, Optional<DockerImage> dockerImageRepository, Version vespaVersion, DeployLogger deployLogger, @@ -321,7 +324,7 @@ public class SessionPreparer { zkDeployer.deploy(applicationPackage, fileRegistryMap, allocatedHosts); // Note: When changing the below you need to also change similar calls in SessionRepository.createSessionFromExisting() zooKeeperClient.writeApplicationId(applicationId); - zooKeeperClient.writeApplicationPackageReference(distributedApplicationPackage); + 
zooKeeperClient.writeApplicationPackageReference(Optional.of(fileReference)); zooKeeperClient.writeVespaVersion(vespaVersion); zooKeeperClient.writeDockerImageRepository(dockerImageRepository); zooKeeperClient.writeAthenzDomain(athenzDomain); diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionRepository.java b/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionRepository.java index 95be59e4d26..07cdc910253 100644 --- a/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionRepository.java +++ b/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionRepository.java @@ -346,16 +346,25 @@ public class SessionRepository { } public int deleteExpiredRemoteSessions(Clock clock, Duration expiryTime) { + List<Long> remoteSessionsFromZooKeeper = getRemoteSessionsFromZooKeeper(); + log.log(Level.FINE, () -> "Remote sessions for tenant " + tenantName + ": " + remoteSessionsFromZooKeeper); + int deleted = 0; - for (long sessionId : getRemoteSessionsFromZooKeeper()) { + for (long sessionId : remoteSessionsFromZooKeeper) { Session session = remoteSessionCache.get(sessionId); - if (session == null) continue; // Internal sessions not in sync with zk, continue + if (session == null) { + log.log(Level.FINE, () -> "Remote session " + sessionId + " is null, creating a new one"); + session = new RemoteSession(tenantName, sessionId, createSessionZooKeeperClient(sessionId)); + } if (session.getStatus() == Session.Status.ACTIVATE) continue; if (sessionHasExpired(session.getCreateTime(), expiryTime, clock)) { log.log(Level.FINE, () -> "Remote session " + sessionId + " for " + tenantName + " has expired, deleting it"); deleteRemoteSessionFromZooKeeper(session); deleted++; } + // Avoid deleting too many in one run + if (deleted >= 2) + break; } return deleted; } diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/ContainerEndpointSerializer.java 
b/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/ContainerEndpointSerializer.java index 55986e71b3d..b813d56b345 100644 --- a/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/ContainerEndpointSerializer.java +++ b/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/ContainerEndpointSerializer.java @@ -30,16 +30,19 @@ public class ContainerEndpointSerializer { private static final String clusterIdField = "clusterId"; private static final String scopeField = "scope"; private static final String namesField = "names"; + private static final String weightField = "weight"; + private static final String routingMethodField = "routingMethod"; private ContainerEndpointSerializer() {} public static ContainerEndpoint endpointFromSlime(Inspector inspector) { final var clusterId = inspector.field(clusterIdField).asString(); - // Currently assigned endpoints that do not have scope should be interpreted as global endpoints - // TODO: Remove default assignment after 7.500 - final var scope = SlimeUtils.optionalString(inspector.field(scopeField)).orElse(ApplicationClusterEndpoint.Scope.global.name()); + final var scope = inspector.field(scopeField).asString(); final var namesInspector = inspector.field(namesField); - + final var weight = SlimeUtils.optionalInteger(inspector.field(weightField)); + // assign default routingmethod. 
Remove when 7.507 is latest version + // Cannot be used before all endpoints are assigned explicit routingmethod (from controller) + final var routingMethod = SlimeUtils.optionalString(inspector.field(routingMethodField)).orElse(ApplicationClusterEndpoint.RoutingMethod.sharedLayer4.name()); if (clusterId.isEmpty()) { throw new IllegalStateException("'clusterId' missing on serialized ContainerEndpoint"); } @@ -52,6 +55,10 @@ public class ContainerEndpointSerializer { throw new IllegalStateException("'names' missing on serialized ContainerEndpoint"); } + if(routingMethod.isEmpty()) { + throw new IllegalStateException("'routingMethod' missing on serialized ContainerEndpoint"); + } + final var names = new ArrayList<String>(); namesInspector.traverse((ArrayTraverser) (idx, nameInspector) -> { @@ -59,7 +66,8 @@ public class ContainerEndpointSerializer { names.add(containerName); }); - return new ContainerEndpoint(clusterId, ApplicationClusterEndpoint.Scope.valueOf(scope), names); + return new ContainerEndpoint(clusterId, ApplicationClusterEndpoint.Scope.valueOf(scope), names, weight, + ApplicationClusterEndpoint.RoutingMethod.valueOf(routingMethod)); } public static List<ContainerEndpoint> endpointListFromSlime(Slime slime) { @@ -81,9 +89,10 @@ public class ContainerEndpointSerializer { public static void endpointToSlime(Cursor cursor, ContainerEndpoint endpoint) { cursor.setString(clusterIdField, endpoint.clusterId()); cursor.setString(scopeField, endpoint.scope().name()); - + endpoint.weight().ifPresent(w -> cursor.setLong(weightField, w)); final var namesInspector = cursor.setArray(namesField); endpoint.names().forEach(namesInspector::addString); + cursor.setString(routingMethodField, endpoint.routingMethod().name()); } public static Slime endpointListToSlime(List<ContainerEndpoint> endpoints) { diff --git a/configserver/src/main/sh/start-configserver b/configserver/src/main/sh/start-configserver index efee86be29f..317af4b2fea 100755 --- 
a/configserver/src/main/sh/start-configserver +++ b/configserver/src/main/sh/start-configserver @@ -177,7 +177,6 @@ vespa-run-as-vespa-user vespa-runserver -s configserver -r 30 -p $pidfile -- \ --add-opens=java.base/java.nio=ALL-UNNAMED \ --add-opens=java.base/jdk.internal.loader=ALL-UNNAMED \ --add-opens=java.base/sun.security.ssl=ALL-UNNAMED \ - --add-opens=java.base/sun.security.util=ALL-UNNAMED \ -Djava.io.tmpdir=${VESPA_HOME}/tmp \ -Djava.library.path=${VESPA_HOME}/lib64 \ -Djava.awt.headless=true \ diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/application/TenantApplicationsTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/application/TenantApplicationsTest.java index eaffda056a8..02cc2e603d6 100644 --- a/configserver/src/test/java/com/yahoo/vespa/config/server/application/TenantApplicationsTest.java +++ b/configserver/src/test/java/com/yahoo/vespa/config/server/application/TenantApplicationsTest.java @@ -203,7 +203,7 @@ public class TenantApplicationsTest { configNames = applications.listConfigs(ApplicationId.defaultId(), Optional.of(vespaVersion), true); assertTrue(configNames.contains(new ConfigKey<>("documentmanager", "container", "document.config"))); assertTrue(configNames.contains(new ConfigKey<>("documentmanager", "", "document.config"))); - assertTrue(configNames.contains(new ConfigKey<>("documenttypes", "", "document"))); + assertTrue(configNames.contains(new ConfigKey<>("documenttypes", "", "document.config"))); assertTrue(configNames.contains(new ConfigKey<>("documentmanager", "container", "document.config"))); assertTrue(configNames.contains(new ConfigKey<>("health-monitor", "container", "container.jdisc.config"))); assertTrue(configNames.contains(new ConfigKey<>("specific", "container", "project"))); diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/filedistribution/FileServerTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/filedistribution/FileServerTest.java 
index f85ca37a351..67c40f94b6a 100644 --- a/configserver/src/test/java/com/yahoo/vespa/config/server/filedistribution/FileServerTest.java +++ b/configserver/src/test/java/com/yahoo/vespa/config/server/filedistribution/FileServerTest.java @@ -10,7 +10,6 @@ import com.yahoo.net.HostName; import com.yahoo.vespa.filedistribution.FileDownloader; import com.yahoo.vespa.filedistribution.FileReferenceData; import com.yahoo.vespa.filedistribution.FileReferenceDownload; -import com.yahoo.vespa.flags.InMemoryFlagSource; import org.junit.Before; import org.junit.Rule; import org.junit.Test; @@ -57,7 +56,7 @@ public class FileServerTest { public void requireThatNonExistingFileWillBeDownloaded() throws IOException { String dir = "123"; assertFalse(fileServer.hasFile(dir)); - FileReferenceDownload foo = new FileReferenceDownload(new FileReference(dir)); + FileReferenceDownload foo = new FileReferenceDownload(new FileReference(dir), "test"); assertFalse(fileServer.hasFileDownloadIfNeeded(foo)); writeFile(dir); assertTrue(fileServer.hasFileDownloadIfNeeded(foo)); @@ -118,7 +117,7 @@ public class FileServerTest { private FileServer createFileServer(ConfigserverConfig.Builder configBuilder) throws IOException { File fileReferencesDir = temporaryFolder.newFolder(); configBuilder.fileReferencesDir(fileReferencesDir.getAbsolutePath()); - return new FileServer(new ConfigserverConfig(configBuilder), new InMemoryFlagSource()); + return new FileServer(new ConfigserverConfig(configBuilder)); } private static class FileReceiver implements FileServer.Receiver { diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/http/v1/RoutingStatusApiHandlerTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/http/v1/RoutingStatusApiHandlerTest.java new file mode 100644 index 00000000000..e2b45d33cbc --- /dev/null +++ b/configserver/src/test/java/com/yahoo/vespa/config/server/http/v1/RoutingStatusApiHandlerTest.java @@ -0,0 +1,155 @@ +// Copyright Yahoo. 
Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +package com.yahoo.vespa.config.server.http.v1; + +import com.yahoo.config.provision.ApplicationId; +import com.yahoo.container.jdisc.HttpRequestBuilder; +import com.yahoo.container.jdisc.HttpResponse; +import com.yahoo.jdisc.http.HttpRequest.Method; +import com.yahoo.path.Path; +import com.yahoo.restapi.RestApiTestDriver; +import com.yahoo.test.ManualClock; +import com.yahoo.vespa.curator.Curator; +import com.yahoo.vespa.curator.mock.MockCurator; +import org.junit.Before; +import org.junit.Test; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.nio.charset.StandardCharsets; +import java.time.Instant; +import java.util.List; + +import static com.yahoo.yolean.Exceptions.uncheck; +import static org.junit.Assert.assertEquals; + +/** + * @author bjorncs + * @author mpolden + */ +public class RoutingStatusApiHandlerTest { + + private static final ApplicationId instance = ApplicationId.from("t1", "a1", "i1"); + private static final String upstreamName = "test-upstream-name"; + + private final Curator curator = new MockCurator(); + private final ManualClock clock = new ManualClock(); + + private RestApiTestDriver testDriver; + + @Before + public void before() { + RoutingStatusApiHandler requestHandler = new RoutingStatusApiHandler(RestApiTestDriver.createHandlerTestContext(), + curator, + clock); + testDriver = RestApiTestDriver.newBuilder(requestHandler).build(); + } + + @Test + public void list_deployment_status() { + List<String> expected = List.of("foo", "bar"); + for (String upstreamName : expected) { + executeRequest(Method.PUT, "/routing/v1/status/" + upstreamName + "?application=" + instance.serializedForm(), + statusOut()); + } + String actual = responseAsString(executeRequest(Method.GET, "/routing/v1/status", null)); + assertEquals("[\"bar\",\"foo\"]", actual); + } + + @Test + public void get_deployment_status() { + String response = 
responseAsString(executeRequest(Method.GET, "/routing/v1/status/" + upstreamName + "?application=" + instance.serializedForm(), null)); + assertEquals(response("IN", "", "", clock.instant()), response); + } + + @Test + public void set_deployment_status() { + String response = responseAsString(executeRequest(Method.PUT, "/routing/v1/status/" + upstreamName + "?application=" + instance.serializedForm(), + statusOut())); + assertEquals(response("OUT", "issue-XXX", "operator", clock.instant()), response); + + // Read status stored in old format (path exists, but without content) + curator.set(Path.fromString("/routing/v1/status/" + upstreamName), new byte[0]); + response = responseAsString(executeRequest(Method.GET, "/routing/v1/status/" + upstreamName + "?application=" + instance.serializedForm(), null)); + assertEquals(response("OUT", "", "", clock.instant()), response); + + // Change status of multiple upstreams + String upstreamName2 = "upstream2"; + String upstreams = upstreamName + "," + upstreamName2 + "," + upstreamName2; + response = responseAsString(executeRequest(Method.PUT, "/routing/v1/status/" + upstreams + "?application=" + instance.serializedForm(), + statusOut())); + String outResponse = response("OUT", "issue-XXX", "operator", clock.instant()); + assertEquals(outResponse, response); + for (var upstreamName : List.of(upstreamName, upstreamName2)) { + response = responseAsString(executeRequest(Method.GET, "/routing/v1/status/" + upstreamName + "?application=" + instance.serializedForm(), null)); + assertEquals(outResponse, response); + } + } + + @Test + public void fail_on_invalid_upstream_name() { + HttpResponse response = executeRequest(Method.GET, "/routing/v1/status/" + upstreamName + "%20invalid", null); + assertEquals(400, response.getStatus()); + } + + @Test + public void fail_on_changing_routing_status_without_request_content() { + HttpResponse response = executeRequest(Method.PUT, "/routing/v1/status/" + upstreamName + "?application=" + 
instance.serializedForm(), null); + assertEquals(400, response.getStatus()); + } + + @Test + public void zone_status_out_overrides_deployment_status() { + // Setting zone out overrides deployment status + executeRequest(Method.PUT, "/routing/v1/status/zone", null); + String response = responseAsString(executeRequest(Method.GET, "/routing/v1/status/" + upstreamName + "?application=" + instance.serializedForm(), null)); + assertEquals(response("OUT", "Rotation is OUT because the zone is OUT (actual deployment status is IN)", "operator", clock.instant()), response); + + // Setting zone back in falls back to deployment status, which is also out + executeRequest(Method.DELETE, "/routing/v1/status/zone", null); + String response2 = responseAsString(executeRequest(Method.PUT, "/routing/v1/status/" + upstreamName + "?application=" + instance.serializedForm(), + statusOut())); + assertEquals(response("OUT", "issue-XXX", "operator", clock.instant()), response2); + + // Deployment status is changed to in + String response3 = responseAsString(executeRequest(Method.PUT, "/routing/v1/status/" + upstreamName + "?application=" + instance.serializedForm(), + requestContent("IN", "all good"))); + assertEquals(response("IN", "all good", "operator", clock.instant()), response3); + } + + @Test + public void set_zone_status() { + executeRequest(Method.PUT, "/routing/v1/status/zone", null); + String response = responseAsString(executeRequest(Method.GET, "/routing/v1/status/zone", null)); + assertEquals("{\"status\":\"OUT\"}", response); + executeRequest(Method.DELETE, "/routing/v1/status/zone", null); + response = responseAsString(executeRequest(Method.GET, "/routing/v1/status/zone", null)); + assertEquals("{\"status\":\"IN\"}", response); + } + + private HttpResponse executeRequest(Method method, String path, String requestContent) { + var builder = HttpRequestBuilder.create(method, path); + if (requestContent != null) { + builder.withRequestContent(new 
ByteArrayInputStream(requestContent.getBytes(StandardCharsets.UTF_8))); + } + return testDriver.executeRequest(builder.build()); + } + + private static String responseAsString(HttpResponse response) { + ByteArrayOutputStream out = new ByteArrayOutputStream(); + uncheck(() -> response.render(out)); + return out.toString(StandardCharsets.UTF_8); + } + + private static String statusOut() { + return requestContent("OUT", "issue-XXX"); + } + + private static String requestContent(String status, String cause) { + return "{\"status\": \"" + status + "\", \"agent\":\"operator\", \"cause\": \"" + cause + "\"}"; + } + + private static String response(String status, String reason, String agent, Instant instant) { + return "{\"status\":\"" + status + "\",\"cause\":\"" + reason + "\",\"agent\":\"" + agent + "\",\"lastUpdate\":" + instant.getEpochSecond() + "}"; + } + +} diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/session/PrepareParamsTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/session/PrepareParamsTest.java index 2c97d0b9382..cd824967fc3 100644 --- a/configserver/src/test/java/com/yahoo/vespa/config/server/session/PrepareParamsTest.java +++ b/configserver/src/test/java/com/yahoo/vespa/config/server/session/PrepareParamsTest.java @@ -43,6 +43,8 @@ import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Optional; +import java.util.OptionalInt; +import java.util.OptionalLong; import java.util.stream.Collectors; import static org.hamcrest.CoreMatchers.is; @@ -90,18 +92,21 @@ public class PrepareParamsTest { public void testCorrectParsingWithContainerEndpoints() throws IOException { var endpoints = List.of(new ContainerEndpoint("qrs1", ApplicationClusterEndpoint.Scope.global, List.of("c1.example.com", - "c2.example.com")), + "c2.example.com"), OptionalInt.of(3)), new ContainerEndpoint("qrs2",ApplicationClusterEndpoint.Scope.global, List.of("c3.example.com", "c4.example.com"))); var param = "[\n" + " 
{\n" + " \"clusterId\": \"qrs1\",\n" + - " \"names\": [\"c1.example.com\", \"c2.example.com\"]\n" + + " \"names\": [\"c1.example.com\", \"c2.example.com\"],\n" + + " \"scope\": \"global\",\n" + + " \"weight\": 3\n" + " },\n" + " {\n" + " \"clusterId\": \"qrs2\",\n" + - " \"names\": [\"c3.example.com\", \"c4.example.com\"]\n" + + " \"names\": [\"c3.example.com\", \"c4.example.com\"],\n" + + " \"scope\": \"global\"\n" + " }\n" + "]"; diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/session/SessionPreparerTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/session/SessionPreparerTest.java index 08e6a353fbb..79632b8446b 100644 --- a/configserver/src/test/java/com/yahoo/vespa/config/server/session/SessionPreparerTest.java +++ b/configserver/src/test/java/com/yahoo/vespa/config/server/session/SessionPreparerTest.java @@ -63,6 +63,7 @@ import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Optional; +import java.util.OptionalInt; import java.util.Set; import java.util.logging.Level; @@ -248,14 +249,18 @@ public class SessionPreparerTest { " \"names\": [\n" + " \"foo.app1.tenant1.global.vespa.example.com\",\n" + " \"rotation-042.vespa.global.routing\"\n" + - " ]\n" + + " ],\n" + + " \"scope\": \"global\", \n" + + " \"routingMethod\": \"shared\"\n" + " },\n" + " {\n" + " \"clusterId\": \"bar\",\n" + " \"names\": [\n" + " \"bar.app1.tenant1.global.vespa.example.com\",\n" + " \"rotation-043.vespa.global.routing\"\n" + - " ]\n" + + " ],\n" + + " \"scope\": \"global\",\n" + + " \"routingMethod\": \"sharedLayer4\"\n" + " }\n" + "]"; var applicationId = applicationId("test"); @@ -267,11 +272,15 @@ public class SessionPreparerTest { var expected = List.of(new ContainerEndpoint("foo", ApplicationClusterEndpoint.Scope.global, List.of("foo.app1.tenant1.global.vespa.example.com", - "rotation-042.vespa.global.routing")), + "rotation-042.vespa.global.routing"), + OptionalInt.empty(), + 
ApplicationClusterEndpoint.RoutingMethod.shared), new ContainerEndpoint("bar", ApplicationClusterEndpoint.Scope.global, List.of("bar.app1.tenant1.global.vespa.example.com", - "rotation-043.vespa.global.routing"))); + "rotation-043.vespa.global.routing"), + OptionalInt.empty(), + ApplicationClusterEndpoint.RoutingMethod.sharedLayer4)); assertEquals(expected, readContainerEndpoints(applicationId)); diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/tenant/ContainerEndpointSerializerTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/tenant/ContainerEndpointSerializerTest.java index 2d767cfded4..c8f31697c5e 100644 --- a/configserver/src/test/java/com/yahoo/vespa/config/server/tenant/ContainerEndpointSerializerTest.java +++ b/configserver/src/test/java/com/yahoo/vespa/config/server/tenant/ContainerEndpointSerializerTest.java @@ -7,6 +7,8 @@ import com.yahoo.slime.Slime; import org.junit.Test; import java.util.List; +import java.util.OptionalInt; +import java.util.OptionalLong; import static org.junit.Assert.assertEquals; @@ -33,24 +35,8 @@ public class ContainerEndpointSerializerTest { } @Test - public void readEndpointWithoutScope() { - final var slime = new Slime(); - final var entry = slime.setObject(); - - entry.setString("clusterId", "foobar"); - final var entryNames = entry.setArray("names"); - entryNames.addString("a"); - entryNames.addString("b"); - - final var endpoint = ContainerEndpointSerializer.endpointFromSlime(slime.get()); - assertEquals("foobar", endpoint.clusterId()); - assertEquals(ApplicationClusterEndpoint.Scope.global, endpoint.scope()); - assertEquals(List.of("a", "b"), endpoint.names()); - } - - @Test public void writeReadSingleEndpoint() { - final var endpoint = new ContainerEndpoint("foo", ApplicationClusterEndpoint.Scope.global, List.of("a", "b")); + final var endpoint = new ContainerEndpoint("foo", ApplicationClusterEndpoint.Scope.global, List.of("a", "b"), OptionalInt.of(1)); final var serialized = new 
Slime(); ContainerEndpointSerializer.endpointToSlime(serialized.setObject(), endpoint); final var deserialized = ContainerEndpointSerializer.endpointFromSlime(serialized.get()); @@ -60,7 +46,7 @@ public class ContainerEndpointSerializerTest { @Test public void writeReadEndpoints() { - final var endpoints = List.of(new ContainerEndpoint("foo", ApplicationClusterEndpoint.Scope.global, List.of("a", "b"))); + final var endpoints = List.of(new ContainerEndpoint("foo", ApplicationClusterEndpoint.Scope.global, List.of("a", "b"), OptionalInt.of(3), ApplicationClusterEndpoint.RoutingMethod.shared)); final var serialized = ContainerEndpointSerializer.endpointListToSlime(endpoints); final var deserialized = ContainerEndpointSerializer.endpointListFromSlime(serialized); diff --git a/container-core/abi-spec.json b/container-core/abi-spec.json index 8c0f3e5fd80..a6783d1e5f5 100644 --- a/container-core/abi-spec.json +++ b/container-core/abi-spec.json @@ -1043,16 +1043,23 @@ "public com.yahoo.jdisc.http.ConnectorConfig$Builder tcpKeepAliveEnabled(boolean)", "public com.yahoo.jdisc.http.ConnectorConfig$Builder tcpNoDelay(boolean)", "public com.yahoo.jdisc.http.ConnectorConfig$Builder throttling(com.yahoo.jdisc.http.ConnectorConfig$Throttling$Builder)", + "public com.yahoo.jdisc.http.ConnectorConfig$Builder throttling(java.util.function.Consumer)", "public com.yahoo.jdisc.http.ConnectorConfig$Builder implicitTlsEnabled(boolean)", "public com.yahoo.jdisc.http.ConnectorConfig$Builder ssl(com.yahoo.jdisc.http.ConnectorConfig$Ssl$Builder)", + "public com.yahoo.jdisc.http.ConnectorConfig$Builder ssl(java.util.function.Consumer)", "public com.yahoo.jdisc.http.ConnectorConfig$Builder tlsClientAuthEnforcer(com.yahoo.jdisc.http.ConnectorConfig$TlsClientAuthEnforcer$Builder)", + "public com.yahoo.jdisc.http.ConnectorConfig$Builder tlsClientAuthEnforcer(java.util.function.Consumer)", "public com.yahoo.jdisc.http.ConnectorConfig$Builder 
healthCheckProxy(com.yahoo.jdisc.http.ConnectorConfig$HealthCheckProxy$Builder)", + "public com.yahoo.jdisc.http.ConnectorConfig$Builder healthCheckProxy(java.util.function.Consumer)", "public com.yahoo.jdisc.http.ConnectorConfig$Builder proxyProtocol(com.yahoo.jdisc.http.ConnectorConfig$ProxyProtocol$Builder)", + "public com.yahoo.jdisc.http.ConnectorConfig$Builder proxyProtocol(java.util.function.Consumer)", "public com.yahoo.jdisc.http.ConnectorConfig$Builder secureRedirect(com.yahoo.jdisc.http.ConnectorConfig$SecureRedirect$Builder)", + "public com.yahoo.jdisc.http.ConnectorConfig$Builder secureRedirect(java.util.function.Consumer)", "public com.yahoo.jdisc.http.ConnectorConfig$Builder maxRequestsPerConnection(int)", "public com.yahoo.jdisc.http.ConnectorConfig$Builder maxConnectionLife(double)", "public com.yahoo.jdisc.http.ConnectorConfig$Builder http2Enabled(boolean)", "public com.yahoo.jdisc.http.ConnectorConfig$Builder http2(com.yahoo.jdisc.http.ConnectorConfig$Http2$Builder)", + "public com.yahoo.jdisc.http.ConnectorConfig$Builder http2(java.util.function.Consumer)", "public final boolean dispatchGetConfig(com.yahoo.config.ConfigInstance$Producer)", "public final java.lang.String getDefMd5()", "public final java.lang.String getDefName()", @@ -1661,9 +1668,7 @@ }, "com.yahoo.jdisc.http.HttpRequest": { "superClass": "com.yahoo.jdisc.Request", - "interfaces": [ - "com.yahoo.jdisc.http.servlet.ServletOrJdiscHttpRequest" - ], + "interfaces": [], "attributes": [ "public" ], @@ -1723,9 +1728,7 @@ }, "com.yahoo.jdisc.http.HttpResponse": { "superClass": "com.yahoo.jdisc.Response", - "interfaces": [ - "com.yahoo.jdisc.http.servlet.ServletOrJdiscHttpResponse" - ], + "interfaces": [], "attributes": [ "public" ], @@ -1816,17 +1819,23 @@ "public com.yahoo.jdisc.http.ServerConfig$Builder maxKeepAliveRequests(int)", "public com.yahoo.jdisc.http.ServerConfig$Builder removeRawPostBodyForWwwUrlEncodedPost(boolean)", "public com.yahoo.jdisc.http.ServerConfig$Builder 
filter(com.yahoo.jdisc.http.ServerConfig$Filter$Builder)", + "public com.yahoo.jdisc.http.ServerConfig$Builder filter(java.util.function.Consumer)", "public com.yahoo.jdisc.http.ServerConfig$Builder filter(java.util.List)", "public com.yahoo.jdisc.http.ServerConfig$Builder defaultFilters(com.yahoo.jdisc.http.ServerConfig$DefaultFilters$Builder)", + "public com.yahoo.jdisc.http.ServerConfig$Builder defaultFilters(java.util.function.Consumer)", "public com.yahoo.jdisc.http.ServerConfig$Builder defaultFilters(java.util.List)", "public com.yahoo.jdisc.http.ServerConfig$Builder strictFiltering(boolean)", "public com.yahoo.jdisc.http.ServerConfig$Builder maxWorkerThreads(int)", "public com.yahoo.jdisc.http.ServerConfig$Builder minWorkerThreads(int)", "public com.yahoo.jdisc.http.ServerConfig$Builder stopTimeout(double)", "public com.yahoo.jdisc.http.ServerConfig$Builder jmx(com.yahoo.jdisc.http.ServerConfig$Jmx$Builder)", + "public com.yahoo.jdisc.http.ServerConfig$Builder jmx(java.util.function.Consumer)", "public com.yahoo.jdisc.http.ServerConfig$Builder metric(com.yahoo.jdisc.http.ServerConfig$Metric$Builder)", + "public com.yahoo.jdisc.http.ServerConfig$Builder metric(java.util.function.Consumer)", "public com.yahoo.jdisc.http.ServerConfig$Builder accessLog(com.yahoo.jdisc.http.ServerConfig$AccessLog$Builder)", + "public com.yahoo.jdisc.http.ServerConfig$Builder accessLog(java.util.function.Consumer)", "public com.yahoo.jdisc.http.ServerConfig$Builder connectionLog(com.yahoo.jdisc.http.ServerConfig$ConnectionLog$Builder)", + "public com.yahoo.jdisc.http.ServerConfig$Builder connectionLog(java.util.function.Consumer)", "public final boolean dispatchGetConfig(com.yahoo.config.ConfigInstance$Producer)", "public final java.lang.String getDefMd5()", "public final java.lang.String getDefName()", @@ -2071,6 +2080,7 @@ "public void <init>(com.yahoo.jdisc.http.ServletPathsConfig)", "public com.yahoo.jdisc.http.ServletPathsConfig$Builder servlets(java.lang.String, 
com.yahoo.jdisc.http.ServletPathsConfig$Servlets$Builder)", "public com.yahoo.jdisc.http.ServletPathsConfig$Builder servlets(java.util.Map)", + "public com.yahoo.jdisc.http.ServletPathsConfig$Builder servlets(java.lang.String, java.util.function.Consumer)", "public final boolean dispatchGetConfig(com.yahoo.config.ConfigInstance$Producer)", "public final java.lang.String getDefMd5()", "public final java.lang.String getDefName()", @@ -2168,15 +2178,14 @@ "superClass": "java.lang.Object", "interfaces": [], "attributes": [ - "public", - "abstract" + "public" ], "methods": [ - "public void <init>(com.yahoo.jdisc.http.servlet.ServletOrJdiscHttpRequest)", - "public abstract java.lang.String getMethod()", + "public void <init>(com.yahoo.jdisc.http.HttpRequest)", + "public java.lang.String getMethod()", "public com.yahoo.jdisc.http.HttpRequest$Version getVersion()", "public java.net.URI getUri()", - "public abstract void setUri(java.net.URI)", + "public void setUri(java.net.URI)", "public com.yahoo.jdisc.http.HttpRequest getParentRequest()", "public java.lang.String getRemoteAddr()", "public void setRemoteAddr(java.lang.String)", @@ -2186,8 +2195,8 @@ "public void setAttribute(java.lang.String, java.lang.Object)", "public boolean containsAttribute(java.lang.String)", "public void removeAttribute(java.lang.String)", - "public abstract java.lang.String getParameter(java.lang.String)", - "public abstract java.util.Enumeration getParameterNames()", + "public java.lang.String getParameter(java.lang.String)", + "public java.util.Enumeration getParameterNames()", "public java.util.List getParameterNamesAsList()", "public java.util.Enumeration getParameterValues(java.lang.String)", "public java.util.List getParameterValuesAsList(java.lang.String)", @@ -2198,16 +2207,16 @@ "public java.util.Map getUntreatedParams()", "public com.yahoo.jdisc.HeaderFields getUntreatedHeaders()", "public java.util.List getUntreatedCookies()", - "public abstract void addHeader(java.lang.String, 
java.lang.String)", + "public void addHeader(java.lang.String, java.lang.String)", "public long getDateHeader(java.lang.String)", - "public abstract java.lang.String getHeader(java.lang.String)", - "public abstract java.util.Enumeration getHeaderNames()", - "public abstract java.util.List getHeaderNamesAsList()", - "public abstract java.util.Enumeration getHeaders(java.lang.String)", - "public abstract java.util.List getHeadersAsList(java.lang.String)", - "public abstract void removeHeaders(java.lang.String)", - "public abstract void setHeaders(java.lang.String, java.lang.String)", - "public abstract void setHeaders(java.lang.String, java.util.List)", + "public java.lang.String getHeader(java.lang.String)", + "public java.util.Enumeration getHeaderNames()", + "public java.util.List getHeaderNamesAsList()", + "public java.util.Enumeration getHeaders(java.lang.String)", + "public java.util.List getHeadersAsList(java.lang.String)", + "public void removeHeaders(java.lang.String)", + "public void setHeaders(java.lang.String, java.lang.String)", + "public void setHeaders(java.lang.String, java.util.List)", "public int getIntHeader(java.lang.String)", "public com.yahoo.container.jdisc.RequestView asRequestView()", "public java.util.List getCookies()", @@ -2222,20 +2231,20 @@ "public void setScheme(java.lang.String, boolean)", "public java.lang.String getServerName()", "public int getServerPort()", - "public abstract java.security.Principal getUserPrincipal()", + "public java.security.Principal getUserPrincipal()", "public boolean isSecure()", "public boolean isUserInRole(java.lang.String)", "public void setOverrideIsUserInRole(boolean)", "public void setRemoteHost(java.lang.String)", "public void setRemoteUser(java.lang.String)", - "public abstract void setUserPrincipal(java.security.Principal)", - "public abstract java.util.List getClientCertificateChain()", + "public void setUserPrincipal(java.security.Principal)", + "public java.util.List getClientCertificateChain()", 
"public void setUserRoles(java.lang.String[])", "public java.lang.String getContentType()", "public java.lang.String getCharacterEncoding()", "public void setCharacterEncoding(java.lang.String)", "public void addCookie(com.yahoo.jdisc.http.filter.JDiscCookieWrapper)", - "public abstract void clearCookies()", + "public void clearCookies()", "public com.yahoo.jdisc.http.filter.JDiscCookieWrapper[] getWrappedCookies()", "public static boolean isMultipart(com.yahoo.jdisc.http.filter.DiscFilterRequest)" ], @@ -2251,27 +2260,26 @@ "superClass": "java.lang.Object", "interfaces": [], "attributes": [ - "public", - "abstract" + "public" ], "methods": [ - "public void <init>(com.yahoo.jdisc.http.servlet.ServletOrJdiscHttpResponse)", + "public void <init>(com.yahoo.jdisc.http.HttpResponse)", "public java.util.Enumeration getAttributeNames()", "public java.lang.Object getAttribute(java.lang.String)", "public void setAttribute(java.lang.String, java.lang.Object)", "public void removeAttribute(java.lang.String)", "public com.yahoo.jdisc.HeaderFields getUntreatedHeaders()", "public java.util.List getUntreatedCookies()", - "public abstract void setHeader(java.lang.String, java.lang.String)", - "public abstract void removeHeaders(java.lang.String)", - "public abstract void setHeaders(java.lang.String, java.lang.String)", - "public abstract void setHeaders(java.lang.String, java.util.List)", - "public abstract void addHeader(java.lang.String, java.lang.String)", - "public abstract java.lang.String getHeader(java.lang.String)", + "public void setHeader(java.lang.String, java.lang.String)", + "public void removeHeaders(java.lang.String)", + "public void setHeaders(java.lang.String, java.lang.String)", + "public void setHeaders(java.lang.String, java.util.List)", + "public void addHeader(java.lang.String, java.lang.String)", + "public java.lang.String getHeader(java.lang.String)", "public java.util.List getCookies()", - "public abstract void setCookies(java.util.List)", + "public void 
setCookies(java.util.List)", "public int getStatus()", - "public abstract void setStatus(int)", + "public void setStatus(int)", "public com.yahoo.jdisc.http.HttpResponse getParentResponse()", "public void addCookie(com.yahoo.jdisc.http.filter.JDiscCookieWrapper)", "public void sendError(int)", @@ -2327,25 +2335,7 @@ "public" ], "methods": [ - "public void <init>(com.yahoo.jdisc.http.HttpRequest)", - "public com.yahoo.jdisc.http.HttpRequest getParentRequest()", - "public void setUri(java.net.URI)", - "public java.lang.String getMethod()", - "public java.lang.String getParameter(java.lang.String)", - "public java.util.Enumeration getParameterNames()", - "public void addHeader(java.lang.String, java.lang.String)", - "public java.lang.String getHeader(java.lang.String)", - "public java.util.Enumeration getHeaderNames()", - "public java.util.List getHeaderNamesAsList()", - "public java.util.Enumeration getHeaders(java.lang.String)", - "public java.util.List getHeadersAsList(java.lang.String)", - "public void removeHeaders(java.lang.String)", - "public void setHeaders(java.lang.String, java.lang.String)", - "public void setHeaders(java.lang.String, java.util.List)", - "public java.security.Principal getUserPrincipal()", - "public void setUserPrincipal(java.security.Principal)", - "public java.util.List getClientCertificateChain()", - "public void clearCookies()" + "public void <init>(com.yahoo.jdisc.http.HttpRequest)" ], "fields": [] }, @@ -2420,21 +2410,6 @@ "methods": [], "fields": [] }, - "com.yahoo.jdisc.http.filter.SecurityFilterInvoker": { - "superClass": "java.lang.Object", - "interfaces": [ - "com.yahoo.jdisc.http.server.jetty.FilterInvoker" - ], - "attributes": [ - "public" - ], - "methods": [ - "public void <init>()", - "public javax.servlet.http.HttpServletRequest invokeRequestFilterChain(com.yahoo.jdisc.http.filter.RequestFilter, java.net.URI, javax.servlet.http.HttpServletRequest, com.yahoo.jdisc.handler.ResponseHandler)", - "public void 
invokeResponseFilterChain(com.yahoo.jdisc.http.filter.ResponseFilter, java.net.URI, javax.servlet.http.HttpServletRequest, javax.servlet.http.HttpServletResponse)" - ], - "fields": [] - }, "com.yahoo.jdisc.http.filter.SecurityRequestFilter": { "superClass": "java.lang.Object", "interfaces": [ @@ -2878,6 +2853,7 @@ "public void <init>(com.yahoo.processing.Request, com.yahoo.processing.request.ErrorMessage)", "public void mergeWith(com.yahoo.processing.Response)", "public com.yahoo.processing.response.DataList data()", + "public static java.util.concurrent.CompletableFuture recursiveFuture(com.yahoo.processing.response.DataList)", "public static com.google.common.util.concurrent.ListenableFuture recursiveComplete(com.yahoo.processing.response.DataList)" ], "fields": [] @@ -3176,8 +3152,9 @@ "public abstract void endResponse()", "public void <init>()", "public void <init>(java.util.concurrent.Executor)", - "public final com.google.common.util.concurrent.ListenableFuture render(java.io.OutputStream, com.yahoo.processing.Response, com.yahoo.processing.execution.Execution, com.yahoo.processing.Request)", + "public final java.util.concurrent.CompletableFuture renderResponse(java.io.OutputStream, com.yahoo.processing.Response, com.yahoo.processing.execution.Execution, com.yahoo.processing.Request)", "public void deconstruct()", + "public final java.util.concurrent.CompletableFuture renderResponseBeforeHandover(java.io.OutputStream, com.yahoo.processing.Response, com.yahoo.processing.execution.Execution, com.yahoo.processing.Request)", "public final com.google.common.util.concurrent.ListenableFuture renderBeforeHandover(java.io.OutputStream, com.yahoo.processing.Response, com.yahoo.processing.execution.Execution, com.yahoo.processing.Request)", "public com.yahoo.processing.execution.Execution getExecution()", "public com.yahoo.processing.Response getResponse()", @@ -3223,7 +3200,8 @@ "public void <init>()", "public com.yahoo.processing.rendering.Renderer clone()", "public 
void init()", - "public abstract com.google.common.util.concurrent.ListenableFuture render(java.io.OutputStream, com.yahoo.processing.Response, com.yahoo.processing.execution.Execution, com.yahoo.processing.Request)", + "public com.google.common.util.concurrent.ListenableFuture render(java.io.OutputStream, com.yahoo.processing.Response, com.yahoo.processing.execution.Execution, com.yahoo.processing.Request)", + "public java.util.concurrent.CompletableFuture renderResponse(java.io.OutputStream, com.yahoo.processing.Response, com.yahoo.processing.execution.Execution, com.yahoo.processing.Request)", "public abstract java.lang.String getEncoding()", "public abstract java.lang.String getMimeType()", "public bridge synthetic com.yahoo.component.AbstractComponent clone()", @@ -3427,7 +3405,7 @@ "fields": [] }, "com.yahoo.processing.response.AbstractDataList$DrainOnGetFuture": { - "superClass": "com.google.common.util.concurrent.AbstractFuture", + "superClass": "com.yahoo.processing.impl.ProcessingFuture", "interfaces": [], "attributes": [ "public", @@ -3439,8 +3417,8 @@ "public boolean isCancelled()", "public com.yahoo.processing.response.DataList get()", "public com.yahoo.processing.response.DataList get(long, java.util.concurrent.TimeUnit)", - "public bridge synthetic java.lang.Object get()", - "public bridge synthetic java.lang.Object get(long, java.util.concurrent.TimeUnit)" + "public bridge synthetic java.lang.Object get(long, java.util.concurrent.TimeUnit)", + "public bridge synthetic java.lang.Object get()" ], "fields": [] }, @@ -3462,6 +3440,7 @@ "public com.yahoo.processing.Request request()", "public com.yahoo.processing.response.IncomingData incoming()", "public com.google.common.util.concurrent.ListenableFuture complete()", + "public java.util.concurrent.CompletableFuture completeFuture()", "public boolean isOrdered()", "public boolean isStreamed()", "public java.lang.String toString()" @@ -3520,6 +3499,7 @@ "public abstract com.yahoo.processing.response.Data 
get(int)", "public abstract java.util.List asList()", "public abstract com.yahoo.processing.response.IncomingData incoming()", + "public abstract java.util.concurrent.CompletableFuture completeFuture()", "public abstract com.google.common.util.concurrent.ListenableFuture complete()", "public abstract void addDataListener(java.lang.Runnable)", "public void close()" @@ -3540,6 +3520,7 @@ "public final void assignOwner(com.yahoo.processing.response.DataList)", "public com.yahoo.processing.response.DataList getOwner()", "public com.google.common.util.concurrent.ListenableFuture completed()", + "public java.util.concurrent.CompletableFuture completedFuture()", "public synchronized boolean isComplete()", "public synchronized void addLast(com.yahoo.processing.response.Data)", "public synchronized void add(com.yahoo.processing.response.Data)", @@ -3553,26 +3534,29 @@ "fields": [] }, "com.yahoo.processing.response.FutureResponse": { - "superClass": "com.google.common.util.concurrent.ForwardingFuture", - "interfaces": [], + "superClass": "java.lang.Object", + "interfaces": [ + "java.util.concurrent.Future" + ], "attributes": [ "public" ], "methods": [ "public void <init>(java.util.concurrent.Callable, com.yahoo.processing.execution.Execution, com.yahoo.processing.Request)", - "public com.google.common.util.concurrent.ListenableFutureTask delegate()", + "public java.util.concurrent.FutureTask delegate()", + "public boolean cancel(boolean)", + "public boolean isCancelled()", + "public boolean isDone()", "public com.yahoo.processing.Response get()", "public com.yahoo.processing.Response get(long, java.util.concurrent.TimeUnit)", "public com.yahoo.processing.Request getRequest()", "public bridge synthetic java.lang.Object get(long, java.util.concurrent.TimeUnit)", - "public bridge synthetic java.lang.Object get()", - "public bridge synthetic java.util.concurrent.Future delegate()", - "public bridge synthetic java.lang.Object delegate()" + "public bridge synthetic 
java.lang.Object get()" ], "fields": [] }, "com.yahoo.processing.response.IncomingData$NullIncomingData$ImmediateFuture": { - "superClass": "com.google.common.util.concurrent.AbstractFuture", + "superClass": "com.yahoo.processing.impl.ProcessingFuture", "interfaces": [], "attributes": [ "public" @@ -3584,8 +3568,8 @@ "public boolean isDone()", "public com.yahoo.processing.response.DataList get()", "public com.yahoo.processing.response.DataList get(long, java.util.concurrent.TimeUnit)", - "public bridge synthetic java.lang.Object get()", - "public bridge synthetic java.lang.Object get(long, java.util.concurrent.TimeUnit)" + "public bridge synthetic java.lang.Object get(long, java.util.concurrent.TimeUnit)", + "public bridge synthetic java.lang.Object get()" ], "fields": [] }, @@ -3601,6 +3585,7 @@ "methods": [ "public void <init>(com.yahoo.processing.response.DataList)", "public com.google.common.util.concurrent.ListenableFuture completed()", + "public java.util.concurrent.CompletableFuture completedFuture()", "public com.yahoo.processing.response.DataList getOwner()", "public boolean isComplete()", "public void addLast(com.yahoo.processing.response.Data)", @@ -3624,6 +3609,7 @@ ], "methods": [ "public abstract com.yahoo.processing.response.DataList getOwner()", + "public abstract java.util.concurrent.CompletableFuture completedFuture()", "public abstract com.google.common.util.concurrent.ListenableFuture completed()", "public abstract boolean isComplete()", "public abstract void addLast(com.yahoo.processing.response.Data)", diff --git a/container-core/pom.xml b/container-core/pom.xml index 7e4198bad0d..910781f4e7d 100644 --- a/container-core/pom.xml +++ b/container-core/pom.xml @@ -240,6 +240,12 @@ <scope>provided</scope> </dependency> <dependency> + <groupId>com.yahoo.vespa</groupId> + <artifactId>hosted-zone-api</artifactId> + <version>${project.version}</version> + <scope>provided</scope> + </dependency> + <dependency> <groupId>javax.servlet</groupId> 
<artifactId>javax.servlet-api</artifactId> <scope>provided</scope> diff --git a/container-core/src/main/java/com/yahoo/container/core/config/testutil/HandlersConfigurerTestWrapper.java b/container-core/src/main/java/com/yahoo/container/core/config/testutil/HandlersConfigurerTestWrapper.java index 99637e08b77..3ae28c2816d 100644 --- a/container-core/src/main/java/com/yahoo/container/core/config/testutil/HandlersConfigurerTestWrapper.java +++ b/container-core/src/main/java/com/yahoo/container/core/config/testutil/HandlersConfigurerTestWrapper.java @@ -142,6 +142,7 @@ public class HandlersConfigurerTestWrapper { // Needed by e.g. SearchHandler bind(Linguistics.class).to(SimpleLinguistics.class).in(Scopes.SINGLETON); bind(Embedder.class).to(Embedder.FailingEmbedder.class).in(Scopes.SINGLETON); + bind(ai.vespa.cloud.ZoneInfo.class).to(MockZoneInfo.class); bind(ContainerThreadPool.class).to(SimpleContainerThreadpool.class); bind(Metric.class).to(MockMetric.class); } diff --git a/container-core/src/main/java/com/yahoo/container/core/config/testutil/MockZoneInfo.java b/container-core/src/main/java/com/yahoo/container/core/config/testutil/MockZoneInfo.java new file mode 100644 index 00000000000..11c14f8e581 --- /dev/null +++ b/container-core/src/main/java/com/yahoo/container/core/config/testutil/MockZoneInfo.java @@ -0,0 +1,17 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+package com.yahoo.container.core.config.testutil; + +import ai.vespa.cloud.ZoneInfo; + +/** + * A ZoneInfo subclass which can be created (for injection) with an empty constructor + * + * @author bratseth + */ +public class MockZoneInfo extends ZoneInfo { + + public MockZoneInfo() { + super(ZoneInfo.defaultInfo().application(), ZoneInfo.defaultInfo().zone()); + } + +} diff --git a/container-core/src/main/java/com/yahoo/container/handler/Coverage.java b/container-core/src/main/java/com/yahoo/container/handler/Coverage.java index ef088a1685a..00c3a1d1aae 100644 --- a/container-core/src/main/java/com/yahoo/container/handler/Coverage.java +++ b/container-core/src/main/java/com/yahoo/container/handler/Coverage.java @@ -2,7 +2,7 @@ package com.yahoo.container.handler; -import com.google.common.annotations.Beta; +import com.yahoo.api.annotations.Beta; /** * The coverage report for a result set. diff --git a/container-core/src/main/java/com/yahoo/container/handler/test/MockService.java b/container-core/src/main/java/com/yahoo/container/handler/test/MockService.java index 8f24e2e64a2..b8175802ff7 100644 --- a/container-core/src/main/java/com/yahoo/container/handler/test/MockService.java +++ b/container-core/src/main/java/com/yahoo/container/handler/test/MockService.java @@ -1,7 +1,7 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.container.handler.test; -import com.google.common.annotations.Beta; +import com.yahoo.api.annotations.Beta; import com.yahoo.container.jdisc.HttpRequest; import com.yahoo.container.jdisc.HttpResponse; import com.yahoo.container.jdisc.LoggingRequestHandler; diff --git a/container-core/src/main/java/com/yahoo/container/handler/test/MockServiceHandler.java b/container-core/src/main/java/com/yahoo/container/handler/test/MockServiceHandler.java index c417eb6516e..a06422a1bf4 100644 --- a/container-core/src/main/java/com/yahoo/container/handler/test/MockServiceHandler.java +++ b/container-core/src/main/java/com/yahoo/container/handler/test/MockServiceHandler.java @@ -1,7 +1,7 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.container.handler.test; -import com.google.common.annotations.Beta; +import com.yahoo.api.annotations.Beta; import com.yahoo.container.jdisc.HttpRequest; /** diff --git a/container-core/src/main/java/com/yahoo/container/jdisc/RequestHandlerTestDriver.java b/container-core/src/main/java/com/yahoo/container/jdisc/RequestHandlerTestDriver.java index de877bc413d..04780c81be4 100644 --- a/container-core/src/main/java/com/yahoo/container/jdisc/RequestHandlerTestDriver.java +++ b/container-core/src/main/java/com/yahoo/container/jdisc/RequestHandlerTestDriver.java @@ -1,7 +1,7 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.container.jdisc; -import com.google.common.annotations.Beta; +import com.yahoo.api.annotations.Beta; import com.yahoo.jdisc.Request; import com.yahoo.jdisc.Response; import com.yahoo.jdisc.application.ContainerBuilder; diff --git a/container-core/src/main/java/com/yahoo/container/servlet/ServletProvider.java b/container-core/src/main/java/com/yahoo/container/servlet/ServletProvider.java deleted file mode 100644 index aabaa6dd378..00000000000 --- a/container-core/src/main/java/com/yahoo/container/servlet/ServletProvider.java +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -package com.yahoo.container.servlet; - -import javax.servlet.Servlet; - -import com.yahoo.container.di.componentgraph.Provider; -import org.eclipse.jetty.servlet.ServletHolder; - -/** - * @author stiankri - */ -public class ServletProvider implements Provider<ServletHolder> { - - private final ServletHolder servletHolder; - - public ServletProvider(Servlet servlet, ServletConfigConfig servletConfigConfig) { - servletHolder = new ServletHolder(servlet); - servletConfigConfig.map().forEach( (key, value) -> servletHolder.setInitParameter(key, value)); - } - - @Override - public ServletHolder get() { - return servletHolder; - } - - @Override - public void deconstruct() { } -} diff --git a/container-core/src/main/java/com/yahoo/container/servlet/package-info.java b/container-core/src/main/java/com/yahoo/container/servlet/package-info.java index 38c03998bf7..8ecb3cbe827 100644 --- a/container-core/src/main/java/com/yahoo/container/servlet/package-info.java +++ b/container-core/src/main/java/com/yahoo/container/servlet/package-info.java @@ -1,4 +1,5 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+// TODO Vespa 8 Remove export package @ExportPackage package com.yahoo.container.servlet; diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/HttpRequest.java b/container-core/src/main/java/com/yahoo/jdisc/http/HttpRequest.java index bfd412700ea..598a924b327 100644 --- a/container-core/src/main/java/com/yahoo/jdisc/http/HttpRequest.java +++ b/container-core/src/main/java/com/yahoo/jdisc/http/HttpRequest.java @@ -7,7 +7,6 @@ import com.yahoo.jdisc.handler.CompletionHandler; import com.yahoo.jdisc.handler.ContentChannel; import com.yahoo.jdisc.handler.RequestHandler; import com.yahoo.jdisc.handler.ResponseHandler; -import com.yahoo.jdisc.http.servlet.ServletOrJdiscHttpRequest; import com.yahoo.jdisc.service.CurrentContainer; import org.eclipse.jetty.http.HttpURI; import org.eclipse.jetty.util.MultiMap; @@ -31,7 +30,7 @@ import java.util.concurrent.TimeUnit; * @author Anirudha Khanna * @author Einar M R Rosenvinge */ -public class HttpRequest extends Request implements ServletOrJdiscHttpRequest { +public class HttpRequest extends Request { public enum Method { OPTIONS, @@ -74,7 +73,7 @@ public class HttpRequest extends Request implements ServletOrJdiscHttpRequest { private final long jvmRelativeCreatedAt = System.nanoTime(); private final HeaderFields trailers = new HeaderFields(); private final Map<String, List<String>> parameters = new HashMap<>(); - private Principal principal; + private volatile Principal principal; private final long connectedAt; private Method method; private Version version; @@ -141,7 +140,6 @@ public class HttpRequest extends Request implements ServletOrJdiscHttpRequest { } /** Returns the remote address, or null if unresolved */ - @Override public String getRemoteHostAddress() { if (remoteAddress instanceof InetSocketAddress) { InetAddress remoteInetAddress = ((InetSocketAddress) remoteAddress).getAddress(); @@ -154,7 +152,6 @@ public class HttpRequest extends Request implements ServletOrJdiscHttpRequest { } } - @Override public 
String getRemoteHostName() { if (remoteAddress instanceof InetSocketAddress) { InetAddress remoteInetAddress = ((InetSocketAddress) remoteAddress).getAddress(); @@ -166,7 +163,6 @@ public class HttpRequest extends Request implements ServletOrJdiscHttpRequest { } } - @Override public int getRemotePort() { if (remoteAddress instanceof InetSocketAddress) return ((InetSocketAddress) remoteAddress).getPort(); @@ -202,7 +198,6 @@ public class HttpRequest extends Request implements ServletOrJdiscHttpRequest { * @param unit the unit to return the time in * @return the timestamp of when the underlying HTTP channel was connected, or request creation time */ - @Override public long getConnectedAt(TimeUnit unit) { return unit.convert(connectedAt, TimeUnit.MILLISECONDS); } @@ -234,7 +229,6 @@ public class HttpRequest extends Request implements ServletOrJdiscHttpRequest { return parameters; } - @Override public void copyHeaders(HeaderFields target) { target.addAll(headers()); } diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/HttpResponse.java b/container-core/src/main/java/com/yahoo/jdisc/http/HttpResponse.java index e9ff60ade20..20abb251c74 100644 --- a/container-core/src/main/java/com/yahoo/jdisc/http/HttpResponse.java +++ b/container-core/src/main/java/com/yahoo/jdisc/http/HttpResponse.java @@ -7,7 +7,6 @@ import com.yahoo.jdisc.Response; import com.yahoo.jdisc.handler.CompletionHandler; import com.yahoo.jdisc.handler.ContentChannel; import com.yahoo.jdisc.handler.ResponseHandler; -import com.yahoo.jdisc.http.servlet.ServletOrJdiscHttpResponse; import java.util.List; @@ -16,7 +15,7 @@ import java.util.List; * * @author Einar M R Rosenvinge */ -public class HttpResponse extends Response implements ServletOrJdiscHttpResponse { +public class HttpResponse extends Response { private final HeaderFields trailers = new HeaderFields(); private boolean chunkedEncodingEnabled = true; @@ -54,12 +53,10 @@ public class HttpResponse extends Response implements 
ServletOrJdiscHttpResponse return message; } - @Override public void copyHeaders(HeaderFields target) { target.addAll(headers()); } - @Override public List<Cookie> decodeSetCookieHeader() { return CookieHelper.decodeSetCookieHeader(headers()); } diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/filter/DiscFilterRequest.java b/container-core/src/main/java/com/yahoo/jdisc/http/filter/DiscFilterRequest.java index a0933484f4f..2580b4a6ac0 100644 --- a/container-core/src/main/java/com/yahoo/jdisc/http/filter/DiscFilterRequest.java +++ b/container-core/src/main/java/com/yahoo/jdisc/http/filter/DiscFilterRequest.java @@ -7,7 +7,7 @@ import com.yahoo.jdisc.http.Cookie; import com.yahoo.jdisc.http.HttpHeaders; import com.yahoo.jdisc.http.HttpRequest; import com.yahoo.jdisc.http.HttpRequest.Version; -import com.yahoo.jdisc.http.servlet.ServletOrJdiscHttpRequest; +import com.yahoo.jdisc.http.server.jetty.RequestUtils; import java.net.InetSocketAddress; import java.net.URI; @@ -16,6 +16,7 @@ import java.security.cert.X509Certificate; import java.text.ParseException; import java.text.SimpleDateFormat; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.Date; import java.util.Enumeration; @@ -23,21 +24,20 @@ import java.util.HashMap; import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.Optional; import java.util.concurrent.TimeUnit; import java.util.regex.Pattern; /** * The Request class on which all filters will operate upon. - * Test cases that need a concrete - * instance should create a {@link JdiscFilterRequest}. 
*/ -public abstract class DiscFilterRequest { +public class DiscFilterRequest { protected static final String HTTPS_PREFIX = "https"; protected static final int DEFAULT_HTTP_PORT = 80; protected static final int DEFAULT_HTTPS_PORT = 443; - private final ServletOrJdiscHttpRequest parent; + private final HttpRequest parent; protected final Map<String, List<String>> untreatedParams; private final HeaderFields untreatedHeaders; private List<Cookie> untreatedCookies = null; @@ -45,7 +45,7 @@ public abstract class DiscFilterRequest { private String[] roles = null; private boolean overrideIsUserInRole = false; - public DiscFilterRequest(ServletOrJdiscHttpRequest parent) { + public DiscFilterRequest(HttpRequest parent) { this.parent = parent; // save untreated headers from parent @@ -55,7 +55,7 @@ public abstract class DiscFilterRequest { untreatedParams = new HashMap<>(parent.parameters()); } - public abstract String getMethod(); + public String getMethod() { return parent.getMethod().name(); } public Version getVersion() { return parent.getVersion(); @@ -66,10 +66,14 @@ public abstract class DiscFilterRequest { } @Deprecated - public abstract void setUri(URI uri); + public void setUri(URI uri) { parent.setUri(uri); } + /** + * @deprecated Use methods on {@link DiscFilterRequest} instead to inspect request + */ + @Deprecated(forRemoval = true, since = "7.511") public HttpRequest getParentRequest() { - throw new UnsupportedOperationException("getParentRequest is not supported for " + parent.getClass().getName()); + return parent; } /** @@ -125,9 +129,18 @@ public abstract class DiscFilterRequest { parent.context().remove(name); } - public abstract String getParameter(String name); + public String getParameter(String name) { + if(parent.parameters().containsKey(name)) { + return parent.parameters().get(name).get(0); + } + else { + return null; + } + } - public abstract Enumeration<String> getParameterNames(); + public Enumeration<String> getParameterNames() { + return 
Collections.enumeration(parent.parameters().keySet()); + } public List<String> getParameterNamesAsList() { return new ArrayList<String>(parent.parameters().keySet()); @@ -200,7 +213,9 @@ public abstract class DiscFilterRequest { * Sets a header with the given name and value. * If the header had already been set, the new value overwrites the previous one. */ - public abstract void addHeader(String name, String value); + public void addHeader(String name, String value) { + parent.headers().add(name, value); + } public long getDateHeader(String name) { String value = getHeader(name); @@ -221,31 +236,43 @@ public abstract class DiscFilterRequest { return date.getTime(); } - public abstract String getHeader(String name); + public String getHeader(String name) { + List<String> values = parent.headers().get(name); + if (values == null || values.isEmpty()) { + return null; + } + return values.get(values.size() - 1); + } - public abstract Enumeration<String> getHeaderNames(); + public Enumeration<String> getHeaderNames() { return Collections.enumeration(parent.headers().keySet()); } - public abstract List<String> getHeaderNamesAsList(); + public List<String> getHeaderNamesAsList() { return new ArrayList<>(parent.headers().keySet()); } - public abstract Enumeration<String> getHeaders(String name); + public Enumeration<String> getHeaders(String name) { return Collections.enumeration(getHeadersAsList(name)); } - public abstract List<String> getHeadersAsList(String name); + public List<String> getHeadersAsList(String name) { + List<String> values = parent.headers().get(name); + if(values == null) { + return Collections.emptyList(); + } + return parent.headers().get(name); + } - public abstract void removeHeaders(String name); + public void removeHeaders(String name) { parent.headers().remove(name); } /** * Sets a header with the given name and value. * If the header had already been set, the new value overwrites the previous one. 
* */ - public abstract void setHeaders(String name, String value); + public void setHeaders(String name, String value) { parent.headers().put(name, value); } /** * Sets a header with the given name and value. * If the header had already been set, the new value overwrites the previous one. * */ - public abstract void setHeaders(String name, List<String> values); + public void setHeaders(String name, List<String> values) { parent.headers().put(name, values); } public int getIntHeader(String name) { String value = getHeader(name); @@ -340,7 +367,7 @@ public abstract class DiscFilterRequest { return port; } - public abstract Principal getUserPrincipal(); + public Principal getUserPrincipal() { return parent.getUserPrincipal(); } public boolean isSecure() { if(getScheme().equalsIgnoreCase(HTTPS_PREFIX)) { @@ -383,13 +410,18 @@ public abstract class DiscFilterRequest { this.remoteUser = remoteUser; } - public abstract void setUserPrincipal(Principal principal); + public void setUserPrincipal(Principal principal) { this.parent.setUserPrincipal(principal); } /** * @return The client certificate chain in ascending order of trust. The first certificate is the one sent from the client. * Returns an empty list if the client did not provide a certificate. 
*/ - public abstract List<X509Certificate> getClientCertificateChain(); + public List<X509Certificate> getClientCertificateChain() { + return Optional.ofNullable(parent.context().get(RequestUtils.JDISC_REQUEST_X509CERT)) + .map(X509Certificate[].class::cast) + .map(Arrays::asList) + .orElse(Collections.emptyList()); + } public void setUserRoles(String[] roles) { this.roles = roles; @@ -437,7 +469,7 @@ public abstract class DiscFilterRequest { } } - public abstract void clearCookies(); + public void clearCookies() { parent.headers().remove(HttpHeaders.Names.COOKIE); } public JDiscCookieWrapper[] getWrappedCookies() { List<Cookie> cookies = getCookies(); diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/filter/DiscFilterResponse.java b/container-core/src/main/java/com/yahoo/jdisc/http/filter/DiscFilterResponse.java index fad0f46402d..af768a98d2d 100644 --- a/container-core/src/main/java/com/yahoo/jdisc/http/filter/DiscFilterResponse.java +++ b/container-core/src/main/java/com/yahoo/jdisc/http/filter/DiscFilterResponse.java @@ -1,6 +1,12 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.jdisc.http.filter; +import com.yahoo.jdisc.HeaderFields; +import com.yahoo.jdisc.Response; +import com.yahoo.jdisc.http.Cookie; +import com.yahoo.jdisc.http.CookieHelper; +import com.yahoo.jdisc.http.HttpResponse; + import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; @@ -8,31 +14,25 @@ import java.util.Collections; import java.util.Enumeration; import java.util.List; -import com.yahoo.jdisc.http.servlet.ServletOrJdiscHttpResponse; - -import com.yahoo.jdisc.HeaderFields; -import com.yahoo.jdisc.http.Cookie; - - -import com.yahoo.jdisc.http.HttpResponse; - /** - * This class was made abstract from 5.27. Test cases that need - * a concrete instance should create a {@link JdiscFilterResponse}. + * Response type for {@link SecurityResponseFilter}. 
* - * @author tejalk + * @author Tejal Knot + * @author bjorncs */ -public abstract class DiscFilterResponse { +public class DiscFilterResponse { - private final ServletOrJdiscHttpResponse parent; + private final Response parent; private final HeaderFields untreatedHeaders; private final List<Cookie> untreatedCookies; - public DiscFilterResponse(ServletOrJdiscHttpResponse parent) { + public DiscFilterResponse(HttpResponse parent) { this((Response)parent); } + + DiscFilterResponse(Response parent) { this.parent = parent; this.untreatedHeaders = new HeaderFields(); - parent.copyHeaders(untreatedHeaders); + untreatedHeaders.addAll(parent.headers()); this.untreatedCookies = getCookies(); } @@ -75,53 +75,71 @@ public abstract class DiscFilterResponse { * <p> * If the header had already been set, the new value overwrites the previous one. */ - public abstract void setHeader(String name, String value); + public void setHeader(String name, String value) { + parent.headers().put(name, value); + } - public abstract void removeHeaders(String name); + public void removeHeaders(String name) { + parent.headers().remove(name); + } /** * Sets a header with the given name and value. * <p> * If the header had already been set, the new value overwrites the previous one. */ - public abstract void setHeaders(String name, String value); + public void setHeaders(String name, String value) { + parent.headers().put(name, value); + } /** * Sets a header with the given name and value. * <p> * If the header had already been set, the new value overwrites the previous one. 
*/ - public abstract void setHeaders(String name, List<String> values); + public void setHeaders(String name, List<String> values) { + parent.headers().put(name, values); + } /** * Adds a header with the given name and value * @see com.yahoo.jdisc.HeaderFields#add */ - public abstract void addHeader(String name, String value); + public void addHeader(String name, String value) { + parent.headers().add(name, value); + } - public abstract String getHeader(String name); + public String getHeader(String name) { + List<String> values = parent.headers().get(name); + if (values == null || values.isEmpty()) { + return null; + } + return values.get(values.size() - 1); + } public List<Cookie> getCookies() { - return parent.decodeSetCookieHeader(); + return CookieHelper.decodeSetCookieHeader(parent.headers()); } - public abstract void setCookies(List<Cookie> cookies); + public void setCookies(List<Cookie> cookies) { + CookieHelper.encodeSetCookieHeader(parent.headers(), cookies); + } public int getStatus() { return parent.getStatus(); } - public abstract void setStatus(int status); + public void setStatus(int status) { + parent.setStatus(status); + } /** * Return the parent HttpResponse + * + * @deprecated Use methods on {@link DiscFilterResponse} instead */ - public HttpResponse getParentResponse() { - if (parent instanceof HttpResponse) - return (HttpResponse)parent; - throw new UnsupportedOperationException( - "getParentResponse is not supported for " + parent.getClass().getName()); - } + @Deprecated(forRemoval = true, since = "7.511") + public HttpResponse getParentResponse() { return (HttpResponse) parent; } public void addCookie(JDiscCookieWrapper cookie) { if(cookie != null) { diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/filter/JdiscFilterRequest.java b/container-core/src/main/java/com/yahoo/jdisc/http/filter/JdiscFilterRequest.java index eaa02680d48..74c3b8adc7d 100644 --- 
a/container-core/src/main/java/com/yahoo/jdisc/http/filter/JdiscFilterRequest.java +++ b/container-core/src/main/java/com/yahoo/jdisc/http/filter/JdiscFilterRequest.java @@ -1,134 +1,17 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.jdisc.http.filter; -import com.yahoo.jdisc.http.HttpHeaders; import com.yahoo.jdisc.http.HttpRequest; -import com.yahoo.jdisc.http.servlet.ServletRequest; - -import java.net.URI; -import java.security.Principal; -import java.security.cert.X509Certificate; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.Enumeration; -import java.util.List; -import java.util.Optional; /** * JDisc implementation of a filter request. * - * @since 5.27 */ +@Deprecated(forRemoval = true, since = "7.511") public class JdiscFilterRequest extends DiscFilterRequest { - private final HttpRequest parent; - public JdiscFilterRequest(HttpRequest parent) { super(parent); - this.parent = parent; - } - - public HttpRequest getParentRequest() { - return parent; - } - - @Deprecated - public void setUri(URI uri) { - parent.setUri(uri); - } - - @Override - public String getMethod() { - return parent.getMethod().name(); - } - - @Override - public String getParameter(String name) { - if(parent.parameters().containsKey(name)) { - return parent.parameters().get(name).get(0); - } - else { - return null; - } - } - - @Override - public Enumeration<String> getParameterNames() { - return Collections.enumeration(parent.parameters().keySet()); - } - - @Override - public void addHeader(String name, String value) { - parent.headers().add(name, value); - } - - @Override - public String getHeader(String name) { - List<String> values = parent.headers().get(name); - if (values == null || values.isEmpty()) { - return null; - } - return values.get(values.size() - 1); - } - - public Enumeration<String> getHeaderNames() { - return 
Collections.enumeration(parent.headers().keySet()); - } - - public List<String> getHeaderNamesAsList() { - return new ArrayList<String>(parent.headers().keySet()); - } - - @Override - public Enumeration<String> getHeaders(String name) { - return Collections.enumeration(getHeadersAsList(name)); - } - - public List<String> getHeadersAsList(String name) { - List<String> values = parent.headers().get(name); - if(values == null) { - return Collections.<String>emptyList(); - } - return parent.headers().get(name); - } - - @Override - public void removeHeaders(String name) { - parent.headers().remove(name); - } - - @Override - public void setHeaders(String name, String value) { - parent.headers().put(name, value); - } - - @Override - public void setHeaders(String name, List<String> values) { - parent.headers().put(name, values); - } - - @Override - public Principal getUserPrincipal() { - return parent.getUserPrincipal(); - } - - @Override - public void setUserPrincipal(Principal principal) { - this.parent.setUserPrincipal(principal); - } - - @Override - public List<X509Certificate> getClientCertificateChain() { - return Optional.ofNullable(parent.context().get(ServletRequest.JDISC_REQUEST_X509CERT)) - .map(X509Certificate[].class::cast) - .map(Arrays::asList) - .orElse(Collections.emptyList()); - } - - @Override - public void clearCookies() { - parent.headers().remove(HttpHeaders.Names.COOKIE); } } diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/filter/JdiscFilterResponse.java b/container-core/src/main/java/com/yahoo/jdisc/http/filter/JdiscFilterResponse.java deleted file mode 100644 index ee2e1be3ebf..00000000000 --- a/container-core/src/main/java/com/yahoo/jdisc/http/filter/JdiscFilterResponse.java +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
-package com.yahoo.jdisc.http.filter; - -import com.yahoo.jdisc.HeaderFields; -import com.yahoo.jdisc.Response; -import com.yahoo.jdisc.http.Cookie; -import com.yahoo.jdisc.http.CookieHelper; -import com.yahoo.jdisc.http.HttpResponse; -import com.yahoo.jdisc.http.servlet.ServletOrJdiscHttpResponse; - -import java.util.List; -import java.util.Map; - -/** - * JDisc implementation of a filter request. - */ -class JdiscFilterResponse extends DiscFilterResponse { - - private final Response parent; - - JdiscFilterResponse(Response parent) { - // A separate adapter is required as DiscFilterResponse will invoke methods from ServletOrJdiscHttpResponse parameter in its constructor - super(parent instanceof HttpResponse ? (HttpResponse)parent : new Adapter(parent)); - this.parent = parent; - } - - @Override - public void setStatus(int status) { - parent.setStatus(status); - } - - @Override - public void setHeader(String name, String value) { - parent.headers().put(name, value); - } - - @Override - public void removeHeaders(String name) { - parent.headers().remove(name); - } - - @Override - public void setHeaders(String name, String value) { - parent.headers().put(name, value); - } - - @Override - public void setHeaders(String name, List<String> values) { - parent.headers().put(name, values); - } - - @Override - public void addHeader(String name, String value) { - parent.headers().add(name, value); - } - - @Override - public String getHeader(String name) { - List<String> values = parent.headers().get(name); - if (values == null || values.isEmpty()) { - return null; - } - return values.get(values.size() - 1); - } - - @Override - public void setCookies(List<Cookie> cookies) { - CookieHelper.encodeSetCookieHeader(parent.headers(), cookies); - } - - private static class Adapter implements ServletOrJdiscHttpResponse { - private final Response response; - - Adapter(Response response) { - this.response = response; - } - - @Override public void copyHeaders(HeaderFields target) { 
target.addAll(response.headers()); } - @Override public int getStatus() { return response.getStatus(); } - @Override public Map<String, Object> context() { return response.context(); } - @Override public List<Cookie> decodeSetCookieHeader() { return CookieHelper.decodeSetCookieHeader(response.headers()); } - } - -} diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/filter/SecurityFilterInvoker.java b/container-core/src/main/java/com/yahoo/jdisc/http/filter/SecurityFilterInvoker.java deleted file mode 100644 index a0b9ec935cb..00000000000 --- a/container-core/src/main/java/com/yahoo/jdisc/http/filter/SecurityFilterInvoker.java +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -package com.yahoo.jdisc.http.filter; - -import com.google.common.annotations.Beta; -import com.yahoo.jdisc.handler.ResponseHandler; -import com.yahoo.jdisc.http.HttpRequest.Method; -import com.yahoo.jdisc.http.servlet.ServletRequest; - -import com.yahoo.jdisc.http.servlet.ServletResponse; -import com.yahoo.jdisc.http.server.jetty.FilterInvoker; - -import javax.servlet.http.HttpServletRequest; -import javax.servlet.http.HttpServletResponse; -import java.net.URI; -import java.util.Collections; -import java.util.List; -import java.util.Optional; - -/** - * Only intended for internal vespa use. - * - * Runs JDisc security filter without using JDisc request/response. - * Only intended to be used in a servlet context, as the error messages are tailored for that. - * - * Assumes that SecurityResponseFilters mutate DiscFilterResponse in the thread they are invoked from. - * - * @author Tony Vaagenes - */ -@Beta -public class SecurityFilterInvoker implements FilterInvoker { - - /** - * Returns the servlet request to be used in any servlets invoked after this. 
- */ - @Override - public HttpServletRequest invokeRequestFilterChain(RequestFilter requestFilterChain, - URI uri, HttpServletRequest httpRequest, - ResponseHandler responseHandler) { - - SecurityRequestFilterChain securityChain = cast(SecurityRequestFilterChain.class, requestFilterChain). - orElseThrow(SecurityFilterInvoker::newUnsupportedOperationException); - - ServletRequest wrappedRequest = new ServletRequest(httpRequest, uri); - securityChain.filter(new ServletFilterRequest(wrappedRequest), responseHandler); - return wrappedRequest; - } - - @Override - public void invokeResponseFilterChain( - ResponseFilter responseFilterChain, - URI uri, - HttpServletRequest request, - HttpServletResponse response) { - - SecurityResponseFilterChain securityChain = cast(SecurityResponseFilterChain.class, responseFilterChain). - orElseThrow(SecurityFilterInvoker::newUnsupportedOperationException); - - ServletFilterResponse wrappedResponse = new ServletFilterResponse(new ServletResponse(response)); - securityChain.filter(new ServletRequestView(uri, request), wrappedResponse); - } - - private static UnsupportedOperationException newUnsupportedOperationException() { - return new UnsupportedOperationException( - "Filter type not supported. If a request is handled by servlets or jax-rs, then any filters invoked for that request must be security filters."); - } - - private <T> Optional<T> cast(Class<T> securityFilterChainClass, Object filter) { - return (securityFilterChainClass.isInstance(filter))? 
- Optional.of(securityFilterChainClass.cast(filter)): - Optional.empty(); - } - - private static class ServletRequestView implements RequestView { - private final HttpServletRequest request; - private final URI uri; - - public ServletRequestView(URI uri, HttpServletRequest request) { - this.request = request; - this.uri = uri; - } - - @Override - public Object getAttribute(String name) { - return request.getAttribute(name); - } - - @Override - public List<String> getHeaders(String name) { - return Collections.unmodifiableList(Collections.list(request.getHeaders(name))); - } - - @Override - public Optional<String> getFirstHeader(String name) { - return getHeaders(name).stream().findFirst(); - } - - @Override - public Optional<Method> getMethod() { - return Optional.of(Method.valueOf(request.getMethod())); - } - - @Override - public URI getUri() { - return uri; - } - } - -} diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/filter/SecurityRequestFilterChain.java b/container-core/src/main/java/com/yahoo/jdisc/http/filter/SecurityRequestFilterChain.java index 0c81d919f0e..554a83a240f 100644 --- a/container-core/src/main/java/com/yahoo/jdisc/http/filter/SecurityRequestFilterChain.java +++ b/container-core/src/main/java/com/yahoo/jdisc/http/filter/SecurityRequestFilterChain.java @@ -30,7 +30,7 @@ public final class SecurityRequestFilterChain extends AbstractResource implement @Override public void filter(HttpRequest request, ResponseHandler responseHandler) { - DiscFilterRequest discFilterRequest = new JdiscFilterRequest(request); + DiscFilterRequest discFilterRequest = new DiscFilterRequest(request); filter(discFilterRequest, responseHandler); } diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/filter/SecurityResponseFilterChain.java b/container-core/src/main/java/com/yahoo/jdisc/http/filter/SecurityResponseFilterChain.java index 91a1da329e5..f74b0ebb5c9 100644 --- 
a/container-core/src/main/java/com/yahoo/jdisc/http/filter/SecurityResponseFilterChain.java +++ b/container-core/src/main/java/com/yahoo/jdisc/http/filter/SecurityResponseFilterChain.java @@ -30,7 +30,7 @@ public class SecurityResponseFilterChain extends AbstractResource implements Res @Override public void filter(Response response, Request request) { - DiscFilterResponse discFilterResponse = new JdiscFilterResponse(response); + DiscFilterResponse discFilterResponse = new DiscFilterResponse(response); RequestView requestView = new RequestViewImpl(request); filter(requestView, discFilterResponse); } diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/filter/ServletFilterRequest.java b/container-core/src/main/java/com/yahoo/jdisc/http/filter/ServletFilterRequest.java deleted file mode 100644 index c27c0e56d30..00000000000 --- a/container-core/src/main/java/com/yahoo/jdisc/http/filter/ServletFilterRequest.java +++ /dev/null @@ -1,170 +0,0 @@ -// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -package com.yahoo.jdisc.http.filter; - -import com.yahoo.jdisc.http.HttpHeaders; -import com.yahoo.jdisc.http.servlet.ServletRequest; - -import java.io.UnsupportedEncodingException; -import java.net.URI; -import java.security.Principal; -import java.security.cert.X509Certificate; -import java.util.Arrays; -import java.util.Collections; -import java.util.Enumeration; -import java.util.HashSet; -import java.util.List; -import java.util.Optional; -import java.util.Set; - -/** - * Servlet implementation for JDisc filter requests. 
- */ -class ServletFilterRequest extends DiscFilterRequest { - - private final ServletRequest parent; - - public ServletFilterRequest(ServletRequest parent) { - super(parent); - this.parent = parent; - } - - ServletRequest getServletRequest() { - return parent; - } - - @Deprecated - public void setUri(URI uri) { - parent.setUri(uri); - } - - @Override - public String getMethod() { - return parent.getRequest().getMethod(); - } - - @Override - public void setRemoteAddr(String remoteIpAddress) { - throw new UnsupportedOperationException( - "Setting remote address is not supported for " + this.getClass().getName()); - } - - @Override - public Enumeration<String> getAttributeNames() { - Set<String> names = new HashSet<>(Collections.list(super.getAttributeNames())); - names.addAll(Collections.list(parent.getRequest().getAttributeNames())); - return Collections.enumeration(names); - } - - @Override - public Object getAttribute(String name) { - Object jdiscAttribute = super.getAttribute(name); - return jdiscAttribute != null ? 
- jdiscAttribute : - parent.getRequest().getAttribute(name); - } - - @Override - public void setAttribute(String name, Object value) { - super.setAttribute(name, value); - parent.getRequest().setAttribute(name, value); - } - - @Override - public boolean containsAttribute(String name) { - return super.containsAttribute(name) - || parent.getRequest().getAttribute(name) != null; - } - - @Override - public void removeAttribute(String name) { - super.removeAttribute(name); - parent.getRequest().removeAttribute(name); - } - - @Override - public String getParameter(String name) { - return parent.getParameter(name); - } - - @Override - public Enumeration<String> getParameterNames() { - return parent.getParameterNames(); - } - - @Override - public void addHeader(String name, String value) { - parent.addHeader(name, value); - } - - @Override - public String getHeader(String name) { - return parent.getHeader(name); - } - - @Override - public Enumeration<String> getHeaderNames() { - return parent.getHeaderNames(); - } - - public List<String> getHeaderNamesAsList() { - return Collections.list(getHeaderNames()); - } - - @Override - public Enumeration<String> getHeaders(String name) { - return parent.getHeaders(name); - } - - @Override - public List<String> getHeadersAsList(String name) { - return Collections.list(getHeaders(name)); - } - - @Override - public void setHeaders(String name, String value) { - parent.setHeaders(name, value); - } - - @Override - public void setHeaders(String name, List<String> values) { - parent.setHeaders(name, values); - } - - @Override - public Principal getUserPrincipal() { - return parent.getUserPrincipal(); - } - - @Override - public void setUserPrincipal(Principal principal) { - parent.setUserPrincipal(principal); - } - - @Override - public List<X509Certificate> getClientCertificateChain() { - return Optional.ofNullable(parent.getRequest().getAttribute(ServletRequest.SERVLET_REQUEST_X509CERT)) - .map(X509Certificate[].class::cast) - 
.map(Arrays::asList) - .orElse(Collections.emptyList()); - } - - @Override - public void removeHeaders(String name) { - parent.removeHeaders(name); - } - - @Override - public void clearCookies() { - parent.removeHeaders(HttpHeaders.Names.COOKIE); - } - - @Override - public void setCharacterEncoding(String encoding) { - super.setCharacterEncoding(encoding); - try { - parent.setCharacterEncoding(encoding); - } catch (UnsupportedEncodingException e) { - throw new RuntimeException("Encoding not supported: " + encoding, e); - } - } -} diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/filter/ServletFilterResponse.java b/container-core/src/main/java/com/yahoo/jdisc/http/filter/ServletFilterResponse.java deleted file mode 100644 index b706e5a7ec6..00000000000 --- a/container-core/src/main/java/com/yahoo/jdisc/http/filter/ServletFilterResponse.java +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -package com.yahoo.jdisc.http.filter; - -import com.google.common.collect.Iterables; -import com.yahoo.jdisc.http.Cookie; -import com.yahoo.jdisc.http.HttpHeaders; -import com.yahoo.jdisc.http.servlet.ServletResponse; - -import javax.servlet.http.HttpServletResponse; -import java.util.Collection; -import java.util.List; - -/** - * Servlet implementation for JDisc filter responses. 
- */ -class ServletFilterResponse extends DiscFilterResponse { - - private final ServletResponse parent; - - public ServletFilterResponse(ServletResponse parent) { - super(parent); - this.parent = parent; - } - - ServletResponse getServletResponse() { - return parent; - } - - public void setStatus(int status) { - parent.setStatus(status); - } - - @Override - public void setHeader(String name, String value) { - parent.setHeader(name, value); - } - - @Override - public void removeHeaders(String name) { - HttpServletResponse parentResponse = parent.getResponse(); - if (parentResponse instanceof org.eclipse.jetty.server.Response) { - org.eclipse.jetty.server.Response jettyResponse = (org.eclipse.jetty.server.Response)parentResponse; - jettyResponse.getHttpFields().remove(name); - } else { - throw new UnsupportedOperationException( - "Cannot remove headers for response of type " + parentResponse.getClass().getName()); - } - } - - // Why have a setHeaders that takes a single string? - @Override - public void setHeaders(String name, String value) { - parent.setHeader(name, value); - } - - @Override - public void setHeaders(String name, List<String> values) { - for (String value : values) - parent.addHeader(name, value); - } - - @Override - public void addHeader(String name, String value) { - parent.addHeader(name, value); - } - - @Override - public String getHeader(String name) { - Collection<String> headers = parent.getHeaders(name); - return headers.isEmpty() - ? 
null - : Iterables.getLast(headers); - } - - @Override - public void setCookies(List<Cookie> cookies) { - removeHeaders(HttpHeaders.Names.SET_COOKIE); - List<String> setCookieHeaders = Cookie.toSetCookieHeaders(cookies); - setCookieHeaders.forEach(cookie -> addHeader(HttpHeaders.Names.SET_COOKIE, cookie)); - } -} diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/AccessLogRequestLog.java b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/AccessLogRequestLog.java index 46738c1501b..b41c80a471c 100644 --- a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/AccessLogRequestLog.java +++ b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/AccessLogRequestLog.java @@ -6,8 +6,8 @@ import com.yahoo.container.logging.AccessLog; import com.yahoo.container.logging.AccessLogEntry; import com.yahoo.container.logging.RequestLog; import com.yahoo.container.logging.RequestLogEntry; +import com.yahoo.jdisc.http.HttpRequest; import com.yahoo.jdisc.http.ServerConfig; -import com.yahoo.jdisc.http.servlet.ServletRequest; import org.eclipse.jetty.http2.HTTP2Stream; import org.eclipse.jetty.http2.server.HttpTransportOverHTTP2; import org.eclipse.jetty.server.HttpChannel; @@ -17,7 +17,6 @@ import org.eclipse.jetty.server.Response; import org.eclipse.jetty.util.component.AbstractLifeCycle; import javax.servlet.http.HttpServletRequest; -import java.security.Principal; import java.security.cert.X509Certificate; import java.time.Duration; import java.time.Instant; @@ -81,13 +80,15 @@ class AccessLogRequestLog extends AbstractLifeCycle implements org.eclipse.jetty addNonNullValue(builder, request.getHeader("Referer"), RequestLogEntry.Builder::referer); addNonNullValue(builder, request.getQueryString(), RequestLogEntry.Builder::rawQuery); - Principal principal = (Principal) request.getAttribute(ServletRequest.JDISC_REQUEST_PRINCIPAL); - addNonNullValue(builder, principal, RequestLogEntry.Builder::userPrincipal); + HttpRequest 
jdiscRequest = (HttpRequest) request.getAttribute(HttpRequest.class.getName()); + if (jdiscRequest != null) { + addNonNullValue(builder, jdiscRequest.getUserPrincipal(), RequestLogEntry.Builder::userPrincipal); + } - String requestFilterId = (String) request.getAttribute(ServletRequest.JDISC_REQUEST_CHAIN); + String requestFilterId = (String) request.getAttribute(RequestUtils.JDISC_REQUEST_CHAIN); addNonNullValue(builder, requestFilterId, (b, chain) -> b.addExtraAttribute("request-chain", chain)); - String responseFilterId = (String) request.getAttribute(ServletRequest.JDISC_RESPONSE_CHAIN); + String responseFilterId = (String) request.getAttribute(RequestUtils.JDISC_RESPONSE_CHAIN); addNonNullValue(builder, responseFilterId, (b, chain) -> b.addExtraAttribute("response-chain", chain)); UUID connectionId = (UUID) request.getAttribute(JettyConnectionLogger.CONNECTION_ID_REQUEST_ATTRIBUTE); @@ -107,7 +108,7 @@ class AccessLogRequestLog extends AbstractLifeCycle implements org.eclipse.jetty builder.addExtraAttribute(header, value); } }); - X509Certificate[] clientCert = (X509Certificate[]) request.getAttribute(ServletRequest.SERVLET_REQUEST_X509CERT); + X509Certificate[] clientCert = (X509Certificate[]) request.getAttribute(RequestUtils.SERVLET_REQUEST_X509CERT); if (clientCert != null && clientCert.length > 0) { builder.sslPrincipal(clientCert[0].getSubjectX500Principal()); } diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/FilterInvoker.java b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/FilterInvoker.java deleted file mode 100644 index 3c329bbf13b..00000000000 --- a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/FilterInvoker.java +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
-package com.yahoo.jdisc.http.server.jetty; - -import com.google.inject.ImplementedBy; -import com.yahoo.jdisc.handler.ResponseHandler; -import com.yahoo.jdisc.http.filter.RequestFilter; -import com.yahoo.jdisc.http.filter.ResponseFilter; - -import javax.servlet.http.HttpServletRequest; -import javax.servlet.http.HttpServletResponse; -import java.net.URI; - -/** - * Separate interface since DiscFilterRequest/Response and Security filter chains are not accessible in this bundle - */ -@ImplementedBy(UnsupportedFilterInvoker.class) -public interface FilterInvoker { - HttpServletRequest invokeRequestFilterChain(RequestFilter requestFilterChain, - URI uri, - HttpServletRequest httpRequest, - ResponseHandler responseHandler); - - void invokeResponseFilterChain( - ResponseFilter responseFilterChain, - URI uri, - HttpServletRequest request, - HttpServletResponse response); -} diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/FilterInvokingPrintWriter.java b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/FilterInvokingPrintWriter.java deleted file mode 100644 index 90b12e64a55..00000000000 --- a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/FilterInvokingPrintWriter.java +++ /dev/null @@ -1,266 +0,0 @@ -// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -package com.yahoo.jdisc.http.server.jetty; - -import java.io.IOException; -import java.io.PrintWriter; -import java.io.Writer; -import java.util.Locale; - -/** - * Invokes the response filter the first time anything is output to the underlying PrintWriter. - * The filter must be invoked before the first output call since this might cause the response - * to be committed, i.e. locked and potentially put on the wire. - * Any changes to the response after it has been committed might be ignored or cause exceptions. 
- * @author Tony Vaagenes - */ -final class FilterInvokingPrintWriter extends PrintWriter { - private final PrintWriter delegate; - private final OneTimeRunnable filterInvoker; - - public FilterInvokingPrintWriter(PrintWriter delegate, OneTimeRunnable filterInvoker) { - /* The PrintWriter class both - * 1) exposes new methods, the PrintWriter "interface" - * 2) implements PrintWriter and Writer methods that does some extra things before calling down to the writer methods. - * If super was invoked with the delegate PrintWriter, the superclass would behave as a PrintWriter(PrintWriter), - * i.e. the extra things in 2. would be done twice. - * To avoid this, all the methods of PrintWriter are overridden with versions that forward directly to the underlying delegate - * instead of going through super. - * The super class is initialized with a non-functioning writer to catch mistakenly non-overridden methods. - */ - super(new Writer() { - @Override - public void write(char[] cbuf, int off, int len) throws IOException { - throwAssertionError(); - } - - private void throwAssertionError() { - throw new AssertionError(FilterInvokingPrintWriter.class.getName() + " failed to delegate to the underlying writer"); - } - - @Override - public void flush() throws IOException { - throwAssertionError(); - } - - @Override - public void close() throws IOException { - throwAssertionError(); - } - }); - - this.delegate = delegate; - this.filterInvoker = filterInvoker; - } - - @Override - public String toString() { - return getClass().getName() + " (" + super.toString() + ")"; - } - - private void runFilterIfFirstInvocation() { - filterInvoker.runIfFirstInvocation(); - } - - @Override - public void flush() { - runFilterIfFirstInvocation(); - delegate.flush(); - } - - @Override - public void close() { - runFilterIfFirstInvocation(); - delegate.close(); - } - - @Override - public boolean checkError() { - return delegate.checkError(); - } - - @Override - public void write(int c) { - 
runFilterIfFirstInvocation(); - delegate.write(c); - } - - @Override - public void write(char[] buf, int off, int len) { - runFilterIfFirstInvocation(); - delegate.write(buf, off, len); - } - - @Override - public void write(char[] buf) { - runFilterIfFirstInvocation(); - delegate.write(buf); - } - - @Override - public void write(String s, int off, int len) { - runFilterIfFirstInvocation(); - delegate.write(s, off, len); - } - - @Override - public void write(String s) { - runFilterIfFirstInvocation(); - delegate.write(s); - } - - @Override - public void print(boolean b) { - runFilterIfFirstInvocation(); - delegate.print(b); - } - - @Override - public void print(char c) { - runFilterIfFirstInvocation(); - delegate.print(c); - } - - @Override - public void print(int i) { - runFilterIfFirstInvocation(); - delegate.print(i); - } - - @Override - public void print(long l) { - runFilterIfFirstInvocation(); - delegate.print(l); - } - - @Override - public void print(float f) { - runFilterIfFirstInvocation(); - delegate.print(f); - } - - @Override - public void print(double d) { - runFilterIfFirstInvocation(); - delegate.print(d); - } - - @Override - public void print(char[] s) { - runFilterIfFirstInvocation(); - delegate.print(s); - } - - @Override - public void print(String s) { - runFilterIfFirstInvocation(); - delegate.print(s); - } - - @Override - public void print(Object obj) { - runFilterIfFirstInvocation(); - delegate.print(obj); - } - - @Override - public void println() { - runFilterIfFirstInvocation(); - delegate.println(); - } - - @Override - public void println(boolean x) { - runFilterIfFirstInvocation(); - delegate.println(x); - } - - @Override - public void println(char x) { - runFilterIfFirstInvocation(); - delegate.println(x); - } - - @Override - public void println(int x) { - runFilterIfFirstInvocation(); - delegate.println(x); - } - - @Override - public void println(long x) { - runFilterIfFirstInvocation(); - delegate.println(x); - } - - @Override - public 
void println(float x) { - runFilterIfFirstInvocation(); - delegate.println(x); - } - - @Override - public void println(double x) { - runFilterIfFirstInvocation(); - delegate.println(x); - } - - @Override - public void println(char[] x) { - runFilterIfFirstInvocation(); - delegate.println(x); - } - - @Override - public void println(String x) { - runFilterIfFirstInvocation(); - delegate.println(x); - } - - @Override - public void println(Object x) { - runFilterIfFirstInvocation(); - delegate.println(x); - } - - @Override - public PrintWriter printf(String format, Object... args) { - runFilterIfFirstInvocation(); - return delegate.printf(format, args); - } - - @Override - public PrintWriter printf(Locale l, String format, Object... args) { - runFilterIfFirstInvocation(); - return delegate.printf(l, format, args); - } - - @Override - public PrintWriter format(String format, Object... args) { - runFilterIfFirstInvocation(); - return delegate.format(format, args); - } - - @Override - public PrintWriter format(Locale l, String format, Object... args) { - runFilterIfFirstInvocation(); - return delegate.format(l, format, args); - } - - @Override - public PrintWriter append(CharSequence csq) { - runFilterIfFirstInvocation(); - return delegate.append(csq); - } - - @Override - public PrintWriter append(CharSequence csq, int start, int end) { - runFilterIfFirstInvocation(); - return delegate.append(csq, start, end); - } - - @Override - public PrintWriter append(char c) { - runFilterIfFirstInvocation(); - return delegate.append(c); - } -} diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/FilterInvokingServletOutputStream.java b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/FilterInvokingServletOutputStream.java deleted file mode 100644 index d2be107ef86..00000000000 --- a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/FilterInvokingServletOutputStream.java +++ /dev/null @@ -1,165 +0,0 @@ -// Copyright Yahoo. 
Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -package com.yahoo.jdisc.http.server.jetty; - -import javax.servlet.ServletOutputStream; -import javax.servlet.WriteListener; -import java.io.IOException; - -/** - * Invokes the response filter the first time anything is output to the underlying ServletOutputStream. - * The filter must be invoked before the first output call since this might cause the response - * to be committed, i.e. locked and potentially put on the wire. - * Any changes to the response after it has been committed might be ignored or cause exceptions. - * - * @author Tony Vaagenes - */ -class FilterInvokingServletOutputStream extends ServletOutputStream { - private final ServletOutputStream delegate; - private final OneTimeRunnable filterInvoker; - - public FilterInvokingServletOutputStream(ServletOutputStream delegate, OneTimeRunnable filterInvoker) { - this.delegate = delegate; - this.filterInvoker = filterInvoker; - } - - @Override - public boolean isReady() { - return delegate.isReady(); - } - - @Override - public void setWriteListener(WriteListener writeListener) { - delegate.setWriteListener(writeListener); - } - - - private void runFilterIfFirstInvocation() { - filterInvoker.runIfFirstInvocation(); - } - - @Override - public void write(int b) throws IOException { - runFilterIfFirstInvocation(); - delegate.write(b); - } - - - @Override - public void write(byte[] b) throws IOException { - runFilterIfFirstInvocation(); - delegate.write(b); - } - - @Override - public void print(String s) throws IOException { - runFilterIfFirstInvocation(); - delegate.print(s); - } - - @Override - public void write(byte[] b, int off, int len) throws IOException { - runFilterIfFirstInvocation(); - delegate.write(b, off, len); - } - - @Override - public void print(boolean b) throws IOException { - runFilterIfFirstInvocation(); - delegate.print(b); - } - - @Override - public void flush() throws IOException { - 
runFilterIfFirstInvocation(); - delegate.flush(); - } - - @Override - public void print(char c) throws IOException { - runFilterIfFirstInvocation(); - delegate.print(c); - } - - @Override - public void close() throws IOException { - runFilterIfFirstInvocation(); - delegate.close(); - } - - @Override - public void print(int i) throws IOException { - runFilterIfFirstInvocation(); - delegate.print(i); - } - - @Override - public void print(long l) throws IOException { - runFilterIfFirstInvocation(); - delegate.print(l); - } - - @Override - public void print(float f) throws IOException { - runFilterIfFirstInvocation(); - delegate.print(f); - } - - @Override - public void print(double d) throws IOException { - runFilterIfFirstInvocation(); - delegate.print(d); - } - - @Override - public void println() throws IOException { - runFilterIfFirstInvocation(); - delegate.println(); - } - - @Override - public void println(String s) throws IOException { - runFilterIfFirstInvocation(); - delegate.println(s); - } - - @Override - public void println(boolean b) throws IOException { - runFilterIfFirstInvocation(); - delegate.println(b); - } - - @Override - public void println(char c) throws IOException { - runFilterIfFirstInvocation(); - delegate.println(c); - } - - @Override - public void println(int i) throws IOException { - runFilterIfFirstInvocation(); - delegate.println(i); - } - - @Override - public void println(long l) throws IOException { - runFilterIfFirstInvocation(); - delegate.println(l); - } - - @Override - public void println(float f) throws IOException { - runFilterIfFirstInvocation(); - delegate.println(f); - } - - @Override - public void println(double d) throws IOException { - runFilterIfFirstInvocation(); - delegate.println(d); - } - - @Override - public String toString() { - return getClass().getCanonicalName() + " (" + delegate.toString() + ")"; - } -} diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/FilterResolver.java 
b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/FilterResolver.java index 873f336f0c9..32def124131 100644 --- a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/FilterResolver.java +++ b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/FilterResolver.java @@ -10,7 +10,6 @@ import com.yahoo.jdisc.handler.ResponseHandler; import com.yahoo.jdisc.http.HttpRequest; import com.yahoo.jdisc.http.filter.RequestFilter; import com.yahoo.jdisc.http.filter.ResponseFilter; -import com.yahoo.jdisc.http.servlet.ServletRequest; import org.eclipse.jetty.server.Request; import java.net.URI; @@ -40,13 +39,13 @@ class FilterResolver { Optional<String> maybeFilterId = bindings.resolveRequestFilter(jdiscUri, getConnector(request).listenPort()); if (maybeFilterId.isPresent()) { metric.add(MetricDefinitions.FILTERING_REQUEST_HANDLED, 1L, createMetricContext(request, maybeFilterId.get())); - request.setAttribute(ServletRequest.JDISC_REQUEST_CHAIN, maybeFilterId.get()); + request.setAttribute(RequestUtils.JDISC_REQUEST_CHAIN, maybeFilterId.get()); } else if (!strictFiltering) { metric.add(MetricDefinitions.FILTERING_REQUEST_UNHANDLED, 1L, createMetricContext(request, null)); } else { String syntheticFilterId = RejectingRequestFilter.SYNTHETIC_FILTER_CHAIN_ID; metric.add(MetricDefinitions.FILTERING_REQUEST_HANDLED, 1L, createMetricContext(request, syntheticFilterId)); - request.setAttribute(ServletRequest.JDISC_REQUEST_CHAIN, syntheticFilterId); + request.setAttribute(RequestUtils.JDISC_REQUEST_CHAIN, syntheticFilterId); return Optional.of(RejectingRequestFilter.INSTANCE); } return maybeFilterId.map(bindings::getRequestFilter); @@ -56,7 +55,7 @@ class FilterResolver { Optional<String> maybeFilterId = bindings.resolveResponseFilter(jdiscUri, getConnector(request).listenPort()); if (maybeFilterId.isPresent()) { metric.add(MetricDefinitions.FILTERING_RESPONSE_HANDLED, 1L, createMetricContext(request, maybeFilterId.get())); - 
request.setAttribute(ServletRequest.JDISC_RESPONSE_CHAIN, maybeFilterId.get()); + request.setAttribute(RequestUtils.JDISC_RESPONSE_CHAIN, maybeFilterId.get()); } else { metric.add(MetricDefinitions.FILTERING_RESPONSE_UNHANDLED, 1L, createMetricContext(request, null)); } diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/HttpRequestDispatch.java b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/HttpRequestDispatch.java index 779a5f65673..9292e2024df 100644 --- a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/HttpRequestDispatch.java +++ b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/HttpRequestDispatch.java @@ -5,7 +5,6 @@ import com.yahoo.container.logging.AccessLogEntry; import com.yahoo.jdisc.Metric.Context; import com.yahoo.jdisc.References; import com.yahoo.jdisc.ResourceReference; -import com.yahoo.jdisc.Response; import com.yahoo.jdisc.handler.BindingNotFoundException; import com.yahoo.jdisc.handler.ContentChannel; import com.yahoo.jdisc.handler.OverloadException; @@ -38,7 +37,6 @@ import java.util.logging.Logger; import static com.yahoo.jdisc.http.HttpHeaders.Values.APPLICATION_X_WWW_FORM_URLENCODED; import static com.yahoo.jdisc.http.server.jetty.RequestUtils.getConnector; -import static com.yahoo.yolean.Exceptions.throwUnchecked; /** * @author Simon Thoresen Hult @@ -100,22 +98,8 @@ class HttpRequestDispatch { if (t != null) requestCompletion.completeExceptionally(t); else requestCompletion.complete(null); }); - } - - ContentChannel dispatchFilterRequest(Response response) { - try { - CompletableFuture<Void> requestCompletion = startServletAsyncExecution(); - jettyRequest.getInputStream().close(); - ContentChannel responseContentChannel = servletResponseController.responseHandler().handleResponse(response); - servletResponseController.finishedFuture() - .whenComplete((r, t) -> { - if (t != null) requestCompletion.completeExceptionally(t); - else requestCompletion.complete(null); - 
}); - return responseContentChannel; - } catch (IOException e) { - throw throwUnchecked(e); - } + // Start the reader after wiring of "finished futures" are complete + servletRequestReader.start(); } private CompletableFuture<Void> startServletAsyncExecution() { @@ -217,11 +201,7 @@ class HttpRequestDispatch { HttpRequestFactory.copyHeaders(jettyRequest, jdiscRequest); requestContentChannel = requestHandler.handleRequest(jdiscRequest, servletResponseController.responseHandler()); } - //TODO If the below method throws servletRequestReader will not complete and - // requestContentChannel will not be closed and there is a reference leak - // Ditto for the servletInputStream - return new ServletRequestReader( - jettyRequest.getInputStream(), requestContentChannel, jDiscContext.janitor, metricReporter); + return new ServletRequestReader(jettyRequest, requestContentChannel, jDiscContext.janitor, metricReporter); } private static RequestHandler newRequestHandler(JDiscContext context, diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/HttpRequestFactory.java b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/HttpRequestFactory.java index 64cfbc96b17..c54fa1cf1b9 100644 --- a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/HttpRequestFactory.java +++ b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/HttpRequestFactory.java @@ -2,7 +2,6 @@ package com.yahoo.jdisc.http.server.jetty; import com.yahoo.jdisc.http.HttpRequest; -import com.yahoo.jdisc.http.servlet.ServletRequest; import com.yahoo.jdisc.service.CurrentContainer; import org.eclipse.jetty.server.Request; import org.eclipse.jetty.util.Utf8Appendable; @@ -32,7 +31,8 @@ class HttpRequestFactory { HttpRequest.Version.fromString(servletRequest.getProtocol()), new InetSocketAddress(servletRequest.getRemoteAddr(), servletRequest.getRemotePort()), getConnection((Request) servletRequest).getCreatedTimeStamp()); - 
httpRequest.context().put(ServletRequest.JDISC_REQUEST_X509CERT, getCertChain(servletRequest)); + httpRequest.context().put(RequestUtils.JDISC_REQUEST_X509CERT, getCertChain(servletRequest)); + servletRequest.setAttribute(HttpRequest.class.getName(), httpRequest); return httpRequest; } catch (Utf8Appendable.NotUtf8Exception e) { throw createBadQueryException(e); diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/HttpResponseStatisticsCollector.java b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/HttpResponseStatisticsCollector.java index 2a6a217dc33..0aa2820f959 100644 --- a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/HttpResponseStatisticsCollector.java +++ b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/HttpResponseStatisticsCollector.java @@ -1,6 +1,7 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.jdisc.http.server.jetty; +import com.yahoo.jdisc.Metric; import com.yahoo.jdisc.http.HttpRequest; import org.eclipse.jetty.http.HttpStatus; import org.eclipse.jetty.server.AsyncContextEvent; @@ -18,12 +19,21 @@ import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; import java.io.IOException; import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; import java.util.concurrent.Future; import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; import java.util.concurrent.atomic.LongAdder; +import java.util.function.ObjLongConsumer; +import java.util.stream.Collectors; /** * HttpResponseStatisticsCollector collects statistics about HTTP response types aggregated by category @@ -33,7 
+43,7 @@ import java.util.concurrent.atomic.LongAdder; * * @author ollivir */ -public class HttpResponseStatisticsCollector extends HandlerWrapper implements Graceful { +class HttpResponseStatisticsCollector extends HandlerWrapper implements Graceful { static final String requestTypeAttribute = "requestType"; @@ -41,48 +51,12 @@ public class HttpResponseStatisticsCollector extends HandlerWrapper implements G private final List<String> monitoringHandlerPaths; private final List<String> searchHandlerPaths; - public enum HttpMethod { - GET, PATCH, POST, PUT, DELETE, OPTIONS, HEAD, OTHER - } - - public enum HttpScheme { - HTTP, HTTPS, OTHER - } - - public enum HttpProtocol { - HTTP1, HTTP2, OTHER - } - - private static final String[] HTTP_RESPONSE_GROUPS = { - MetricDefinitions.RESPONSES_1XX, - MetricDefinitions.RESPONSES_2XX, - MetricDefinitions.RESPONSES_3XX, - MetricDefinitions.RESPONSES_4XX, - MetricDefinitions.RESPONSES_5XX, - MetricDefinitions.RESPONSES_401, - MetricDefinitions.RESPONSES_403 - }; - private final AtomicLong inFlight = new AtomicLong(); - private final LongAdder[][][][][] statistics; // TODO Rewrite me to a smarter data structure + private final ConcurrentMap<StatusCodeMetric, LongAdder> statistics = new ConcurrentHashMap<>(); - public HttpResponseStatisticsCollector(List<String> monitoringHandlerPaths, List<String> searchHandlerPaths) { + HttpResponseStatisticsCollector(List<String> monitoringHandlerPaths, List<String> searchHandlerPaths) { this.monitoringHandlerPaths = monitoringHandlerPaths; this.searchHandlerPaths = searchHandlerPaths; - statistics = new LongAdder[HttpProtocol.values().length][HttpScheme.values().length][HttpMethod.values().length][][]; - for (int protocol = 0; protocol < HttpProtocol.values().length; protocol++) { - for (int scheme = 0; scheme < HttpScheme.values().length; ++scheme) { - for (int method = 0; method < HttpMethod.values().length; method++) { - statistics[protocol][scheme][method] = new 
LongAdder[HTTP_RESPONSE_GROUPS.length][]; - for (int group = 0; group < HTTP_RESPONSE_GROUPS.length; group++) { - statistics[protocol][scheme][method][group] = new LongAdder[HttpRequest.RequestType.values().length]; - for (int requestType = 0; requestType < HttpRequest.RequestType.values().length; requestType++) { - statistics[protocol][scheme][method][group][requestType] = new LongAdder(); - } - } - } - } - } } private final AsyncListener completionWatcher = new AsyncListener() { @@ -133,18 +107,10 @@ public class HttpResponseStatisticsCollector extends HandlerWrapper implements G } private void observeEndOfRequest(Request request, HttpServletResponse flushableResponse) throws IOException { - int group = groupIndex(request); - if (group >= 0) { - HttpProtocol protocol = getProtocol(request); - HttpScheme scheme = getScheme(request); - HttpMethod method = getMethod(request); - HttpRequest.RequestType requestType = getRequestType(request); - - statistics[protocol.ordinal()][scheme.ordinal()][method.ordinal()][group][requestType.ordinal()].increment(); - if (group == 5 || group == 6) { // if 401/403, also increment 4xx - statistics[protocol.ordinal()][scheme.ordinal()][method.ordinal()][3][requestType.ordinal()].increment(); - } - } + var metrics = StatusCodeMetric.of(request, monitoringHandlerPaths, searchHandlerPaths); + metrics.forEach(metric -> + statistics.computeIfAbsent(metric, __ -> new LongAdder()) + .increment()); long live = inFlight.decrementAndGet(); FutureCallback shutdownCb = shutdown.get(); @@ -158,108 +124,24 @@ public class HttpResponseStatisticsCollector extends HandlerWrapper implements G } } - private int groupIndex(Request request) { - int index = request.getResponse().getStatus(); - if (index == 401) { - return 5; - } - if (index == 403) { - return 6; - } - - index = index / 100 - 1; // 1xx = 0, 2xx = 1 etc. 
- if (index < 0 || index >= statistics[0][0].length) { - return -1; - } else { - return index; - } - } - - private HttpScheme getScheme(Request request) { - switch (request.getScheme()) { - case "http": - return HttpScheme.HTTP; - case "https": - return HttpScheme.HTTPS; - default: - return HttpScheme.OTHER; - } - } - - private HttpMethod getMethod(Request request) { - switch (request.getMethod()) { - case "GET": - return HttpMethod.GET; - case "PATCH": - return HttpMethod.PATCH; - case "POST": - return HttpMethod.POST; - case "PUT": - return HttpMethod.PUT; - case "DELETE": - return HttpMethod.DELETE; - case "OPTIONS": - return HttpMethod.OPTIONS; - case "HEAD": - return HttpMethod.HEAD; - default: - return HttpMethod.OTHER; - } + List<StatisticsEntry> takeStatistics() { + var ret = new ArrayList<StatisticsEntry>(); + consume((metric, value) -> ret.add(new StatisticsEntry(metric, value))); + return ret; } - private HttpProtocol getProtocol(Request request) { - switch (request.getProtocol()) { - case "HTTP/1": - case "HTTP/1.0": - case "HTTP/1.1": - return HttpProtocol.HTTP1; - case "HTTP/2": - case "HTTP/2.0": - return HttpProtocol.HTTP2; - default: - return HttpProtocol.OTHER; - } + void reportSnapshot(Metric metricAggregator) { + consume((metric, value) -> { + Metric.Context ctx = metricAggregator.createContext(metric.dimensions.asMap()); + metricAggregator.add(metric.name, value, ctx); + }); } - private HttpRequest.RequestType getRequestType(Request request) { - HttpRequest.RequestType requestType = (HttpRequest.RequestType)request.getAttribute(requestTypeAttribute); - if (requestType != null) return requestType; - - // Deduce from path and method: - String path = request.getRequestURI(); - for (String monitoringHandlerPath : monitoringHandlerPaths) { - if (path.startsWith(monitoringHandlerPath)) return HttpRequest.RequestType.MONITORING; - } - for (String searchHandlerPath : searchHandlerPaths) { - if (path.startsWith(searchHandlerPath)) return 
HttpRequest.RequestType.READ; - } - if ("GET".equals(request.getMethod())) { - return HttpRequest.RequestType.READ; - } else { - return HttpRequest.RequestType.WRITE; - } - } - - public List<StatisticsEntry> takeStatistics() { - var ret = new ArrayList<StatisticsEntry>(); - for (HttpProtocol protocol : HttpProtocol.values()) { - int protocolIndex = protocol.ordinal(); - for (HttpScheme scheme : HttpScheme.values()) { - int schemeIndex = scheme.ordinal(); - for (HttpMethod method : HttpMethod.values()) { - int methodIndex = method.ordinal(); - for (int group = 0; group < HTTP_RESPONSE_GROUPS.length; group++) { - for (HttpRequest.RequestType type : HttpRequest.RequestType.values()) { - long value = statistics[protocolIndex][schemeIndex][methodIndex][group][type.ordinal()].sumThenReset(); - if (value > 0) { - ret.add(new StatisticsEntry(protocol.name().toLowerCase(), scheme.name().toLowerCase(), method.name(), HTTP_RESPONSE_GROUPS[group], type.name().toLowerCase(), value)); - } - } - } - } - } - } - return ret; + private void consume(ObjLongConsumer<StatusCodeMetric> consumer) { + statistics.forEach((metric, adder) -> { + long value = adder.sumThenReset(); + if (value > 0) consumer.accept(metric, value); + }); } @Override @@ -294,34 +176,171 @@ public class HttpResponseStatisticsCollector extends HandlerWrapper implements G return futureCallback != null && futureCallback.isDone(); } - public static class StatisticsEntry { + static class Dimensions { + final String protocol; + final String scheme; + final String method; + final String requestType; + final int statusCode; - public final String protocol; - public final String scheme; - public final String method; - public final String name; - public final String requestType; - public final long value; - - public StatisticsEntry(String protocol, String scheme, String method, String name, String requestType, long value) { + private Dimensions(String protocol, String scheme, String method, String requestType, int 
statusCode) { this.protocol = protocol; this.scheme = scheme; this.method = method; - this.name = name; this.requestType = requestType; - this.value = value; + this.statusCode = statusCode; + } + + static Dimensions of(Request req, Collection<String> monitoringHandlerPaths, + Collection<String> searchHandlerPaths) { + String requestType = requestType(req, monitoringHandlerPaths, searchHandlerPaths); + return new Dimensions(protocol(req), scheme(req), method(req), requestType, statusCode(req)); } + Map<String, Object> asMap() { + Map<String, Object> builder = new HashMap<>(); + builder.put(MetricDefinitions.PROTOCOL_DIMENSION, protocol); + builder.put(MetricDefinitions.SCHEME_DIMENSION, scheme); + builder.put(MetricDefinitions.METHOD_DIMENSION, method); + builder.put(MetricDefinitions.REQUEST_TYPE_DIMENSION, requestType); + builder.put(MetricDefinitions.STATUS_CODE_DIMENSION, (long) statusCode); + return Map.copyOf(builder); + } + + private static String protocol(Request req) { + switch (req.getProtocol()) { + case "HTTP/1": + case "HTTP/1.0": + case "HTTP/1.1": + return "http1"; + case "HTTP/2": + case "HTTP/2.0": + return "http2"; + default: + return "other"; + } + } + + private static String scheme(Request req) { + switch (req.getScheme()) { + case "http": + case "https": + return req.getScheme(); + default: + return "other"; + } + } + + private static String method(Request req) { + switch (req.getMethod()) { + case "GET": + case "PATCH": + case "POST": + case "PUT": + case "DELETE": + case "OPTIONS": + case "HEAD": + return req.getMethod(); + default: + return "other"; + } + } + + private static int statusCode(Request req) { return req.getResponse().getStatus(); } + + private static String requestType(Request req, Collection<String> monitoringHandlerPaths, + Collection<String> searchHandlerPaths) { + HttpRequest.RequestType requestType = (HttpRequest.RequestType)req.getAttribute(requestTypeAttribute); + if (requestType != null) return 
requestType.name().toLowerCase(); + // Deduce from path and method: + String path = req.getRequestURI(); + for (String monitoringHandlerPath : monitoringHandlerPaths) { + if (path.startsWith(monitoringHandlerPath)) return "monitoring"; + } + for (String searchHandlerPath : searchHandlerPaths) { + if (path.startsWith(searchHandlerPath)) return "read"; + } + if ("GET".equals(req.getMethod())) return "read"; + else return "write"; + } + + @Override - public String toString() { - return "protocol: " + protocol + - ", scheme: " + scheme + - ", method: " + method + - ", name: " + name + - ", requestType: " + requestType + - ", value: " + value; + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Dimensions that = (Dimensions) o; + return statusCode == that.statusCode && Objects.equals(protocol, that.protocol) + && Objects.equals(scheme, that.scheme) && Objects.equals(method, that.method) + && Objects.equals(requestType, that.requestType); + } + + @Override + public int hashCode() { + return Objects.hash(protocol, scheme, method, requestType, statusCode); } } + static class StatusCodeMetric { + final Dimensions dimensions; + final String name; + + private StatusCodeMetric(Dimensions dimensions, String name) { + this.dimensions = dimensions; + this.name = name; + } + + static Collection<StatusCodeMetric> of(Request req, Collection<String> monitoringHandlerPaths, + Collection<String> searchHandlerPaths) { + Dimensions dimensions = Dimensions.of(req, monitoringHandlerPaths, searchHandlerPaths); + return metricNames(req).stream() + .map(name -> new StatusCodeMetric(dimensions, name)) + .collect(Collectors.toSet()); + } + + @SuppressWarnings("removal") + private static Collection<String> metricNames(Request req) { + int code = req.getResponse().getStatus(); + if (code == 401) return Set.of(MetricDefinitions.RESPONSES_401, MetricDefinitions.RESPONSES_4XX); + else if (code == 403) return 
Set.of(MetricDefinitions.RESPONSES_403, MetricDefinitions.RESPONSES_4XX); + else if (code < 200) return Set.of(MetricDefinitions.RESPONSES_1XX); + else if (code < 300) return Set.of(MetricDefinitions.RESPONSES_2XX); + else if (code < 400) return Set.of(MetricDefinitions.RESPONSES_3XX); + else if (code < 500) return Set.of(MetricDefinitions.RESPONSES_4XX); + else return Set.of(MetricDefinitions.RESPONSES_5XX); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + StatusCodeMetric that = (StatusCodeMetric) o; + return Objects.equals(dimensions, that.dimensions) && Objects.equals(name, that.name); + } + + @Override public int hashCode() { return Objects.hash(dimensions, name); } + } + + static class StatisticsEntry { + final Dimensions dimensions; + final String name; + final long value; + + StatisticsEntry(StatusCodeMetric metric, long value) { + this.dimensions = metric.dimensions; + this.name = metric.name; + this.value = value; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + StatisticsEntry that = (StatisticsEntry) o; + return value == that.value && Objects.equals(dimensions, that.dimensions) && Objects.equals(name, that.name); + } + + @Override public int hashCode() { return Objects.hash(dimensions, name, value); } + } } diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/JDiscFilterInvokerFilter.java b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/JDiscFilterInvokerFilter.java deleted file mode 100644 index 0fd4e8c42fb..00000000000 --- a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/JDiscFilterInvokerFilter.java +++ /dev/null @@ -1,302 +0,0 @@ -// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
-package com.yahoo.jdisc.http.server.jetty; - -import com.yahoo.container.logging.AccessLogEntry; -import com.yahoo.jdisc.handler.ResponseHandler; -import com.yahoo.jdisc.http.filter.RequestFilter; -import org.eclipse.jetty.server.Request; - -import javax.servlet.AsyncContext; -import javax.servlet.AsyncListener; -import javax.servlet.Filter; -import javax.servlet.FilterChain; -import javax.servlet.FilterConfig; -import javax.servlet.ServletContext; -import javax.servlet.ServletException; -import javax.servlet.ServletOutputStream; -import javax.servlet.ServletRequest; -import javax.servlet.ServletResponse; -import javax.servlet.http.HttpServletRequest; -import javax.servlet.http.HttpServletRequestWrapper; -import javax.servlet.http.HttpServletResponse; -import javax.servlet.http.HttpServletResponseWrapper; -import java.io.IOException; -import java.io.PrintWriter; -import java.net.URI; -import java.util.Map; -import java.util.Optional; -import java.util.concurrent.atomic.AtomicReference; - -import static com.yahoo.jdisc.http.server.jetty.RequestUtils.getConnector; -import static com.yahoo.yolean.Exceptions.throwUnchecked; - -/** - * Runs JDisc security filters for Servlets - * This component is split in two: - * 1) JDiscFilterInvokerFilter, which uses package private methods to support JDisc APIs - * 2) SecurityFilterInvoker, which is intended for use in a servlet context. 
- * - * @author Tony Vaagenes - */ -class JDiscFilterInvokerFilter implements Filter { - private final JDiscContext jDiscContext; - private final FilterInvoker filterInvoker; - - public JDiscFilterInvokerFilter(JDiscContext jDiscContext, - FilterInvoker filterInvoker) { - this.jDiscContext = jDiscContext; - this.filterInvoker = filterInvoker; - } - - - @Override - public void init(FilterConfig filterConfig) throws ServletException {} - - @Override - public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain) throws IOException, ServletException { - HttpServletRequest httpRequest = (HttpServletRequest)request; - HttpServletResponse httpResponse = (HttpServletResponse)response; - - URI uri; - try { - uri = HttpRequestFactory.getUri(httpRequest); - } catch (RequestException e) { - httpResponse.sendError(e.getResponseStatus(), e.getMessage()); - return; - } - - AtomicReference<Boolean> responseReturned = new AtomicReference<>(null); - - HttpServletRequest newRequest = runRequestFilterWithMatchingBinding(responseReturned, uri, httpRequest, httpResponse); - assert newRequest != null; - responseReturned.compareAndSet(null, false); - - if (!responseReturned.get()) { - runChainAndResponseFilters(uri, newRequest, httpResponse, chain); - } - } - - private void runChainAndResponseFilters(URI uri, HttpServletRequest request, HttpServletResponse response, FilterChain chain) throws IOException, ServletException { - Optional<OneTimeRunnable> responseFilterInvoker = - jDiscContext.filterResolver.resolveResponseFilter(toJettyRequest(request), uri) - .map(responseFilter -> - new OneTimeRunnable(() -> - filterInvoker.invokeResponseFilterChain(responseFilter, uri, request, response))); - - - HttpServletResponse responseForServlet = responseFilterInvoker - .<HttpServletResponse>map(invoker -> - new FilterInvokingResponseWrapper(response, invoker)) - .orElse(response); - - HttpServletRequest requestForServlet = responseFilterInvoker - 
.<HttpServletRequest>map(invoker -> - new FilterInvokingRequestWrapper(request, invoker, responseForServlet)) - .orElse(request); - - chain.doFilter(requestForServlet, responseForServlet); - - responseFilterInvoker.ifPresent(invoker -> { - boolean requestHandledSynchronously = !request.isAsyncStarted(); - - if (requestHandledSynchronously) { - invoker.runIfFirstInvocation(); - } - // For async requests, response filters will be invoked on AsyncContext.complete(). - }); - } - - private HttpServletRequest runRequestFilterWithMatchingBinding(AtomicReference<Boolean> responseReturned, URI uri, HttpServletRequest request, HttpServletResponse response) throws IOException { - try { - RequestFilter requestFilter = jDiscContext.filterResolver.resolveRequestFilter(toJettyRequest(request), uri).orElse(null); - if (requestFilter == null) - return request; - - ResponseHandler responseHandler = createResponseHandler(responseReturned, request, response); - return filterInvoker.invokeRequestFilterChain(requestFilter, uri, request, responseHandler); - } catch (Exception e) { - throw new RuntimeException("Failed running request filter chain for uri " + uri, e); - } - } - - private ResponseHandler createResponseHandler(AtomicReference<Boolean> responseReturned, HttpServletRequest httpRequest, HttpServletResponse httpResponse) { - return jdiscResponse -> { - boolean oldValueWasNull = responseReturned.compareAndSet(null, true); - if (!oldValueWasNull) - throw new RuntimeException("Can't return response from filter asynchronously"); - - HttpRequestDispatch requestDispatch = createRequestDispatch(httpRequest, httpResponse); - return requestDispatch.dispatchFilterRequest(jdiscResponse); - }; - } - - private HttpRequestDispatch createRequestDispatch(HttpServletRequest request, HttpServletResponse response) { - try { - final AccessLogEntry accessLogEntry = null; // Not used in this context. 
- return new HttpRequestDispatch(jDiscContext, - accessLogEntry, - getConnector(toJettyRequest(request)).createRequestMetricContext(request, Map.of()), - request, response); - } catch (IOException e) { - throw throwUnchecked(e); - } - } - - private static Request toJettyRequest(HttpServletRequest request) { - if (request instanceof com.yahoo.jdisc.http.servlet.ServletRequest) { - return (Request) ((com.yahoo.jdisc.http.servlet.ServletRequest)request).getRequest(); - } - return (Request) request; - } - - @Override - public void destroy() {} - - // ServletRequest wrapper that is necessary because we need to wrap AsyncContext. - private static class FilterInvokingRequestWrapper extends HttpServletRequestWrapper { - private final OneTimeRunnable filterInvoker; - private final HttpServletResponse servletResponse; - - public FilterInvokingRequestWrapper( - HttpServletRequest request, - OneTimeRunnable filterInvoker, - HttpServletResponse servletResponse) { - super(request); - this.filterInvoker = filterInvoker; - this.servletResponse = servletResponse; - } - - @Override - public AsyncContext startAsync() { - final AsyncContext asyncContext = super.startAsync(); - return new FilterInvokingAsyncContext(asyncContext, filterInvoker, this, servletResponse); - } - - @Override - public AsyncContext startAsync( - final ServletRequest wrappedRequest, - final ServletResponse wrappedResponse) { - // According to the documentation, the passed request/response parameters here must either - // _be_ or _wrap_ the original request/response objects passed to the servlet - which are - // our wrappers, so no need to wrap again - we can use the user-supplied objects. 
- final AsyncContext asyncContext = super.startAsync(wrappedRequest, wrappedResponse); - return new FilterInvokingAsyncContext(asyncContext, filterInvoker, this, wrappedResponse); - } - - @Override - public AsyncContext getAsyncContext() { - final AsyncContext asyncContext = super.getAsyncContext(); - return new FilterInvokingAsyncContext(asyncContext, filterInvoker, this, servletResponse); - } - } - - // AsyncContext wrapper that is necessary for two reasons: - // 1) Run response filters when AsyncContext.complete() is called. - // 2) Eliminate paths where application code can get its hands on un-wrapped response object, circumventing - // running of response filters. - private static class FilterInvokingAsyncContext implements AsyncContext { - private final AsyncContext delegate; - private final OneTimeRunnable filterInvoker; - private final ServletRequest servletRequest; - private final ServletResponse servletResponse; - - public FilterInvokingAsyncContext( - AsyncContext delegate, - OneTimeRunnable filterInvoker, - ServletRequest servletRequest, - ServletResponse servletResponse) { - this.delegate = delegate; - this.filterInvoker = filterInvoker; - this.servletRequest = servletRequest; - this.servletResponse = servletResponse; - } - - @Override - public ServletRequest getRequest() { - return servletRequest; - } - - @Override - public ServletResponse getResponse() { - return servletResponse; - } - - @Override - public boolean hasOriginalRequestAndResponse() { - return delegate.hasOriginalRequestAndResponse(); - } - - @Override - public void dispatch() { - delegate.dispatch(); - } - - @Override - public void dispatch(String s) { - delegate.dispatch(s); - } - - @Override - public void dispatch(ServletContext servletContext, String s) { - delegate.dispatch(servletContext, s); - } - - @Override - public void complete() { - // Completing may commit the response, so this is the last chance to run response filters. 
- filterInvoker.runIfFirstInvocation(); - delegate.complete(); - } - - @Override - public void start(Runnable runnable) { - delegate.start(runnable); - } - - @Override - public void addListener(AsyncListener asyncListener) { - delegate.addListener(asyncListener); - } - - @Override - public void addListener(AsyncListener asyncListener, ServletRequest servletRequest, ServletResponse servletResponse) { - delegate.addListener(asyncListener, servletRequest, servletResponse); - } - - @Override - public <T extends AsyncListener> T createListener(Class<T> aClass) throws ServletException { - return delegate.createListener(aClass); - } - - @Override - public void setTimeout(long l) { - delegate.setTimeout(l); - } - - @Override - public long getTimeout() { - return delegate.getTimeout(); - } - } - - private static class FilterInvokingResponseWrapper extends HttpServletResponseWrapper { - private final OneTimeRunnable filterInvoker; - - public FilterInvokingResponseWrapper(HttpServletResponse response, OneTimeRunnable filterInvoker) { - super(response); - this.filterInvoker = filterInvoker; - } - - @Override - public ServletOutputStream getOutputStream() throws IOException { - ServletOutputStream delegate = super.getOutputStream(); - return new FilterInvokingServletOutputStream(delegate, filterInvoker); - } - - @Override - public PrintWriter getWriter() throws IOException { - PrintWriter delegate = super.getWriter(); - return new FilterInvokingPrintWriter(delegate, filterInvoker); - } - } -} diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/JDiscServerConnector.java b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/JDiscServerConnector.java index 0e511fd3eaf..79cdb8f67cf 100644 --- a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/JDiscServerConnector.java +++ b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/JDiscServerConnector.java @@ -73,7 +73,7 @@ class JDiscServerConnector extends ServerConnector { 
public Metric.Context createRequestMetricContext(HttpServletRequest request, Map<String, String> extraDimensions) { String method = request.getMethod(); String scheme = request.getScheme(); - boolean clientAuthenticated = request.getAttribute(com.yahoo.jdisc.http.servlet.ServletRequest.SERVLET_REQUEST_X509CERT) != null; + boolean clientAuthenticated = request.getAttribute(RequestUtils.SERVLET_REQUEST_X509CERT) != null; Map<String, Object> dimensions = createConnectorDimensions(listenPort, connectorName); dimensions.put(MetricDefinitions.METHOD_DIMENSION, method); dimensions.put(MetricDefinitions.SCHEME_DIMENSION, scheme); diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/JettyHttpServer.java b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/JettyHttpServer.java index 3f2a91c60b5..fca34f3bbd7 100644 --- a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/JettyHttpServer.java +++ b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/JettyHttpServer.java @@ -2,14 +2,12 @@ package com.yahoo.jdisc.http.server.jetty; import com.google.inject.Inject; -import com.yahoo.component.ComponentId; import com.yahoo.component.provider.ComponentRegistry; import com.yahoo.container.logging.ConnectionLog; import com.yahoo.container.logging.RequestLog; import com.yahoo.jdisc.Metric; import com.yahoo.jdisc.http.ConnectorConfig; import com.yahoo.jdisc.http.ServerConfig; -import com.yahoo.jdisc.http.ServletPathsConfig; import com.yahoo.jdisc.service.AbstractServerProvider; import com.yahoo.jdisc.service.CurrentContainer; import org.eclipse.jetty.http.HttpField; @@ -24,7 +22,6 @@ import org.eclipse.jetty.server.handler.HandlerCollection; import org.eclipse.jetty.server.handler.StatisticsHandler; import org.eclipse.jetty.server.handler.gzip.GzipHandler; import org.eclipse.jetty.server.handler.gzip.GzipHttpOutputInterceptor; -import org.eclipse.jetty.servlet.FilterHolder; import org.eclipse.jetty.servlet.ServletContextHandler; 
import org.eclipse.jetty.servlet.ServletHolder; import org.eclipse.jetty.util.log.JavaUtilLog; @@ -32,14 +29,12 @@ import org.eclipse.jetty.util.log.Log; import org.eclipse.jetty.util.thread.QueuedThreadPool; import javax.management.remote.JMXServiceURL; -import javax.servlet.DispatcherType; import java.io.IOException; import java.lang.management.ManagementFactory; import java.net.BindException; import java.net.MalformedURLException; import java.util.ArrayList; import java.util.Arrays; -import java.util.EnumSet; import java.util.List; import java.util.logging.Level; import java.util.logging.Logger; @@ -63,12 +58,9 @@ public class JettyHttpServer extends AbstractServerProvider { public JettyHttpServer(CurrentContainer container, Metric metric, ServerConfig serverConfig, - ServletPathsConfig servletPathsConfig, FilterBindings filterBindings, Janitor janitor, ComponentRegistry<ConnectorFactory> connectorFactories, - ComponentRegistry<ServletHolder> servletHolders, - FilterInvoker filterInvoker, RequestLog requestLog, ConnectionLog connectionLog) { super(container); @@ -98,18 +90,13 @@ public class JettyHttpServer extends AbstractServerProvider { serverConfig); ServletHolder jdiscServlet = new ServletHolder(new JDiscHttpServlet(jDiscContext)); - FilterHolder jDiscFilterInvokerFilter = new FilterHolder(new JDiscFilterInvokerFilter(jDiscContext, filterInvoker)); - List<JDiscServerConnector> connectors = Arrays.stream(server.getConnectors()) .map(JDiscServerConnector.class::cast) .collect(toList()); server.setHandler(getHandlerCollection(serverConfig, - servletPathsConfig, connectors, - jdiscServlet, - servletHolders, - jDiscFilterInvokerFilter)); + jdiscServlet)); this.metricsReporter = new ServerMetricReporter(metric, server); } @@ -150,19 +137,9 @@ public class JettyHttpServer extends AbstractServerProvider { } private HandlerCollection getHandlerCollection(ServerConfig serverConfig, - ServletPathsConfig servletPathsConfig, List<JDiscServerConnector> connectors, - 
ServletHolder jdiscServlet, - ComponentRegistry<ServletHolder> servletHolders, - FilterHolder jDiscFilterInvokerFilter) { + ServletHolder jdiscServlet) { ServletContextHandler servletContextHandler = createServletContextHandler(); - - servletHolders.allComponentsById().forEach((id, servlet) -> { - String path = getServletPath(servletPathsConfig, id); - servletContextHandler.addServlet(servlet, path); - servletContextHandler.addFilter(jDiscFilterInvokerFilter, path, EnumSet.allOf(DispatcherType.class)); - }); - servletContextHandler.addServlet(jdiscServlet, "/*"); List<ConnectorConfig> connectorConfigs = connectors.stream().map(JDiscServerConnector::connectorConfig).collect(toList()); @@ -191,10 +168,6 @@ public class JettyHttpServer extends AbstractServerProvider { return handlerCollection; } - private static String getServletPath(ServletPathsConfig servletPathsConfig, ComponentId id) { - return "/" + servletPathsConfig.servlets(id.stringValue()).path(); - } - private ServletContextHandler createServletContextHandler() { ServletContextHandler servletContextHandler = new ServletContextHandler(ServletContextHandler.NO_SECURITY | ServletContextHandler.NO_SESSIONS); servletContextHandler.setContextPath("/"); diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/MetricDefinitions.java b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/MetricDefinitions.java index 71c60ad6737..b3a7ebc761a 100644 --- a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/MetricDefinitions.java +++ b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/MetricDefinitions.java @@ -17,6 +17,7 @@ class MetricDefinitions { static final String REQUEST_SERVER_NAME_DIMENSION = "requestServerName"; static final String FILTER_CHAIN_ID_DIMENSION = "chainId"; static final String PROTOCOL_DIMENSION = "protocol"; + static final String STATUS_CODE_DIMENSION = "statusCode"; static final String NUM_OPEN_CONNECTIONS = "serverNumOpenConnections"; static 
final String NUM_CONNECTIONS_OPEN_MAX = "serverConnectionsOpenMax"; @@ -49,8 +50,8 @@ class MetricDefinitions { static final String RESPONSES_3XX = "http.status.3xx"; static final String RESPONSES_4XX = "http.status.4xx"; static final String RESPONSES_5XX = "http.status.5xx"; - static final String RESPONSES_401 = "http.status.401"; - static final String RESPONSES_403 = "http.status.403"; + @Deprecated(forRemoval = true, since = "7") static final String RESPONSES_401 = "http.status.401"; + @Deprecated(forRemoval = true, since = "7") static final String RESPONSES_403 = "http.status.403"; static final String STARTED_MILLIS = "serverStartedMillis"; diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/OneTimeRunnable.java b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/OneTimeRunnable.java deleted file mode 100644 index 24cc41d009f..00000000000 --- a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/OneTimeRunnable.java +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
-package com.yahoo.jdisc.http.server.jetty; - -import java.util.concurrent.atomic.AtomicBoolean; - -/** - * @author Tony Vaagenes - */ -public class OneTimeRunnable { - private final Runnable runnable; - private final AtomicBoolean hasRun = new AtomicBoolean(false); - - public OneTimeRunnable(Runnable runnable) { - this.runnable = runnable; - } - - public void runIfFirstInvocation() { - boolean previous = hasRun.getAndSet(true); - if (!previous) { - runnable.run(); - } - } -} diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/RequestUtils.java b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/RequestUtils.java index 1bddd491496..ae18c78a7d3 100644 --- a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/RequestUtils.java +++ b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/RequestUtils.java @@ -12,6 +12,11 @@ import javax.servlet.http.HttpServletRequest; * @author bjorncs */ public class RequestUtils { + public static final String JDISC_REQUEST_X509CERT = "jdisc.request.X509Certificate"; + public static final String JDISC_REQUEST_CHAIN = "jdisc.request.chain"; + public static final String JDISC_RESPONSE_CHAIN = "jdisc.response.chain"; + public static final String SERVLET_REQUEST_X509CERT = "javax.servlet.request.X509Certificate"; + private RequestUtils() {} public static Connection getConnection(Request request) { diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/ServerMetricReporter.java b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/ServerMetricReporter.java index 4ab0e388579..9340dda2652 100644 --- a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/ServerMetricReporter.java +++ b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/ServerMetricReporter.java @@ -11,8 +11,6 @@ import org.eclipse.jetty.server.handler.StatisticsHandler; import org.eclipse.jetty.util.thread.QueuedThreadPool; import java.time.Instant; -import 
java.util.HashMap; -import java.util.Map; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; @@ -81,14 +79,7 @@ class ServerMetricReporter { } private void addResponseMetrics(HttpResponseStatisticsCollector statisticsCollector) { - for (var metricEntry : statisticsCollector.takeStatistics()) { - Map<String, Object> dimensions = new HashMap<>(); - dimensions.put(MetricDefinitions.METHOD_DIMENSION, metricEntry.method); - dimensions.put(MetricDefinitions.SCHEME_DIMENSION, metricEntry.scheme); - dimensions.put(MetricDefinitions.REQUEST_TYPE_DIMENSION, metricEntry.requestType); - dimensions.put(MetricDefinitions.PROTOCOL_DIMENSION, metricEntry.protocol); - metric.add(metricEntry.name, metricEntry.value, metric.createContext(dimensions)); - } + statisticsCollector.reportSnapshot(metric); } private void setJettyThreadpoolMetrics() { diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/ServletRequestReader.java b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/ServletRequestReader.java index 1def9ccaab1..43050a53f58 100644 --- a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/ServletRequestReader.java +++ b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/ServletRequestReader.java @@ -6,6 +6,7 @@ import com.yahoo.jdisc.handler.ContentChannel; import javax.servlet.ReadListener; import javax.servlet.ServletInputStream; +import javax.servlet.http.HttpServletRequest; import java.io.IOException; import java.nio.ByteBuffer; import java.util.Objects; @@ -33,7 +34,7 @@ import java.util.logging.Logger; class ServletRequestReader { private enum State { - READING, ALL_DATA_READ, REQUEST_CONTENT_CLOSED + NOT_STARTED, READING, ALL_DATA_READ, REQUEST_CONTENT_CLOSED } private static final Logger log = Logger.getLogger(ServletRequestReader.class.getName()); @@ -42,11 +43,12 @@ class ServletRequestReader { private final Object monitor = new 
Object(); - private final ServletInputStream in; + private final HttpServletRequest req; private final ContentChannel requestContentChannel; private final Janitor janitor; private final RequestMetricReporter metricReporter; + private ServletInputStream in; private Throwable errorDuringRead; private int bytesRead; @@ -63,7 +65,7 @@ class ServletRequestReader { * (i.e. when being called from user code, don't call back into user code.) */ // GuardedBy("monitor") - private State state = State.READING; + private State state = State.NOT_STARTED; /** * Number of calls that we're waiting for from user code. @@ -94,15 +96,31 @@ class ServletRequestReader { private final CompletableFuture<Void> finishedFuture = new CompletableFuture<>(); ServletRequestReader( - ServletInputStream in, + HttpServletRequest req, ContentChannel requestContentChannel, Janitor janitor, RequestMetricReporter metricReporter) { - this.in = Objects.requireNonNull(in); + this.req = Objects.requireNonNull(req); this.requestContentChannel = Objects.requireNonNull(requestContentChannel); this.janitor = Objects.requireNonNull(janitor); this.metricReporter = Objects.requireNonNull(metricReporter); - in.setReadListener(new Listener()); + } + + /** Register read listener to start reading request data */ + void start() { + try { + ServletInputStream in; + synchronized (monitor) { + if (state != State.NOT_STARTED) throw new IllegalStateException("State=" + state); + in = req.getInputStream(); // may throw + this.in = in; + state = State.READING; + } + // Not holding monitor in case listener is invoked from this thread + in.setReadListener(new Listener()); // may throw + } catch (Throwable t) { + fail(t); + } } CompletableFuture<Void> finishedFuture() { return finishedFuture; } @@ -111,6 +129,8 @@ class ServletRequestReader { @Override public void onDataAvailable() throws IOException { + ServletInputStream in; + synchronized (monitor) { in = ServletRequestReader.this.in; } while (in.isReady()) { final byte[] 
buffer = new byte[BUFFER_SIZE_BYTES]; int numBytesRead; diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/ServletResponseController.java b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/ServletResponseController.java index ffa31a9e8de..e90dde0e4eb 100644 --- a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/ServletResponseController.java +++ b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/ServletResponseController.java @@ -32,27 +32,32 @@ import static com.yahoo.jdisc.http.server.jetty.CompletionHandlerUtils.NOOP_COMP */ class ServletResponseController { + private enum State { + WAITING_FOR_RESPONSE, + ACCEPTED_RESPONSE_FROM_HANDLER, + COMMITTED_RESPONSE_FROM_HANDLER, + COMPLETED_WITH_RESPONSE_FROM_HANDLER, + COMPLETED_WITH_ERROR_RESPONSE + } + private static final Logger log = Logger.getLogger(ServletResponseController.class.getName()); /** - * The servlet spec does not require (Http)ServletResponse nor ServletOutputStream to be thread-safe. Therefore, - * we must provide our own synchronization, since we may attempt to access these objects simultaneously from - * different threads. (The typical cause of this is when one thread is writing a response while another thread - * throws an exception, causing the request to fail with an error response). + * Only a single thread must modify {@link HttpServletRequest}/{@link HttpServletResponse} at a time, + * and it must only be performed when the response is committed. + * The response cannot be modified once response content is being written. */ private final Object monitor = new Object(); - //servletResponse must not be modified after the response has been committed. 
private final HttpServletRequest servletRequest; private final HttpServletResponse servletResponse; private final boolean developerMode; private final ErrorResponseContentCreator errorResponseContentCreator = new ErrorResponseContentCreator(); - - //all calls to the servletOutputStreamWriter must hold the monitor first to ensure visibility of servletResponse changes. private final ServletOutputStreamWriter out; // GuardedBy("monitor") - private boolean responseCommitted = false; + private State state = State.WAITING_FOR_RESPONSE; + private Response handlerResponse; ServletResponseController( HttpServletRequest servletRequest, @@ -71,7 +76,24 @@ class ServletResponseController { void trySendErrorResponse(Throwable t) { synchronized (monitor) { try { - sendErrorResponseIfUncommitted(t); + switch (state) { + case WAITING_FOR_RESPONSE: + case ACCEPTED_RESPONSE_FROM_HANDLER: + state = State.COMPLETED_WITH_ERROR_RESPONSE; + break; + case COMMITTED_RESPONSE_FROM_HANDLER: + case COMPLETED_WITH_RESPONSE_FROM_HANDLER: + if (log.isLoggable(Level.FINE)) { + RuntimeException exceptionWithStackTrace = new RuntimeException(t); + log.log(Level.FINE, "Response already committed, can't change response code", exceptionWithStackTrace); + } + return; + case COMPLETED_WITH_ERROR_RESPONSE: + return; + default: + throw new IllegalStateException(); + } + writeErrorResponse(t); } catch (Throwable suppressed) { t.addSuppressed(suppressed); } finally { @@ -93,34 +115,28 @@ class ServletResponseController { ResponseHandler responseHandler() { return responseHandler; } - private void sendErrorResponseIfUncommitted(Throwable t) { - if (!responseCommitted) { - responseCommitted = true; - servletResponse.setHeader(HttpHeaders.Names.EXPIRES, null); - servletResponse.setHeader(HttpHeaders.Names.LAST_MODIFIED, null); - servletResponse.setHeader(HttpHeaders.Names.CACHE_CONTROL, null); - servletResponse.setHeader(HttpHeaders.Names.CONTENT_TYPE, null); - 
servletResponse.setHeader(HttpHeaders.Names.CONTENT_LENGTH, null); - String reasonPhrase = getReasonPhrase(t, developerMode); - int statusCode = getStatusCode(t); - setStatus(servletResponse, statusCode, reasonPhrase); - // If we are allowed to have a body - if (statusCode != HttpServletResponse.SC_NO_CONTENT && - statusCode != HttpServletResponse.SC_NOT_MODIFIED && - statusCode != HttpServletResponse.SC_PARTIAL_CONTENT && - statusCode >= HttpServletResponse.SC_OK) { - servletResponse.setHeader(HttpHeaders.Names.CACHE_CONTROL, "must-revalidate,no-cache,no-store"); - servletResponse.setContentType(MimeTypes.Type.TEXT_HTML_8859_1.toString()); - byte[] errorContent = errorResponseContentCreator - .createErrorContent(servletRequest.getRequestURI(), statusCode, reasonPhrase); - servletResponse.setContentLength(errorContent.length); - out.writeBuffer(ByteBuffer.wrap(errorContent), NOOP_COMPLETION_HANDLER); - } else { - servletResponse.setContentLength(0); - } + private void writeErrorResponse(Throwable t) { + servletResponse.setHeader(HttpHeaders.Names.EXPIRES, null); + servletResponse.setHeader(HttpHeaders.Names.LAST_MODIFIED, null); + servletResponse.setHeader(HttpHeaders.Names.CACHE_CONTROL, null); + servletResponse.setHeader(HttpHeaders.Names.CONTENT_TYPE, null); + servletResponse.setHeader(HttpHeaders.Names.CONTENT_LENGTH, null); + String reasonPhrase = getReasonPhrase(t, developerMode); + int statusCode = getStatusCode(t); + setStatus(servletResponse, statusCode, reasonPhrase); + // If we are allowed to have a body + if (statusCode != HttpServletResponse.SC_NO_CONTENT && + statusCode != HttpServletResponse.SC_NOT_MODIFIED && + statusCode != HttpServletResponse.SC_PARTIAL_CONTENT && + statusCode >= HttpServletResponse.SC_OK) { + servletResponse.setHeader(HttpHeaders.Names.CACHE_CONTROL, "must-revalidate,no-cache,no-store"); + servletResponse.setContentType(MimeTypes.Type.TEXT_HTML_8859_1.toString()); + byte[] errorContent = errorResponseContentCreator + 
.createErrorContent(servletRequest.getRequestURI(), statusCode, reasonPhrase); + servletResponse.setContentLength(errorContent.length); + out.writeBuffer(ByteBuffer.wrap(errorContent), NOOP_COMPLETION_HANDLER); } else { - RuntimeException exceptionWithStackTrace = new RuntimeException(t); - log.log(Level.FINE, "Response already committed, can't change response code", exceptionWithStackTrace); + servletResponse.setContentLength(0); } } @@ -151,60 +167,79 @@ class ServletResponseController { } } - private void setResponse(Response jdiscResponse) { + private void acceptResponseFromHandler(Response response) { synchronized (monitor) { - servletRequest.setAttribute(HttpResponseStatisticsCollector.requestTypeAttribute, jdiscResponse.getRequestType()); - if (responseCommitted) { - log.log(Level.FINE, - jdiscResponse.getError(), - () -> "Response already committed, can't change response code. " + - "From: " + servletResponse.getStatus() + ", To: " + jdiscResponse.getStatus()); - - //TODO: should throw an exception here, but this breaks unit tests. - //The failures will now instead happen when writing buffers. 
- out.close(); - return; - } - - if (jdiscResponse instanceof HttpResponse) { - setStatus(servletResponse, jdiscResponse.getStatus(), ((HttpResponse) jdiscResponse).getMessage()); - } else { - String message = Optional.ofNullable(jdiscResponse.getError()) - .flatMap(error -> Optional.ofNullable(error.getMessage())) - .orElse(null); - setStatus(servletResponse, jdiscResponse.getStatus(), message); - } - for (final Map.Entry<String, String> entry : jdiscResponse.headers().entries()) { - servletResponse.addHeader(entry.getKey(), entry.getValue()); - } - if (servletResponse.getContentType() == null) { - servletResponse.setContentType("text/plain;charset=utf-8"); + switch (state) { + case WAITING_FOR_RESPONSE: + case ACCEPTED_RESPONSE_FROM_HANDLER: // Allow multiple invocations to ResponseHandler.handleResponse() + handlerResponse = response; + state = State.ACCEPTED_RESPONSE_FROM_HANDLER; + servletRequest.setAttribute( + HttpResponseStatisticsCollector.requestTypeAttribute, handlerResponse.getRequestType()); + return; + case COMMITTED_RESPONSE_FROM_HANDLER: + case COMPLETED_WITH_RESPONSE_FROM_HANDLER: + String message = "Response already committed, can't change response code. " + + "From: " + servletResponse.getStatus() + ", To: " + response.getStatus(); + log.log(Level.FINE, message, response.getError()); + throw new IllegalStateException(message); + case COMPLETED_WITH_ERROR_RESPONSE: + log.log(Level.FINE, "Error response already written"); + return; // Silently ignore response from handler when request was failed out + default: + throw new IllegalStateException(); } } } - @SuppressWarnings("deprecation") private static void setStatus(HttpServletResponse response, int statusCode, String reasonPhrase) { + org.eclipse.jetty.server.Response jettyResponse = (org.eclipse.jetty.server.Response) response; if (reasonPhrase != null) { - // Sets the status line: a status code along with a custom message. - // Using a custom status message is deprecated in the Servlet API. 
No alternative exist. - response.setStatus(statusCode, reasonPhrase); // DEPRECATED + jettyResponse.setStatusWithReason(statusCode, reasonPhrase); } else { - response.setStatus(statusCode); + jettyResponse.setStatus(statusCode); } } - private void ensureCommitted() { + private void commitResponseFromHandlerIfUncommitted(boolean close) { synchronized (monitor) { - responseCommitted = true; + switch (state) { + case ACCEPTED_RESPONSE_FROM_HANDLER: + state = close ? State.COMPLETED_WITH_RESPONSE_FROM_HANDLER : State.COMMITTED_RESPONSE_FROM_HANDLER; + break; + case WAITING_FOR_RESPONSE: + throw new IllegalStateException("No response provided"); + case COMMITTED_RESPONSE_FROM_HANDLER: + case COMPLETED_WITH_RESPONSE_FROM_HANDLER: + return; + case COMPLETED_WITH_ERROR_RESPONSE: + log.fine("An error response is already committed - failure will be handled by ServletOutputStreamWriter"); + return; + default: + throw new IllegalStateException(); + } + if (handlerResponse instanceof HttpResponse) { + setStatus(servletResponse, handlerResponse.getStatus(), ((HttpResponse) handlerResponse).getMessage()); + } else { + String message = Optional.ofNullable(handlerResponse.getError()) + .flatMap(error -> Optional.ofNullable(error.getMessage())) + .orElse(null); + setStatus(servletResponse, handlerResponse.getStatus(), message); + } + for (final Map.Entry<String, String> entry : handlerResponse.headers().entries()) { + servletResponse.addHeader(entry.getKey(), entry.getValue()); + } + if (servletResponse.getContentType() == null) { + servletResponse.setContentType("text/plain;charset=utf-8"); + } } } private final ResponseHandler responseHandler = new ResponseHandler() { @Override public ContentChannel handleResponse(Response response) { - setResponse(response); + acceptResponseFromHandler(response); return responseContentChannel; } }; @@ -212,13 +247,13 @@ class ServletResponseController { private final ContentChannel responseContentChannel = new ContentChannel() { @Override public 
void write(ByteBuffer buf, CompletionHandler handler) { - ensureCommitted(); + commitResponseFromHandlerIfUncommitted(false); out.writeBuffer(buf, handlerOrNoopHandler(handler)); } @Override public void close(CompletionHandler handler) { - ensureCommitted(); + commitResponseFromHandlerIfUncommitted(true); out.close(handlerOrNoopHandler(handler)); } diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/TlsClientAuthenticationEnforcer.java b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/TlsClientAuthenticationEnforcer.java index b9293226528..3059f972ce9 100644 --- a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/TlsClientAuthenticationEnforcer.java +++ b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/TlsClientAuthenticationEnforcer.java @@ -3,7 +3,6 @@ package com.yahoo.jdisc.http.server.jetty; import com.yahoo.jdisc.Response; import com.yahoo.jdisc.http.ConnectorConfig; -import com.yahoo.jdisc.http.servlet.ServletRequest; import org.eclipse.jetty.server.Request; import org.eclipse.jetty.server.handler.HandlerWrapper; @@ -78,6 +77,6 @@ class TlsClientAuthenticationEnforcer extends HandlerWrapper { } private boolean isClientAuthenticated(HttpServletRequest servletRequest) { - return servletRequest.getAttribute(ServletRequest.SERVLET_REQUEST_X509CERT) != null; + return servletRequest.getAttribute(RequestUtils.SERVLET_REQUEST_X509CERT) != null; } } diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/UnsupportedFilterInvoker.java b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/UnsupportedFilterInvoker.java deleted file mode 100644 index 8d878b64e6f..00000000000 --- a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/UnsupportedFilterInvoker.java +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
-package com.yahoo.jdisc.http.server.jetty; - -import com.yahoo.jdisc.handler.ResponseHandler; -import com.yahoo.jdisc.http.filter.RequestFilter; -import com.yahoo.jdisc.http.filter.ResponseFilter; - -import javax.servlet.http.HttpServletResponse; -import javax.servlet.http.HttpServletRequest; -import java.net.URI; - -/** - * @author Tony Vaagenes - */ -public class UnsupportedFilterInvoker implements FilterInvoker { - @Override - public HttpServletRequest invokeRequestFilterChain(RequestFilter requestFilterChain, - URI uri, - HttpServletRequest httpRequest, - ResponseHandler responseHandler) { - throw new UnsupportedOperationException(); - } - - @Override - public void invokeResponseFilterChain( - ResponseFilter responseFilterChain, - URI uri, - HttpServletRequest request, - HttpServletResponse response) { - throw new UnsupportedOperationException(); - } -} diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/testutils/ServletModule.java b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/testutils/ServletModule.java deleted file mode 100644 index bb69832d767..00000000000 --- a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/testutils/ServletModule.java +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
-package com.yahoo.jdisc.http.server.jetty.testutils; - -import com.google.inject.Binder; -import com.google.inject.Module; -import com.google.inject.Provides; -import com.yahoo.component.provider.ComponentRegistry; - -import org.eclipse.jetty.servlet.ServletHolder; - -/** - * @author Tony Vaagenes - */ -public class ServletModule implements Module { - - @SuppressWarnings("unused") - @Provides - public ComponentRegistry<ServletHolder> servletHolderComponentRegistry() { - return new ComponentRegistry<>(); - } - - @Override public void configure(Binder binder) { } -} diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/testutils/TestDriver.java b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/testutils/TestDriver.java index 99c49527ea5..ec0258e8763 100644 --- a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/testutils/TestDriver.java +++ b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/testutils/TestDriver.java @@ -10,7 +10,6 @@ import com.yahoo.jdisc.application.ContainerBuilder; import com.yahoo.jdisc.handler.RequestHandler; import com.yahoo.jdisc.http.ConnectorConfig; import com.yahoo.jdisc.http.ServerConfig; -import com.yahoo.jdisc.http.ServletPathsConfig; import com.yahoo.jdisc.http.server.jetty.FilterBindings; import com.yahoo.jdisc.http.server.jetty.JettyHttpServer; import com.yahoo.jdisc.http.server.jetty.VoidConnectionLog; @@ -84,7 +83,6 @@ public class TestDriver implements AutoCloseable { new AbstractModule() { @Override protected void configure() { - bind(ServletPathsConfig.class).toInstance(new ServletPathsConfig(new ServletPathsConfig.Builder())); bind(ServerConfig.class).toInstance(serverConfig); bind(ConnectorConfig.class).toInstance(connectorConfig); bind(FilterBindings.class).toInstance(new FilterBindings.Builder().build()); @@ -92,8 +90,7 @@ public class TestDriver implements AutoCloseable { bind(RequestLog.class).toInstance(new VoidRequestLog()); } }, - new 
ConnectorFactoryRegistryModule(connectorConfig), - new ServletModule()); + new ConnectorFactoryRegistryModule(connectorConfig)); } public static class Builder { diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/servlet/ServletOrJdiscHttpRequest.java b/container-core/src/main/java/com/yahoo/jdisc/http/servlet/ServletOrJdiscHttpRequest.java deleted file mode 100644 index 3990c9a8910..00000000000 --- a/container-core/src/main/java/com/yahoo/jdisc/http/servlet/ServletOrJdiscHttpRequest.java +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -package com.yahoo.jdisc.http.servlet; - -import com.yahoo.jdisc.HeaderFields; -import com.yahoo.jdisc.http.Cookie; -import com.yahoo.jdisc.http.HttpRequest; - -import java.net.SocketAddress; -import java.net.URI; -import java.util.List; -import java.util.Map; -import java.util.concurrent.TimeUnit; - -/** - * Common interface for JDisc and servlet http requests. - */ -public interface ServletOrJdiscHttpRequest { - - void copyHeaders(HeaderFields target); - - Map<String, List<String>> parameters(); - - URI getUri(); - - HttpRequest.Version getVersion(); - - String getRemoteHostAddress(); - String getRemoteHostName(); - int getRemotePort(); - - void setRemoteAddress(SocketAddress remoteAddress); - - Map<String, Object> context(); - - List<Cookie> decodeCookieHeader(); - - void encodeCookieHeader(List<Cookie> cookies); - - long getConnectedAt(TimeUnit unit); -} diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/servlet/ServletOrJdiscHttpResponse.java b/container-core/src/main/java/com/yahoo/jdisc/http/servlet/ServletOrJdiscHttpResponse.java deleted file mode 100644 index a40e257b67d..00000000000 --- a/container-core/src/main/java/com/yahoo/jdisc/http/servlet/ServletOrJdiscHttpResponse.java +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
-package com.yahoo.jdisc.http.servlet; - -import com.yahoo.jdisc.HeaderFields; -import com.yahoo.jdisc.http.Cookie; - -import java.util.List; -import java.util.Map; - -/** - * Common interface for JDisc and servlet http responses. - */ -public interface ServletOrJdiscHttpResponse { - - public void copyHeaders(HeaderFields target); - - public int getStatus(); - - public Map<String, Object> context(); - - public List<Cookie> decodeSetCookieHeader(); - -} diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/servlet/ServletRequest.java b/container-core/src/main/java/com/yahoo/jdisc/http/servlet/ServletRequest.java deleted file mode 100644 index 8f17c9dc523..00000000000 --- a/container-core/src/main/java/com/yahoo/jdisc/http/servlet/ServletRequest.java +++ /dev/null @@ -1,273 +0,0 @@ -// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -package com.yahoo.jdisc.http.servlet; - -import com.google.common.collect.ImmutableMap; -import com.yahoo.jdisc.HeaderFields; -import com.yahoo.jdisc.http.Cookie; -import com.yahoo.jdisc.http.HttpHeaders; -import com.yahoo.jdisc.http.HttpRequest; -import org.eclipse.jetty.server.Request; - -import javax.servlet.http.HttpServletRequest; -import javax.servlet.http.HttpServletRequestWrapper; -import java.net.InetSocketAddress; -import java.net.SocketAddress; -import java.net.URI; -import java.security.Principal; -import java.util.Arrays; -import java.util.Collections; -import java.util.Enumeration; -import java.util.HashMap; -import java.util.HashSet; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.TimeUnit; - -import static com.yahoo.jdisc.http.server.jetty.RequestUtils.getConnection; - -/** - * Mutable wrapper to use a {@link javax.servlet.http.HttpServletRequest} - * with JDisc security filters. - * <p> - * You might find it tempting to remove e.g. the getParameter... 
methods, - * but keep in mind that this IS-A servlet request and must provide the - * full api of such a request for use outside the "JDisc filter world". - */ -public class ServletRequest extends HttpServletRequestWrapper implements ServletOrJdiscHttpRequest { - - public static final String JDISC_REQUEST_PRINCIPAL = "jdisc.request.principal"; - public static final String JDISC_REQUEST_X509CERT = "jdisc.request.X509Certificate"; - public static final String JDISC_REQUEST_CHAIN = "jdisc.request.chain"; - public static final String JDISC_RESPONSE_CHAIN = "jdisc.response.chain"; - public static final String SERVLET_REQUEST_X509CERT = "javax.servlet.request.X509Certificate"; - public static final String SERVLET_REQUEST_SSL_SESSION_ID = "javax.servlet.request.ssl_session_id"; - public static final String SERVLET_REQUEST_CIPHER_SUITE = "javax.servlet.request.cipher_suite"; - - private final HttpServletRequest request; - private final HeaderFields headerFields; - private final Set<String> removedHeaders = new HashSet<>(); - private final Map<String, Object> context = new HashMap<>(); - private final Map<String, List<String>> parameters = new HashMap<>(); - private final long connectedAt; - - private URI uri; - private String remoteHostAddress; - private String remoteHostName; - private int remotePort; - - public ServletRequest(HttpServletRequest request, URI uri) { - super(request); - this.request = request; - - this.uri = uri; - - super.getParameterMap().forEach( - (key, values) -> parameters.put(key, Arrays.asList(values))); - - remoteHostAddress = request.getRemoteAddr(); - remoteHostName = request.getRemoteHost(); - remotePort = request.getRemotePort(); - connectedAt = getConnection((Request) request).getCreatedTimeStamp(); - - headerFields = new HeaderFields(); - Enumeration<String> parentHeaders = request.getHeaderNames(); - while (parentHeaders.hasMoreElements()) { - String name = parentHeaders.nextElement(); - Enumeration<String> values = request.getHeaders(name); 
- while (values.hasMoreElements()) { - headerFields.add(name, values.nextElement()); - } - } - } - - public HttpServletRequest getRequest() { - return request; - } - - @Override - public Map<String, List<String>> parameters() { - return parameters; - } - - /* We cannot just return the parameter map from the request, as the map - * may have been modified by the JDisc filters. */ - @Override - public Map<String, String[]> getParameterMap() { - Map<String, String[]> parameterMap = new HashMap<>(); - parameters().forEach( - (key, values) -> - parameterMap.put(key, values.toArray(new String[values.size()])) - ); - return ImmutableMap.copyOf(parameterMap); - } - - @Override - public String getParameter(String name) { - return parameters().containsKey(name) ? - parameters().get(name).get(0) : - null; - } - - @Override - public Enumeration<String> getParameterNames() { - return Collections.enumeration(parameters.keySet()); - } - - @Override - public String[] getParameterValues(String name) { - List<String> values = parameters().get(name); - return values != null ? - values.toArray(new String[values.size()]) : - null; - } - - @Override - public void copyHeaders(HeaderFields target) { - target.addAll(headerFields); - } - - @Override - public Enumeration<String> getHeaders(String name) { - if (removedHeaders.contains(name)) - return null; - - /* We don't need to merge headerFields and the servlet request's headers - * because setHeaders() replaces the old value. There is no 'addHeader(s)'. */ - List<String> headerFields = this.headerFields.get(name); - return headerFields == null || headerFields.isEmpty() ? - super.getHeaders(name) : - Collections.enumeration(headerFields); - } - - @Override - public String getHeader(String name) { - if (removedHeaders.contains(name)) - return null; - - String headerField = headerFields.getFirst(name); - return headerField != null ? 
- headerField : - super.getHeader(name); - } - - @Override - public Enumeration<String> getHeaderNames() { - Set<String> names = new HashSet<>(Collections.list(super.getHeaderNames())); - names.addAll(headerFields.keySet()); - names.removeAll(removedHeaders); - return Collections.enumeration(names); - } - - public void addHeader(String name, String value) { - headerFields.add(name, value); - removedHeaders.remove(name); - } - - public void setHeaders(String name, String value) { - headerFields.put(name, value); - removedHeaders.remove(name); - } - - public void setHeaders(String name, List<String> values) { - headerFields.put(name, values); - removedHeaders.remove(name); - } - - public void removeHeaders(String name) { - headerFields.remove(name); - removedHeaders.add(name); - } - - @Override - public URI getUri() { - return uri; - } - - public void setUri(URI uri) { - this.uri = uri; - } - - @Override - public HttpRequest.Version getVersion() { - String protocol = request.getProtocol(); - try { - return HttpRequest.Version.fromString(protocol); - } catch (NullPointerException | IllegalArgumentException e) { - throw new RuntimeException("Servlet request protocol '" + protocol + - "' could not be mapped to a JDisc http version.", e); - } - } - - @Override - public String getRemoteHostAddress() { - return remoteHostAddress; - } - - @Override - public String getRemoteHostName() { - return remoteHostName; - } - - @Override - public int getRemotePort() { - return remotePort; - } - - @Override - public void setRemoteAddress(SocketAddress remoteAddress) { - if (remoteAddress instanceof InetSocketAddress) { - remoteHostAddress = ((InetSocketAddress) remoteAddress).getAddress().getHostAddress(); - remoteHostName = ((InetSocketAddress) remoteAddress).getAddress().getHostName(); - remotePort = ((InetSocketAddress) remoteAddress).getPort(); - } else - throw new RuntimeException("Unknown SocketAddress class: " + remoteHostAddress.getClass().getName()); - - } - - @Override - 
public Map<String, Object> context() { - return context; - } - - @Override - public javax.servlet.http.Cookie[] getCookies() { - return decodeCookieHeader().stream(). - map(jdiscCookie -> new javax.servlet.http.Cookie(jdiscCookie.getName(), jdiscCookie.getValue())). - toArray(javax.servlet.http.Cookie[]::new); - } - - @Override - public List<Cookie> decodeCookieHeader() { - Enumeration<String> cookies = getHeaders(HttpHeaders.Names.COOKIE); - if (cookies == null) - return Collections.emptyList(); - - List<Cookie> ret = new LinkedList<>(); - while(cookies.hasMoreElements()) - ret.addAll(Cookie.fromCookieHeader(cookies.nextElement())); - - return ret; - } - - @Override - public void encodeCookieHeader(List<Cookie> cookies) { - setHeaders(HttpHeaders.Names.COOKIE, Cookie.toCookieHeader(cookies)); - } - - @Override - public long getConnectedAt(TimeUnit unit) { - return unit.convert(connectedAt, TimeUnit.MILLISECONDS); - } - - @Override - public Principal getUserPrincipal() { - // NOTE: The principal from the underlying servlet request is ignored. JDisc filters are the source-of-truth. - return (Principal) request.getAttribute(JDISC_REQUEST_PRINCIPAL); - } - - public void setUserPrincipal(Principal principal) { - request.setAttribute(JDISC_REQUEST_PRINCIPAL, principal); - } -} diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/servlet/ServletResponse.java b/container-core/src/main/java/com/yahoo/jdisc/http/servlet/ServletResponse.java deleted file mode 100644 index 44a23c22b4a..00000000000 --- a/container-core/src/main/java/com/yahoo/jdisc/http/servlet/ServletResponse.java +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
-package com.yahoo.jdisc.http.servlet; - -import com.yahoo.jdisc.HeaderFields; -import com.yahoo.jdisc.http.Cookie; -import com.yahoo.jdisc.http.HttpHeaders; - -import javax.servlet.http.HttpServletResponse; -import javax.servlet.http.HttpServletResponseWrapper; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; - -/** - * JDisc wrapper to use a {@link javax.servlet.http.HttpServletResponse} - * with JDisc security filters. - */ -public class ServletResponse extends HttpServletResponseWrapper implements ServletOrJdiscHttpResponse { - - private final HttpServletResponse response; - private final Map<String, Object> context = new HashMap<>(); - - public ServletResponse(HttpServletResponse response) { - super(response); - this.response = response; - } - - public HttpServletResponse getResponse() { - return response; - } - - @Override - public int getStatus() { - return response.getStatus(); - } - - @Override - public Map<String, Object> context() { - return context; - } - - @Override - public void copyHeaders(HeaderFields target) { - response.getHeaderNames().forEach( header -> - target.add(header, new ArrayList<>(response.getHeaders(header))) - ); - } - - @Override - public List<Cookie> decodeSetCookieHeader() { - Collection<String> cookies = getHeaders(HttpHeaders.Names.SET_COOKIE); - if (cookies == null) { - return Collections.emptyList(); - } - List<Cookie> ret = new LinkedList<>(); - for (String cookie : cookies) { - ret.add(Cookie.fromSetCookieHeader(cookie)); - } - return ret; - } - -} diff --git a/container-core/src/main/java/com/yahoo/metrics/simple/Counter.java b/container-core/src/main/java/com/yahoo/metrics/simple/Counter.java index d726486c195..a119902aac7 100644 --- a/container-core/src/main/java/com/yahoo/metrics/simple/Counter.java +++ b/container-core/src/main/java/com/yahoo/metrics/simple/Counter.java @@ -1,7 
+1,7 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.metrics.simple; -import com.google.common.annotations.Beta; +import com.yahoo.api.annotations.Beta; import com.yahoo.metrics.simple.UntypedMetric.AssumedType; /** diff --git a/container-core/src/main/java/com/yahoo/metrics/simple/Gauge.java b/container-core/src/main/java/com/yahoo/metrics/simple/Gauge.java index ab250526bd5..684bf4b5db1 100644 --- a/container-core/src/main/java/com/yahoo/metrics/simple/Gauge.java +++ b/container-core/src/main/java/com/yahoo/metrics/simple/Gauge.java @@ -1,7 +1,7 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.metrics.simple; -import com.google.common.annotations.Beta; +import com.yahoo.api.annotations.Beta; import com.yahoo.metrics.simple.UntypedMetric.AssumedType; /** diff --git a/container-core/src/main/java/com/yahoo/metrics/simple/MetricReceiver.java b/container-core/src/main/java/com/yahoo/metrics/simple/MetricReceiver.java index 94be443d9bb..c2ef8afd279 100644 --- a/container-core/src/main/java/com/yahoo/metrics/simple/MetricReceiver.java +++ b/container-core/src/main/java/com/yahoo/metrics/simple/MetricReceiver.java @@ -6,7 +6,7 @@ import java.util.Map; import java.util.Optional; import java.util.concurrent.atomic.AtomicReference; -import com.google.common.annotations.Beta; +import com.yahoo.api.annotations.Beta; import com.google.common.collect.ImmutableMap; import com.yahoo.concurrent.ThreadLocalDirectory; diff --git a/container-core/src/main/java/com/yahoo/metrics/simple/MetricSettings.java b/container-core/src/main/java/com/yahoo/metrics/simple/MetricSettings.java index 39403703e71..5f4b66275a9 100644 --- a/container-core/src/main/java/com/yahoo/metrics/simple/MetricSettings.java +++ b/container-core/src/main/java/com/yahoo/metrics/simple/MetricSettings.java @@ -1,7 +1,7 @@ // Copyright Yahoo. 
Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.metrics.simple; -import com.google.common.annotations.Beta; +import com.yahoo.api.annotations.Beta; /** * All information needed for creating any extra data structures associated with diff --git a/container-core/src/main/java/com/yahoo/metrics/simple/Point.java b/container-core/src/main/java/com/yahoo/metrics/simple/Point.java index af0a1207072..f9ea3849ddc 100644 --- a/container-core/src/main/java/com/yahoo/metrics/simple/Point.java +++ b/container-core/src/main/java/com/yahoo/metrics/simple/Point.java @@ -5,7 +5,7 @@ import java.util.Arrays; import java.util.List; import java.util.Map; -import com.google.common.annotations.Beta; +import com.yahoo.api.annotations.Beta; import com.google.common.collect.ImmutableList; import com.yahoo.collections.Tuple2; import com.yahoo.jdisc.Metric.Context; diff --git a/container-core/src/main/java/com/yahoo/metrics/simple/PointBuilder.java b/container-core/src/main/java/com/yahoo/metrics/simple/PointBuilder.java index 9ca1198e8ea..47061eba10b 100644 --- a/container-core/src/main/java/com/yahoo/metrics/simple/PointBuilder.java +++ b/container-core/src/main/java/com/yahoo/metrics/simple/PointBuilder.java @@ -4,7 +4,7 @@ package com.yahoo.metrics.simple; import java.util.ArrayList; import java.util.Collections; -import com.google.common.annotations.Beta; +import com.yahoo.api.annotations.Beta; /** * Single-use builder for the immutable Point instances used to set dimensions diff --git a/container-core/src/main/java/com/yahoo/processing/Response.java b/container-core/src/main/java/com/yahoo/processing/Response.java index 0319a36f2f8..cf54d043c5f 100644 --- a/container-core/src/main/java/com/yahoo/processing/Response.java +++ b/container-core/src/main/java/com/yahoo/processing/Response.java @@ -1,12 +1,12 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.processing; -import com.google.common.util.concurrent.AbstractFuture; import com.google.common.util.concurrent.ListenableFuture; -import com.google.common.util.concurrent.MoreExecutors; import com.yahoo.component.provider.ListenableFreezableClass; +import com.yahoo.concurrent.CompletableFutures; import com.yahoo.concurrent.SystemTimer; import com.yahoo.processing.execution.ResponseReceiver; +import com.yahoo.processing.impl.ProcessingFuture; import com.yahoo.processing.request.CompoundName; import com.yahoo.processing.request.ErrorMessage; import com.yahoo.processing.response.ArrayDataList; @@ -15,8 +15,8 @@ import com.yahoo.processing.response.DataList; import java.util.ArrayList; import java.util.List; +import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; -import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; @@ -57,7 +57,7 @@ public class Response extends ListenableFreezableClass { if (freezeListener != null) { if (freezeListener instanceof ResponseReceiver) ((ResponseReceiver)freezeListener).setResponse(this); - data.addFreezeListener(freezeListener, MoreExecutors.directExecutor()); + data.addFreezeListener(freezeListener, Runnable::run); } } @@ -96,15 +96,22 @@ public class Response extends ListenableFreezableClass { * @param rootDataList the list to complete recursively * @return the future in which all data in and below this list is complete, as the given root dataList for convenience */ - public static <D extends Data> ListenableFuture<DataList<D>> recursiveComplete(DataList<D> rootDataList) { - List<ListenableFuture<DataList<D>>> futures = new ArrayList<>(); + public static <D extends Data> CompletableFuture<DataList<D>> recursiveFuture(DataList<D> rootDataList) { + List<CompletableFuture<DataList<D>>> futures = new ArrayList<>(); collectCompletionFutures(rootDataList, futures); return new CompleteAllOnGetFuture<D>(futures); } + 
/** @deprecated Use {@link #recursiveFuture(DataList)} instead */ + @Deprecated(forRemoval = true, since = "7") + @SuppressWarnings("removal") + public static <D extends Data> ListenableFuture<DataList<D>> recursiveComplete(DataList<D> rootDataList) { + return CompletableFutures.toGuavaListenableFuture(recursiveFuture(rootDataList)); + } + @SuppressWarnings("unchecked") - private static <D extends Data> void collectCompletionFutures(DataList<D> dataList, List<ListenableFuture<DataList<D>>> futures) { - futures.add(dataList.complete()); + private static <D extends Data> void collectCompletionFutures(DataList<D> dataList, List<CompletableFuture<DataList<D>>> futures) { + futures.add(dataList.completeFuture()); for (D data : dataList.asList()) { if (data instanceof DataList) collectCompletionFutures((DataList<D>) data, futures); @@ -115,24 +122,24 @@ public class Response extends ListenableFreezableClass { * A future which on get calls get on all its given futures and sets the value returned from the * first given future as its result. 
*/ - private static class CompleteAllOnGetFuture<D extends Data> extends AbstractFuture<DataList<D>> { + private static class CompleteAllOnGetFuture<D extends Data> extends ProcessingFuture<DataList<D>> { - private final List<ListenableFuture<DataList<D>>> futures; + private final List<CompletableFuture<DataList<D>>> futures; - public CompleteAllOnGetFuture(List<ListenableFuture<DataList<D>>> futures) { + public CompleteAllOnGetFuture(List<CompletableFuture<DataList<D>>> futures) { this.futures = new ArrayList<>(futures); } @Override public DataList<D> get() throws InterruptedException, ExecutionException { DataList<D> result = null; - for (ListenableFuture<DataList<D>> future : futures) { + for (CompletableFuture<DataList<D>> future : futures) { if (result == null) result = future.get(); else future.get(); } - set(result); + complete(result); return result; } @@ -141,7 +148,7 @@ public class Response extends ListenableFreezableClass { DataList<D> result = null; long timeLeft = unit.toMillis(timeout); long currentCallStart = SystemTimer.INSTANCE.milliTime(); - for (ListenableFuture<DataList<D>> future : futures) { + for (CompletableFuture<DataList<D>> future : futures) { if (result == null) result = future.get(timeLeft, TimeUnit.MILLISECONDS); else @@ -151,7 +158,7 @@ public class Response extends ListenableFreezableClass { if (timeLeft <= 0) break; currentCallStart = currentCallEnd; } - set(result); + complete(result); return result; } diff --git a/container-core/src/main/java/com/yahoo/processing/handler/AbstractProcessingHandler.java b/container-core/src/main/java/com/yahoo/processing/handler/AbstractProcessingHandler.java index 5119e69f72e..9b9224e70ef 100644 --- a/container-core/src/main/java/com/yahoo/processing/handler/AbstractProcessingHandler.java +++ b/container-core/src/main/java/com/yahoo/processing/handler/AbstractProcessingHandler.java @@ -244,7 +244,8 @@ public abstract class AbstractProcessingHandler<COMPONENT extends Processor> ext // Render if we 
have a renderer capable of it if (getRenderer() instanceof AsynchronousSectionedRenderer) { - ((AsynchronousSectionedRenderer) getRenderer()).renderBeforeHandover(new ContentChannelOutputStream(channel), response, execution, request); + ((AsynchronousSectionedRenderer) getRenderer()).renderResponseBeforeHandover( + new ContentChannelOutputStream(channel), response, execution, request); } } diff --git a/container-core/src/main/java/com/yahoo/processing/handler/ProcessingResponse.java b/container-core/src/main/java/com/yahoo/processing/handler/ProcessingResponse.java index 54fbce9e177..28645b4bde0 100644 --- a/container-core/src/main/java/com/yahoo/processing/handler/ProcessingResponse.java +++ b/container-core/src/main/java/com/yahoo/processing/handler/ProcessingResponse.java @@ -1,19 +1,9 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.processing.handler; -import java.io.IOException; -import java.io.OutputStream; -import java.util.Collections; -import java.util.LinkedHashSet; -import java.util.List; -import java.util.Set; -import java.util.concurrent.Executor; - import com.google.common.collect.ImmutableList; import com.yahoo.container.jdisc.AsyncHttpResponse; -import com.yahoo.container.jdisc.HttpRequest; import com.yahoo.container.jdisc.VespaHeaders; -import com.yahoo.container.logging.AccessLogEntry; import com.yahoo.jdisc.handler.CompletionHandler; import com.yahoo.jdisc.handler.ContentChannel; import com.yahoo.processing.Request; @@ -26,6 +16,14 @@ import com.yahoo.processing.request.ErrorMessage; import com.yahoo.processing.response.Data; import com.yahoo.processing.response.DataList; +import java.io.IOException; +import java.io.OutputStream; +import java.util.Collections; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Set; +import java.util.concurrent.Executor; + /** * A response from running a request through processing. 
This response is just a * wrapper of the knowhow needed to render the Response from processing. @@ -62,7 +60,7 @@ public class ProcessingResponse extends AsyncHttpResponse { AsynchronousRenderer asyncRenderer = (AsynchronousRenderer)renderer; asyncRenderer.setNetworkWiring(channel, completionHandler); } - renderer.render(stream, processingResponse, execution, processingRequest); + renderer.renderResponse(stream, processingResponse, execution, processingRequest); // the stream is closed in AsynchronousSectionedRenderer, after all data // has arrived } diff --git a/container-core/src/main/java/com/yahoo/processing/handler/ProcessingTestDriver.java b/container-core/src/main/java/com/yahoo/processing/handler/ProcessingTestDriver.java index 12a226dd50e..99675e3fef5 100644 --- a/container-core/src/main/java/com/yahoo/processing/handler/ProcessingTestDriver.java +++ b/container-core/src/main/java/com/yahoo/processing/handler/ProcessingTestDriver.java @@ -1,7 +1,7 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.processing.handler; -import com.google.common.annotations.Beta; +import com.yahoo.api.annotations.Beta; import com.yahoo.component.ComponentId; import com.yahoo.component.chain.Chain; import com.yahoo.component.provider.ComponentRegistry; diff --git a/container-core/src/main/java/com/yahoo/processing/impl/ProcessingFuture.java b/container-core/src/main/java/com/yahoo/processing/impl/ProcessingFuture.java new file mode 100644 index 00000000000..ab597fffaff --- /dev/null +++ b/container-core/src/main/java/com/yahoo/processing/impl/ProcessingFuture.java @@ -0,0 +1,31 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+package com.yahoo.processing.impl; + +import com.google.common.util.concurrent.ListenableFuture; + +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Executor; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +/** + * A {@link CompletableFuture} where {@link #get()}/{@link #get(long, TimeUnit)} may have side-effects (e.g trigger the underlying computation). + * + * @author bjorncs + */ +// TODO Vespa 8 remove ListenableFuture implementation +public abstract class ProcessingFuture<V> extends CompletableFuture<V> implements ListenableFuture<V> { + + @Override public boolean cancel(boolean mayInterruptIfRunning) { return false; } + @Override public boolean isCancelled() { return false; } + + @Override public abstract V get() throws InterruptedException, ExecutionException; + @Override public abstract V get(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException; + + @Override + public void addListener(Runnable listener, Executor executor) { + whenCompleteAsync((__, ___) -> listener.run(), executor); + } + +} diff --git a/container-core/src/main/java/com/yahoo/processing/rendering/AsynchronousSectionedRenderer.java b/container-core/src/main/java/com/yahoo/processing/rendering/AsynchronousSectionedRenderer.java index b77d493ea30..bb5fe7a1f76 100644 --- a/container-core/src/main/java/com/yahoo/processing/rendering/AsynchronousSectionedRenderer.java +++ b/container-core/src/main/java/com/yahoo/processing/rendering/AsynchronousSectionedRenderer.java @@ -2,12 +2,10 @@ package com.yahoo.processing.rendering; import com.google.common.util.concurrent.ListenableFuture; -import com.google.common.util.concurrent.MoreExecutors; -import com.google.common.util.concurrent.SettableFuture; +import com.yahoo.concurrent.CompletableFutures; import com.yahoo.concurrent.ThreadFactoryFactory; import 
com.yahoo.jdisc.handler.CompletionHandler; import com.yahoo.jdisc.handler.ContentChannel; -import java.util.logging.Level; import com.yahoo.processing.Request; import com.yahoo.processing.Response; import com.yahoo.processing.execution.Execution; @@ -23,12 +21,14 @@ import java.util.ArrayDeque; import java.util.Collections; import java.util.Deque; import java.util.List; +import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; import java.util.concurrent.Executor; import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.RejectedExecutionException; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; +import java.util.logging.Level; import java.util.logging.Logger; /** @@ -126,7 +126,7 @@ public abstract class AsynchronousSectionedRenderer<RESPONSE extends Response> e return executor; } - private SettableFuture<Boolean> success; + private CompletableFuture<Boolean> success; private ContentChannel channel; private CompletionHandler completionHandler; @@ -173,8 +173,8 @@ public abstract class AsynchronousSectionedRenderer<RESPONSE extends Response> e * @return a future indicating whether rendering was successful */ @Override - public final ListenableFuture<Boolean> render(OutputStream stream, RESPONSE response, - Execution execution, Request request) { + public final CompletableFuture<Boolean> renderResponse(OutputStream stream, RESPONSE response, + Execution execution, Request request) { if (beforeHandoverMode) { // rendering has already started or is already complete beforeHandoverMode = false; if ( ! dataListListenerStack.isEmpty() && @@ -215,22 +215,31 @@ public abstract class AsynchronousSectionedRenderer<RESPONSE extends Response> e * At this point the worker thread still owns the Response, so all this rendering must happen * on the caller thread invoking freeze (that is, on the thread calling this). 
*/ - public final ListenableFuture<Boolean> renderBeforeHandover(OutputStream stream, RESPONSE response, - Execution execution, Request request) { + public final CompletableFuture<Boolean> renderResponseBeforeHandover(OutputStream stream, RESPONSE response, + Execution execution, Request request) { beforeHandoverMode = true; if ( ! isInitialized) throw new IllegalStateException("render() invoked before init()."); return startRender(stream, response, execution, request); } - private ListenableFuture<Boolean> startRender(OutputStream stream, RESPONSE response, + + /** @deprecated Use {@link #renderResponseBeforeHandover(OutputStream, Response, Execution, Request)} */ + @Deprecated(forRemoval = true, since = "7") + @SuppressWarnings("removal") + public final ListenableFuture<Boolean> renderBeforeHandover(OutputStream stream, RESPONSE response, + Execution execution, Request request) { + return CompletableFutures.toGuavaListenableFuture(renderResponseBeforeHandover(stream, response, execution, request)); + } + + private CompletableFuture<Boolean> startRender(OutputStream stream, RESPONSE response, Execution execution, Request request) { this.response = response; this.stream = stream; this.execution = execution; DataListListener parentOfTopLevelListener = new DataListListener(new ParentOfTopLevel(request,response.data()), null); dataListListenerStack.addFirst(parentOfTopLevelListener); - success = SettableFuture.create(); + success = new CompletableFuture<>(); try { getExecutor().execute(parentOfTopLevelListener); } catch (RejectedExecutionException e) { @@ -247,7 +256,7 @@ public abstract class AsynchronousSectionedRenderer<RESPONSE extends Response> e * inadvertently work ends up in async data producing threads in some cases. */ Executor getExecutor() { - return beforeHandoverMode ? MoreExecutors.directExecutor() : renderingExecutor; + return beforeHandoverMode ? 
Runnable::run : renderingExecutor; } /** For inspection only; use getExecutor() for execution */ Executor getRenderingExecutor() { return renderingExecutor; } @@ -350,10 +359,10 @@ public abstract class AsynchronousSectionedRenderer<RESPONSE extends Response> e return; // Called on completion of a list which is not frozen yet - hold off until frozen if ( ! beforeHandoverMode) - list.complete().get(); // trigger completion if not done already to invoke any listeners on that event + list.completeFuture().get(); // trigger completion if not done already to invoke any listeners on that event boolean startedRendering = renderData(); if ( ! startedRendering || uncompletedChildren > 0) return; // children must render to completion first - if (list.complete().isDone()) // might not be when in before handover mode + if (list.completeFuture().isDone()) // might not be when in before handover mode endListLevel(); else stream.flush(); @@ -435,8 +444,8 @@ public abstract class AsynchronousSectionedRenderer<RESPONSE extends Response> e flushIfLikelyToSuspend(subList); subList.addFreezeListener(listListener, getExecutor()); - subList.complete().addListener(listListener, getExecutor()); - subList.incoming().completed().addListener(listListener, getExecutor()); + subList.completeFuture().whenCompleteAsync((__, ___) -> listListener.run(), getExecutor()); + subList.incoming().completedFuture().whenCompleteAsync((__, ___) -> listListener.run(), getExecutor()); } private boolean isOrdered(DataList dataList) { @@ -471,11 +480,11 @@ public abstract class AsynchronousSectionedRenderer<RESPONSE extends Response> e logger.log(Level.WARNING, "Exception caught while closing stream to client.", e); } finally { if (failed != null) { - success.setException(failed); + success.completeExceptionally(failed); } else if (closeException != null) { - success.setException(closeException); + success.completeExceptionally(closeException); } else { - success.set(true); + success.complete(true); } if 
(channel != null) { channel.close(completionHandler); @@ -541,7 +550,7 @@ public abstract class AsynchronousSectionedRenderer<RESPONSE extends Response> e } catch (Exception ignored) { } } - success.setException(e); + success.completeExceptionally(e); } } } catch (Error e) { diff --git a/container-core/src/main/java/com/yahoo/processing/rendering/Renderer.java b/container-core/src/main/java/com/yahoo/processing/rendering/Renderer.java index 14ec3002b0a..8db4ed4f624 100644 --- a/container-core/src/main/java/com/yahoo/processing/rendering/Renderer.java +++ b/container-core/src/main/java/com/yahoo/processing/rendering/Renderer.java @@ -3,11 +3,13 @@ package com.yahoo.processing.rendering; import com.google.common.util.concurrent.ListenableFuture; import com.yahoo.component.AbstractComponent; +import com.yahoo.concurrent.CompletableFutures; import com.yahoo.processing.Request; import com.yahoo.processing.Response; import com.yahoo.processing.execution.Execution; import java.io.OutputStream; +import java.util.concurrent.CompletableFuture; /** * Renders a response to a stream. The renderers are cloned just before @@ -41,6 +43,17 @@ public abstract class Renderer<RESPONSE extends Response> extends AbstractCompon } /** + * @deprecated Use/implement {@link #renderResponse(OutputStream, Response, Execution, Request)} instead. + * Return type changed from {@link ListenableFuture} to {@link CompletableFuture}. + */ + @Deprecated(forRemoval = true, since = "7") + @SuppressWarnings("removal") + public ListenableFuture<Boolean> render(OutputStream stream, RESPONSE response, Execution execution, + Request request) { + return CompletableFutures.toGuavaListenableFuture(renderResponse(stream, response, execution, request)); + } + + /** * Render a response to a stream. The stream also exposes a ByteBuffer API * for efficient transactions to JDisc. 
The returned future will throw the * exception causing failure wrapped in an ExecutionException if rendering @@ -50,10 +63,13 @@ public abstract class Renderer<RESPONSE extends Response> extends AbstractCompon * @param response the response to render * @param execution the execution which created this response * @param request the request matching the response - * @return a ListenableFuture containing a boolean where true indicates a successful rendering + * @return a {@link CompletableFuture} containing a boolean where true indicates a successful rendering */ - public abstract ListenableFuture<Boolean> render(OutputStream stream, RESPONSE response, - Execution execution, Request request); + @SuppressWarnings("removal") + public CompletableFuture<Boolean> renderResponse(OutputStream stream, RESPONSE response, + Execution execution, Request request) { + return CompletableFutures.toCompletableFuture(render(stream, response, execution, request)); + } /** * Name of the output encoding, if applicable. diff --git a/container-core/src/main/java/com/yahoo/processing/response/AbstractDataList.java b/container-core/src/main/java/com/yahoo/processing/response/AbstractDataList.java index 4633ac5ec1c..b1ce0643487 100644 --- a/container-core/src/main/java/com/yahoo/processing/response/AbstractDataList.java +++ b/container-core/src/main/java/com/yahoo/processing/response/AbstractDataList.java @@ -1,15 +1,13 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.processing.response; -import com.google.common.util.concurrent.AbstractFuture; -import com.google.common.util.concurrent.ExecutionList; import com.google.common.util.concurrent.ListenableFuture; -import com.google.common.util.concurrent.MoreExecutors; import com.yahoo.component.provider.ListenableFreezableClass; +import com.yahoo.concurrent.CompletableFutures; import com.yahoo.processing.Request; +import com.yahoo.processing.impl.ProcessingFuture; -import java.util.ArrayList; -import java.util.List; +import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; @@ -34,7 +32,7 @@ public abstract class AbstractDataList<DATATYPE extends Data> extends Listenable */ private final IncomingData<DATATYPE> incomingData; - private final ListenableFuture<DataList<DATATYPE>> completedFuture; + private final CompletableFuture<DataList<DATATYPE>> completedFuture; /** * Creates a simple data list which does not allow late incoming data @@ -94,10 +92,15 @@ public abstract class AbstractDataList<DATATYPE extends Data> extends Listenable return incomingData; } + @Override + @SuppressWarnings("removal") + @Deprecated(forRemoval = true, since = "7") public ListenableFuture<DataList<DATATYPE>> complete() { - return completedFuture; + return CompletableFutures.toGuavaListenableFuture(completedFuture); } + @Override public CompletableFuture<DataList<DATATYPE>> completeFuture() { return completedFuture; } + @Override public boolean isOrdered() { return ordered; } @@ -108,7 +111,7 @@ public abstract class AbstractDataList<DATATYPE extends Data> extends Listenable return super.toString() + (complete().isDone() ? 
" [completed]" : " [incomplete, " + incoming() + "]"); } - public static final class DrainOnGetFuture<DATATYPE extends Data> extends AbstractFuture<DataList<DATATYPE>> { + public static final class DrainOnGetFuture<DATATYPE extends Data> extends ProcessingFuture<DataList<DATATYPE>> { private final DataList<DATATYPE> owner; @@ -137,7 +140,7 @@ public abstract class AbstractDataList<DATATYPE extends Data> extends Listenable */ @Override public DataList<DATATYPE> get() throws InterruptedException, ExecutionException { - return drain(owner.incoming().completed().get()); + return drain(owner.incoming().completedFuture().get()); } /** @@ -146,13 +149,13 @@ public abstract class AbstractDataList<DATATYPE extends Data> extends Listenable */ @Override public DataList<DATATYPE> get(long timeout, TimeUnit timeUnit) throws InterruptedException, ExecutionException, TimeoutException { - return drain(owner.incoming().completed().get(timeout, timeUnit)); + return drain(owner.incoming().completedFuture().get(timeout, timeUnit)); } private DataList<DATATYPE> drain(DataList<DATATYPE> dataList) { for (DATATYPE item : dataList.incoming().drain()) dataList.add(item); - set(dataList); // Signal completion to listeners + complete(dataList); // Signal completion to listeners return dataList; } diff --git a/container-core/src/main/java/com/yahoo/processing/response/DataList.java b/container-core/src/main/java/com/yahoo/processing/response/DataList.java index d566e201375..dbda8983f12 100644 --- a/container-core/src/main/java/com/yahoo/processing/response/DataList.java +++ b/container-core/src/main/java/com/yahoo/processing/response/DataList.java @@ -1,11 +1,10 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.processing.response; -import com.google.common.util.concurrent.ExecutionList; import com.google.common.util.concurrent.ListenableFuture; import java.util.List; -import java.util.concurrent.Executor; +import java.util.concurrent.CompletableFuture; /** * A list of data items created due to a processing request. @@ -73,6 +72,10 @@ public interface DataList<DATATYPE extends Data> extends Data { * Making this call on a list which does not support future data always returns immediately and * causes no memory synchronization cost. */ + CompletableFuture<DataList<DATATYPE>> completeFuture(); + + /** @deprecated Use {@link #completeFuture()} instead */ + @Deprecated(forRemoval = true, since = "7") ListenableFuture<DataList<DATATYPE>> complete(); /** diff --git a/container-core/src/main/java/com/yahoo/processing/response/DefaultIncomingData.java b/container-core/src/main/java/com/yahoo/processing/response/DefaultIncomingData.java index 619e554f45c..813d6ac54d8 100644 --- a/container-core/src/main/java/com/yahoo/processing/response/DefaultIncomingData.java +++ b/container-core/src/main/java/com/yahoo/processing/response/DefaultIncomingData.java @@ -2,12 +2,13 @@ package com.yahoo.processing.response; import com.google.common.util.concurrent.ListenableFuture; -import com.google.common.util.concurrent.SettableFuture; import com.yahoo.collections.Tuple2; +import com.yahoo.concurrent.CompletableFutures; import java.util.ArrayList; import java.util.Collections; import java.util.List; +import java.util.concurrent.CompletableFuture; import java.util.concurrent.Executor; /** @@ -19,7 +20,7 @@ public class DefaultIncomingData<DATATYPE extends Data> implements IncomingData< private DataList<DATATYPE> owner = null; - private final SettableFuture<DataList<DATATYPE>> completionFuture; + private final CompletableFuture<DataList<DATATYPE>> completionFuture; private final List<DATATYPE> dataList = new ArrayList<>(); @@ -35,7 +36,7 @@ public class 
DefaultIncomingData<DATATYPE extends Data> implements IncomingData< public DefaultIncomingData(DataList<DATATYPE> owner) { assignOwner(owner); - completionFuture = SettableFuture.create(); + completionFuture = new CompletableFuture<>(); } /** Assigns the owner of this. Throws an exception if the owner is already set. */ @@ -50,10 +51,14 @@ public class DefaultIncomingData<DATATYPE extends Data> implements IncomingData< } @Override + @Deprecated(forRemoval = true, since = "7") + @SuppressWarnings("removal") public ListenableFuture<DataList<DATATYPE>> completed() { - return completionFuture; + return CompletableFutures.toGuavaListenableFuture(completionFuture); } + @Override public CompletableFuture<DataList<DATATYPE>> completedFuture() { return completionFuture; } + /** Returns whether the data in this is complete */ @Override public synchronized boolean isComplete() { @@ -92,7 +97,7 @@ public class DefaultIncomingData<DATATYPE extends Data> implements IncomingData< @Override public synchronized void markComplete() { complete = true; - completionFuture.set(owner); + completionFuture.complete(owner); } /** diff --git a/container-core/src/main/java/com/yahoo/processing/response/FutureResponse.java b/container-core/src/main/java/com/yahoo/processing/response/FutureResponse.java index d589b7dd195..25c230e383f 100644 --- a/container-core/src/main/java/com/yahoo/processing/response/FutureResponse.java +++ b/container-core/src/main/java/com/yahoo/processing/response/FutureResponse.java @@ -1,8 +1,6 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.processing.response; -import com.google.common.util.concurrent.ForwardingFuture; -import com.google.common.util.concurrent.ListenableFutureTask; import com.yahoo.processing.Request; import com.yahoo.processing.Response; import com.yahoo.processing.execution.Execution; @@ -10,6 +8,8 @@ import com.yahoo.processing.request.ErrorMessage; import java.util.concurrent.Callable; import java.util.concurrent.ExecutionException; +import java.util.concurrent.Future; +import java.util.concurrent.FutureTask; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import java.util.logging.Level; @@ -20,9 +20,10 @@ import java.util.logging.Logger; * * @author bratseth */ -public class FutureResponse extends ForwardingFuture<Response> { +public class FutureResponse implements Future<Response> { private final Request request; + private final FutureTask<Response> task; /** * Only used for generating messages @@ -31,24 +32,23 @@ public class FutureResponse extends ForwardingFuture<Response> { private final static Logger log = Logger.getLogger(FutureResponse.class.getName()); - private final ListenableFutureTask<Response> futureTask; - public FutureResponse(final Callable<Response> callable, Execution execution, final Request request) { - this.futureTask = ListenableFutureTask.create(callable); + this.task = new FutureTask<>(callable); this.request = request; this.execution = execution; } - @Override - public ListenableFutureTask<Response> delegate() { - return futureTask; - } + public FutureTask<Response> delegate() { return task; } + + @Override public boolean cancel(boolean mayInterruptIfRunning) { return task.cancel(mayInterruptIfRunning); } + @Override public boolean isCancelled() { return task.isCancelled(); } + @Override public boolean isDone() { return task.isDone(); } public @Override Response get() { try { - return super.get(); + return task.get(); } catch (InterruptedException e) { return new Response(request, new 
ErrorMessage("'" + execution + "' was interrupted", e)); } catch (ExecutionException e) { @@ -61,7 +61,7 @@ public class FutureResponse extends ForwardingFuture<Response> { @Override Response get(long timeout, TimeUnit timeunit) { try { - return super.get(timeout, timeunit); + return task.get(timeout, timeunit); } catch (InterruptedException e) { return new Response(request, new ErrorMessage("'" + execution + "' was interrupted", e)); } catch (ExecutionException e) { diff --git a/container-core/src/main/java/com/yahoo/processing/response/IncomingData.java b/container-core/src/main/java/com/yahoo/processing/response/IncomingData.java index 371c1bca45f..54ba0fa8031 100644 --- a/container-core/src/main/java/com/yahoo/processing/response/IncomingData.java +++ b/container-core/src/main/java/com/yahoo/processing/response/IncomingData.java @@ -1,11 +1,13 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.processing.response; -import com.google.common.util.concurrent.AbstractFuture; import com.google.common.util.concurrent.ListenableFuture; +import com.yahoo.concurrent.CompletableFutures; +import com.yahoo.processing.impl.ProcessingFuture; import java.util.Collections; import java.util.List; +import java.util.concurrent.CompletableFuture; import java.util.concurrent.Executor; import java.util.concurrent.TimeUnit; @@ -35,6 +37,10 @@ public interface IncomingData<DATATYPE extends Data> { * <p> * This return the list owning this for convenience. 
*/ + CompletableFuture<DataList<DATATYPE>> completedFuture(); + + /** @deprecated Use {@link #completedFuture()} instead */ + @Deprecated(forRemoval = true, since = "7") ListenableFuture<DataList<DATATYPE>> completed(); /** @@ -108,10 +114,15 @@ public interface IncomingData<DATATYPE extends Data> { completionFuture = new ImmediateFuture<>(owner); } + @Override + @SuppressWarnings("removal") + @Deprecated(forRemoval = true, since = "7") public ListenableFuture<DataList<DATATYPE>> completed() { - return completionFuture; + return CompletableFutures.toGuavaListenableFuture(completionFuture); } + @Override public CompletableFuture<DataList<DATATYPE>> completedFuture() { return completionFuture; } + @Override public DataList<DATATYPE> getOwner() { return owner; @@ -178,13 +189,13 @@ public interface IncomingData<DATATYPE extends Data> { * This is semantically the same as Futures.immediateFuture but contrary to it, * this never causes any memory synchronization when accessed. */ - public static class ImmediateFuture<DATATYPE extends Data> extends AbstractFuture<DataList<DATATYPE>> { + public static class ImmediateFuture<DATATYPE extends Data> extends ProcessingFuture<DataList<DATATYPE>> { - private DataList<DATATYPE> owner; + private final DataList<DATATYPE> owner; public ImmediateFuture(DataList<DATATYPE> owner) { this.owner = owner; // keep here to avoid memory synchronization for access - set(owner); // Signal completion (for future listeners) + complete(owner); // Signal completion (for future listeners) } @Override diff --git a/container-core/src/main/java/com/yahoo/processing/test/ProcessorLibrary.java b/container-core/src/main/java/com/yahoo/processing/test/ProcessorLibrary.java index aebbc3f538d..ee8dbd8dccb 100644 --- a/container-core/src/main/java/com/yahoo/processing/test/ProcessorLibrary.java +++ b/container-core/src/main/java/com/yahoo/processing/test/ProcessorLibrary.java @@ -1,8 +1,6 @@ // Copyright Yahoo. 
Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.processing.test; -import com.google.common.util.concurrent.MoreExecutors; -import com.google.common.util.concurrent.SettableFuture; import com.yahoo.component.chain.Chain; import com.yahoo.processing.Processor; import com.yahoo.processing.Request; @@ -15,6 +13,7 @@ import com.yahoo.processing.request.ErrorMessage; import com.yahoo.processing.response.*; import java.util.*; +import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; /** @@ -288,7 +287,7 @@ public class ProcessorLibrary { private final boolean ordered, streamed; /** The incoming data this has created */ - public final SettableFuture<IncomingData> incomingData = SettableFuture.create(); + public final CompletableFuture<IncomingData> incomingData = new CompletableFuture<>(); /** Create an instance which returns ordered, streamable data */ public ListenableFutureDataSource() { this(true, true); } @@ -307,7 +306,7 @@ public class ProcessorLibrary { dataList = ArrayDataList.createAsyncNonstreamed(request); else dataList = ArrayDataList.createAsync(request); - incomingData.set(dataList.incoming()); + incomingData.complete(dataList.incoming()); return new Response(dataList); } @@ -317,12 +316,12 @@ public class ProcessorLibrary { public static class RequestCounter extends Processor { /** The incoming data this has created */ - public final SettableFuture<IncomingData> incomingData = SettableFuture.create(); + public final CompletableFuture<IncomingData> incomingData = new CompletableFuture<>(); @Override public Response process(Request request, Execution execution) { ArrayDataList dataList = ArrayDataList.createAsync(request); - incomingData.set(dataList.incoming()); + incomingData.complete(dataList.incoming()); return new Response(dataList); } @@ -354,7 +353,7 @@ public class ProcessorLibrary { // wait for other executions and merge the responses for (Response 
additionalResponse : AsyncExecution.waitForAll(futures, 1000)) { - additionalResponse.data().complete().get(); // block until we have all the data elements + additionalResponse.data().completeFuture().get(); // block until we have all the data elements for (Object item : additionalResponse.data().asList()) response.data().add((Data) item); response.mergeWith(additionalResponse); @@ -382,9 +381,10 @@ public class ProcessorLibrary { public Response process(Request request, Execution execution) { Response response = execution.process(request); // TODO: Consider for to best provide helpers for this - response.data().complete().addListener(new RunnableExecution(request, - new ExecutionWithResponse(asyncChain, response, execution)), - MoreExecutors.directExecutor()); + response.data().completeFuture().whenComplete( + (__, ___) -> + new RunnableExecution(request, new ExecutionWithResponse(asyncChain, response, execution)) + .run()); return response; } diff --git a/container-core/src/main/resources/configdefinitions/container.servlet.servlet-config.def b/container-core/src/main/resources/configdefinitions/container.servlet.servlet-config.def index d169ceb27d7..3cc65475913 100644 --- a/container-core/src/main/resources/configdefinitions/container.servlet.servlet-config.def +++ b/container-core/src/main/resources/configdefinitions/container.servlet.servlet-config.def @@ -1,4 +1,5 @@ # Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+# TODO Vespa 8 Remove config definition namespace=container.servlet map{} string diff --git a/container-core/src/main/resources/configdefinitions/jdisc.http.jdisc.http.servlet-paths.def b/container-core/src/main/resources/configdefinitions/jdisc.http.jdisc.http.servlet-paths.def index db00df042bf..af788764364 100644 --- a/container-core/src/main/resources/configdefinitions/jdisc.http.jdisc.http.servlet-paths.def +++ b/container-core/src/main/resources/configdefinitions/jdisc.http.jdisc.http.servlet-paths.def @@ -1,4 +1,5 @@ # Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +# TODO Vespa 8 Remove config definition namespace=jdisc.http # path by servlet componentId diff --git a/container-core/src/test/java/com/yahoo/jdisc/http/filter/DiscFilterRequestTest.java b/container-core/src/test/java/com/yahoo/jdisc/http/filter/DiscFilterRequestTest.java index 6c945520c1b..25efe4ac5f6 100644 --- a/container-core/src/test/java/com/yahoo/jdisc/http/filter/DiscFilterRequestTest.java +++ b/container-core/src/test/java/com/yahoo/jdisc/http/filter/DiscFilterRequestTest.java @@ -42,8 +42,7 @@ public class DiscFilterRequestTest { cookies.add(new Cookie("XYZ", "value")); cookies.add(new Cookie("ABC", "value")); httpReq.encodeCookieHeader(cookies); - DiscFilterRequest request = new JdiscFilterRequest(httpReq); - Assert.assertSame(request.getParentRequest(), httpReq); + DiscFilterRequest request = new DiscFilterRequest(httpReq); Assert.assertEquals(request.getHeader("X-Custom-Header"),"custom_header"); Assert.assertEquals(request.getHeader(HttpHeaders.Names.CONTENT_TYPE),"text/html;charset=UTF-8"); @@ -63,7 +62,7 @@ public class DiscFilterRequestTest { URI uri = URI.create("http://localhost:8080/test"); HttpRequest httpReq = newRequest(uri, HttpRequest.Method.GET, HttpRequest.Version.HTTP_1_1); httpReq.headers().add("some-header", "some-value"); - DiscFilterRequest request = new JdiscFilterRequest(httpReq); + DiscFilterRequest 
request = new DiscFilterRequest(httpReq); request.addHeader("some-header", "some-value"); String value = request.getUntreatedHeaders().get("some-header").get(0); @@ -74,7 +73,7 @@ public class DiscFilterRequestTest { public void testRequestAttributes() { URI uri = URI.create("http://localhost:8080/test"); HttpRequest httpReq = newRequest(uri, HttpRequest.Method.GET, HttpRequest.Version.HTTP_1_1); - DiscFilterRequest request = new JdiscFilterRequest(httpReq); + DiscFilterRequest request = new DiscFilterRequest(httpReq); request.setAttribute("some_attr", "some_value"); Assert.assertTrue(request.containsAttribute("some_attr")); @@ -87,7 +86,7 @@ public class DiscFilterRequestTest { public void testGetAttributeNames() { URI uri = URI.create("http://localhost:8080/test"); HttpRequest httpReq = newRequest(uri, HttpRequest.Method.GET, HttpRequest.Version.HTTP_1_1); - DiscFilterRequest request = new JdiscFilterRequest(httpReq); + DiscFilterRequest request = new DiscFilterRequest(httpReq); request.setAttribute("some_attr_1", "some_value1"); request.setAttribute("some_attr_2", "some_value2"); @@ -103,7 +102,7 @@ public class DiscFilterRequestTest { public void testRemoveAttribute() { URI uri = URI.create("http://localhost:8080/test"); HttpRequest httpReq = newRequest(uri, HttpRequest.Method.GET, HttpRequest.Version.HTTP_1_1); - DiscFilterRequest request = new JdiscFilterRequest(httpReq); + DiscFilterRequest request = new DiscFilterRequest(httpReq); request.setAttribute("some_attr", "some_value"); Assert.assertTrue(request.containsAttribute("some_attr")); @@ -117,7 +116,7 @@ public class DiscFilterRequestTest { public void testGetIntHeader() { URI uri = URI.create("http://localhost:8080/test"); HttpRequest httpReq = newRequest(uri, HttpRequest.Method.GET, HttpRequest.Version.HTTP_1_1); - DiscFilterRequest request = new JdiscFilterRequest(httpReq); + DiscFilterRequest request = new DiscFilterRequest(httpReq); Assert.assertEquals(-1, request.getIntHeader("int_header")); @@ 
-130,7 +129,7 @@ public class DiscFilterRequestTest { public void testDateHeader() { URI uri = URI.create("http://localhost:8080/test"); HttpRequest httpReq = newRequest(uri, HttpRequest.Method.GET, HttpRequest.Version.HTTP_1_1); - DiscFilterRequest request = new JdiscFilterRequest(httpReq); + DiscFilterRequest request = new DiscFilterRequest(httpReq); Assert.assertEquals(-1, request.getDateHeader(HttpHeaders.Names.IF_MODIFIED_SINCE)); @@ -144,7 +143,7 @@ public class DiscFilterRequestTest { public void testParameterAPIsAsList() { URI uri = URI.create("http://example.yahoo.com:8080/test?param1=abc¶m2=xyz¶m2=pqr"); HttpRequest httpReq = newRequest(uri, HttpRequest.Method.GET, HttpRequest.Version.HTTP_1_1); - DiscFilterRequest request = new JdiscFilterRequest(httpReq); + DiscFilterRequest request = new DiscFilterRequest(httpReq); Assert.assertEquals(request.getParameter("param1"),"abc"); List<String> values = request.getParameterValuesAsList("param2"); @@ -160,7 +159,7 @@ public class DiscFilterRequestTest { public void testParameterAPI(){ URI uri = URI.create("http://example.yahoo.com:8080/test?param1=abc¶m2=xyz¶m2=pqr"); HttpRequest httpReq = newRequest(uri, HttpRequest.Method.GET, HttpRequest.Version.HTTP_1_1); - DiscFilterRequest request = new JdiscFilterRequest(httpReq); + DiscFilterRequest request = new DiscFilterRequest(httpReq); Assert.assertEquals(request.getParameter("param1"),"abc"); Enumeration<String> values = request.getParameterValues("param2"); @@ -180,7 +179,7 @@ public class DiscFilterRequestTest { httpReq.headers().add(HttpHeaders.Names.CONTENT_TYPE, "multipart/form-data"); httpReq.headers().add("header_1", "value1"); httpReq.headers().add("header_2", "value2"); - DiscFilterRequest request = new JdiscFilterRequest(httpReq); + DiscFilterRequest request = new DiscFilterRequest(httpReq); Assert.assertNotNull(request.getHeaderNamesAsList()); Assert.assertEquals(request.getHeaderNamesAsList().size(), 3); @@ -190,7 +189,7 @@ public class 
DiscFilterRequestTest { public void testGetHeadersAsList() { URI uri = URI.create("http://localhost:8080/test"); HttpRequest httpReq = newRequest(uri, HttpRequest.Method.GET, HttpRequest.Version.HTTP_1_1); - DiscFilterRequest request = new JdiscFilterRequest(httpReq); + DiscFilterRequest request = new DiscFilterRequest(httpReq); Assert.assertNotNull(request.getHeaderNamesAsList()); Assert.assertEquals(request.getHeaderNamesAsList().size(), 0); @@ -207,13 +206,13 @@ public class DiscFilterRequestTest { URI uri = URI.create("http://localhost:8080/test"); HttpRequest httpReq = newRequest(uri, HttpRequest.Method.GET, HttpRequest.Version.HTTP_1_1); httpReq.headers().add(HttpHeaders.Names.CONTENT_TYPE, "multipart/form-data"); - DiscFilterRequest request = new JdiscFilterRequest(httpReq); + DiscFilterRequest request = new DiscFilterRequest(httpReq); Assert.assertTrue(DiscFilterRequest.isMultipart(request)); httpReq = newRequest(uri, HttpRequest.Method.GET, HttpRequest.Version.HTTP_1_1); httpReq.headers().add(HttpHeaders.Names.CONTENT_TYPE, "text/html;charset=UTF-8"); - request = new JdiscFilterRequest(httpReq); + request = new DiscFilterRequest(httpReq); Assert.assertFalse(DiscFilterRequest.isMultipart(request)); @@ -221,7 +220,7 @@ public class DiscFilterRequestTest { httpReq = newRequest(uri, HttpRequest.Method.GET, HttpRequest.Version.HTTP_1_1); - request = new JdiscFilterRequest(httpReq); + request = new DiscFilterRequest(httpReq); Assert.assertFalse(DiscFilterRequest.isMultipart(request)); } @@ -230,7 +229,7 @@ public class DiscFilterRequestTest { URI uri = URI.create("http://example.yahoo.com:8080/test"); HttpRequest httpReq = newRequest(uri, HttpRequest.Method.GET, HttpRequest.Version.HTTP_1_1); - DiscFilterRequest request = new JdiscFilterRequest(httpReq); + DiscFilterRequest request = new DiscFilterRequest(httpReq); Assert.assertEquals(69, request.getRemotePort()); Assert.assertEquals(8080, request.getLocalPort()); @@ -247,13 +246,13 @@ public class 
DiscFilterRequestTest { public void testCharacterEncoding() { URI uri = URI.create("http://example.yahoo.com:8080/test"); HttpRequest httpReq = newRequest(uri, HttpRequest.Method.GET, HttpRequest.Version.HTTP_1_1); - DiscFilterRequest request = new JdiscFilterRequest(httpReq); + DiscFilterRequest request = new DiscFilterRequest(httpReq); request.setHeaders(HttpHeaders.Names.CONTENT_TYPE, "text/html;charset=UTF-8"); Assert.assertEquals(request.getCharacterEncoding(), "UTF-8"); httpReq = newRequest(uri, HttpRequest.Method.GET, HttpRequest.Version.HTTP_1_1); - request = new JdiscFilterRequest(httpReq); + request = new DiscFilterRequest(httpReq); request.setHeaders(HttpHeaders.Names.CONTENT_TYPE, "text/html"); request.setCharacterEncoding("UTF-8"); @@ -267,7 +266,7 @@ public class DiscFilterRequestTest { public void testSetScheme() { URI uri = URI.create("https://example.yahoo.com:8080/test"); HttpRequest httpReq = newRequest(uri, HttpRequest.Method.GET, HttpRequest.Version.HTTP_1_1); - DiscFilterRequest request = new JdiscFilterRequest(httpReq); + DiscFilterRequest request = new DiscFilterRequest(httpReq); request.setScheme("http", true); System.out.println(request.getUri().toString()); @@ -279,7 +278,7 @@ public class DiscFilterRequestTest { public void testGetServerPort() { URI uri = URI.create("http://example.yahoo.com/test"); HttpRequest httpReq = newRequest(uri, HttpRequest.Method.GET, HttpRequest.Version.HTTP_1_1); - DiscFilterRequest request = new JdiscFilterRequest(httpReq); + DiscFilterRequest request = new DiscFilterRequest(httpReq); Assert.assertEquals(request.getServerPort(), 80); request.setUri(URI.create("https://example.yahoo.com/test")); @@ -292,7 +291,7 @@ public class DiscFilterRequestTest { public void testIsSecure() { URI uri = URI.create("http://example.yahoo.com/test"); HttpRequest httpReq = newRequest(uri, HttpRequest.Method.GET, HttpRequest.Version.HTTP_1_1); - DiscFilterRequest request = new JdiscFilterRequest(httpReq); + DiscFilterRequest 
request = new DiscFilterRequest(httpReq); Assert.assertFalse(request.isSecure()); request.setUri(URI.create("https://example.yahoo.com/test")); @@ -304,7 +303,7 @@ public class DiscFilterRequestTest { public void requireThatUnresolvableRemoteAddressesAreSupported() { URI uri = URI.create("http://doesnotresolve.zzz:8080/test"); HttpRequest httpReq = newRequest(uri, HttpRequest.Method.GET, HttpRequest.Version.HTTP_1_1); - DiscFilterRequest request = new JdiscFilterRequest(httpReq); + DiscFilterRequest request = new DiscFilterRequest(httpReq); Assert.assertNull(request.getLocalAddr()); } @@ -315,7 +314,7 @@ public class DiscFilterRequestTest { httpReq.headers().add("key1", "value1"); httpReq.headers().add("key2", Arrays.asList("value1","value2")); - DiscFilterRequest request = new JdiscFilterRequest(httpReq); + DiscFilterRequest request = new DiscFilterRequest(httpReq); HeaderFields headers = request.getUntreatedHeaders(); Assert.assertEquals(headers.keySet().size(), 2); Assert.assertEquals(headers.get("key1").get(0), "value1" ); @@ -328,7 +327,7 @@ public class DiscFilterRequestTest { URI uri = URI.create("http://example.yahoo.com/test"); HttpRequest httpReq = newRequest(uri, HttpRequest.Method.GET, HttpRequest.Version.HTTP_1_1); httpReq.headers().put(HttpHeaders.Names.COOKIE, "XYZ=value"); - DiscFilterRequest request = new JdiscFilterRequest(httpReq); + DiscFilterRequest request = new DiscFilterRequest(httpReq); request.clearCookies(); Assert.assertNull(request.getHeader(HttpHeaders.Names.COOKIE)); } @@ -338,7 +337,7 @@ public class DiscFilterRequestTest { URI uri = URI.create("http://example.yahoo.com/test"); HttpRequest httpReq = newRequest(uri, HttpRequest.Method.GET, HttpRequest.Version.HTTP_1_1); httpReq.headers().put(HttpHeaders.Names.COOKIE, "XYZ=value"); - DiscFilterRequest request = new JdiscFilterRequest(httpReq); + DiscFilterRequest request = new DiscFilterRequest(httpReq); JDiscCookieWrapper[] wrappers = request.getWrappedCookies(); 
Assert.assertEquals(wrappers.length ,1); Assert.assertEquals(wrappers[0].getName(), "XYZ"); @@ -349,7 +348,7 @@ public class DiscFilterRequestTest { public void testAddCookie() { URI uri = URI.create("http://example.yahoo.com/test"); HttpRequest httpReq = newRequest(uri, HttpRequest.Method.GET, HttpRequest.Version.HTTP_1_1); - DiscFilterRequest request = new JdiscFilterRequest(httpReq); + DiscFilterRequest request = new DiscFilterRequest(httpReq); request.addCookie(JDiscCookieWrapper.wrap(new Cookie("name", "value"))); List<Cookie> cookies = request.getCookies(); diff --git a/container-core/src/test/java/com/yahoo/jdisc/http/filter/DiscFilterResponseTest.java b/container-core/src/test/java/com/yahoo/jdisc/http/filter/DiscFilterResponseTest.java index c49a4deed4e..bbb81ae3308 100644 --- a/container-core/src/test/java/com/yahoo/jdisc/http/filter/DiscFilterResponseTest.java +++ b/container-core/src/test/java/com/yahoo/jdisc/http/filter/DiscFilterResponseTest.java @@ -36,7 +36,7 @@ public class DiscFilterResponseTest { public void testGetSetStatus() { HttpRequest request = newRequest(URI.create("http://localhost:8080/echo"), HttpRequest.Method.GET, HttpRequest.Version.HTTP_1_1); - DiscFilterResponse response = new JdiscFilterResponse(HttpResponse.newInstance(HttpResponse.Status.OK)); + DiscFilterResponse response = new DiscFilterResponse(HttpResponse.newInstance(HttpResponse.Status.OK)); Assert.assertEquals(response.getStatus(), HttpResponse.Status.OK); response.setStatus(HttpResponse.Status.REQUEST_TIMEOUT); @@ -47,7 +47,7 @@ public class DiscFilterResponseTest { public void testAttributes() { HttpRequest request = newRequest(URI.create("http://localhost:8080/echo"), HttpRequest.Method.GET, HttpRequest.Version.HTTP_1_1); - DiscFilterResponse response = new JdiscFilterResponse(HttpResponse.newInstance(HttpResponse.Status.OK)); + DiscFilterResponse response = new DiscFilterResponse(HttpResponse.newInstance(HttpResponse.Status.OK)); response.setAttribute("attr_1", 
"value1"); Assert.assertEquals(response.getAttribute("attr_1"), "value1"); List<String> list = Collections.list(response.getAttributeNames()); @@ -60,7 +60,7 @@ public class DiscFilterResponseTest { public void testAddHeader() { HttpRequest request = newRequest(URI.create("http://localhost:8080/echo"), HttpRequest.Method.GET, HttpRequest.Version.HTTP_1_1); - DiscFilterResponse response = new JdiscFilterResponse(HttpResponse.newInstance(HttpResponse.Status.OK)); + DiscFilterResponse response = new DiscFilterResponse(HttpResponse.newInstance(HttpResponse.Status.OK)); response.addHeader("header1", "value1"); Assert.assertEquals(response.getHeader("header1"), "value1"); } @@ -70,7 +70,7 @@ public class DiscFilterResponseTest { URI uri = URI.create("http://example.com/test"); HttpRequest httpReq = newRequest(uri, HttpRequest.Method.GET, HttpRequest.Version.HTTP_1_1); HttpResponse httpResp = newResponse(httpReq, 200); - DiscFilterResponse response = new JdiscFilterResponse(httpResp); + DiscFilterResponse response = new DiscFilterResponse(httpResp); response.addCookie(JDiscCookieWrapper.wrap(new Cookie("name", "value"))); List<Cookie> cookies = response.getCookies(); @@ -83,7 +83,7 @@ public class DiscFilterResponseTest { URI uri = URI.create("http://example.com/test"); HttpRequest httpReq = newRequest(uri, HttpRequest.Method.GET, HttpRequest.Version.HTTP_1_1); HttpResponse httpResp = newResponse(httpReq, 200); - DiscFilterResponse response = new JdiscFilterResponse(httpResp); + DiscFilterResponse response = new DiscFilterResponse(httpResp); response.setCookie("name", "value"); List<Cookie> cookies = response.getCookies(); Assert.assertEquals(cookies.size(),1); @@ -96,17 +96,18 @@ public class DiscFilterResponseTest { URI uri = URI.create("http://example.com/test"); HttpRequest httpReq = newRequest(uri, HttpRequest.Method.GET, HttpRequest.Version.HTTP_1_1); HttpResponse httpResp = newResponse(httpReq, 200); - DiscFilterResponse response = new 
JdiscFilterResponse(httpResp); + DiscFilterResponse response = new DiscFilterResponse(httpResp); response.setHeader("name", "value"); Assert.assertEquals(response.getHeader("name"), "value"); } @Test + @SuppressWarnings("removal") public void testGetParentResponse() { URI uri = URI.create("http://example.com/test"); HttpRequest httpReq = newRequest(uri, HttpRequest.Method.GET, HttpRequest.Version.HTTP_1_1); HttpResponse httpResp = newResponse(httpReq, 200); - DiscFilterResponse response = new JdiscFilterResponse(httpResp); + DiscFilterResponse response = new DiscFilterResponse(httpResp); Assert.assertSame(response.getParentResponse(), httpResp); } diff --git a/container-core/src/test/java/com/yahoo/jdisc/http/filter/ServletFilterRequestTest.java b/container-core/src/test/java/com/yahoo/jdisc/http/filter/ServletFilterRequestTest.java deleted file mode 100644 index dfd240d3723..00000000000 --- a/container-core/src/test/java/com/yahoo/jdisc/http/filter/ServletFilterRequestTest.java +++ /dev/null @@ -1,173 +0,0 @@ -// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -package com.yahoo.jdisc.http.filter; - -import com.yahoo.jdisc.http.Cookie; -import com.yahoo.jdisc.http.HttpHeaders; -import com.yahoo.jdisc.http.server.jetty.JettyMockRequestBuilder; -import com.yahoo.jdisc.http.servlet.ServletRequest; -import org.eclipse.jetty.server.Request; -import org.junit.Before; -import org.junit.Test; - -import java.net.URI; -import java.util.Arrays; -import java.util.Collections; -import java.util.List; - -import static com.yahoo.jdisc.http.HttpRequest.Version; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; - -/** - * Test the parts of the DiscFilterRequest API that are implemented - * by ServletFilterRequest, both directly and indirectly via - * {@link com.yahoo.jdisc.http.servlet.ServletRequest}. 
- * - * @author gjoranv - */ -public class ServletFilterRequestTest { - - private final String host = "host1"; - private final int port = 8080; - private final String path = "/path1"; - private final String paramName = "param1"; - private final String paramValue = "p1"; - private final String listParamName = "listParam"; - private final String[] listParamValue = new String[]{"1", "2"}; - private final String headerName = "header1"; - private final String headerValue = "h1"; - private final String attributeName = "attribute1"; - private final String attributeValue = "a1"; - - private URI uri; - private DiscFilterRequest filterRequest; - private ServletRequest parentRequest; - - @Before - public void init() throws Exception { - uri = new URI("http", null, host, port, path, paramName + "=" + paramValue, null); - - filterRequest = new ServletFilterRequest(newServletRequest()); - parentRequest = ((ServletFilterRequest)filterRequest).getServletRequest(); - } - - private ServletRequest newServletRequest() { - Request parent = JettyMockRequestBuilder.newBuilder() - .remote("1.2.3.4", host, port) - .header(headerName, List.of(headerValue)) - .parameter(paramName, List.of(paramValue)) - .parameter(listParamName, List.of(listParamValue)) - .attribute(attributeName, attributeValue) - .build(); - return new ServletRequest(parent, uri); - } - - @Test - public void parent_properties_are_propagated_to_disc_filter_request() throws Exception { - assertEquals(filterRequest.getVersion(), Version.HTTP_1_1); - assertEquals(filterRequest.getMethod(), "GET"); - assertEquals(filterRequest.getUri(), uri); - assertEquals(filterRequest.getRemoteHost(), host); - assertEquals(filterRequest.getRemotePort(), port); - assertEquals(filterRequest.getRequestURI(), path); // getRequestUri return only the path by design - - assertEquals(filterRequest.getParameter(paramName), paramValue); - assertEquals(filterRequest.getParameterMap().get(paramName), - Collections.singletonList(paramValue)); - 
assertEquals(filterRequest.getParameterValuesAsList(listParamName), Arrays.asList(listParamValue)); - - assertEquals(filterRequest.getHeader(headerName), headerValue); - assertEquals(filterRequest.getAttribute(attributeName), attributeValue); - } - - @Test - public void untreatedHeaders_is_populated_from_the_parent_request() { - assertEquals(filterRequest.getUntreatedHeaders().getFirst(headerName), headerValue); - } - - @Test - @SuppressWarnings("deprecation") - public void uri_can_be_set() throws Exception { - URI newUri = new URI("http", null, host, port + 1, path, paramName + "=" + paramValue, null); - filterRequest.setUri(newUri); - - assertEquals(filterRequest.getUri(), newUri); - assertEquals(parentRequest.getUri(), newUri); - } - - @Test - public void attributes_can_be_set() throws Exception { - String name = "newAttribute"; - String value = name + "Value"; - filterRequest.setAttribute(name, value); - - assertEquals(filterRequest.getAttribute(name), value); - assertEquals(parentRequest.getAttribute(name), value); - } - - @Test - public void attributes_can_be_removed() { - filterRequest.removeAttribute(attributeName); - - assertEquals(filterRequest.getAttribute(attributeName), null); - assertEquals(parentRequest.getAttribute(attributeName), null); - } - - @Test - public void headers_can_be_set() throws Exception { - String name = "myHeader"; - String value = name + "Value"; - filterRequest.setHeaders(name, value); - - assertEquals(filterRequest.getHeader(name), value); - assertEquals(parentRequest.getHeader(name), value); - } - - @Test - public void headers_can_be_removed() throws Exception { - filterRequest.removeHeaders(headerName); - - assertEquals(filterRequest.getHeader(headerName), null); - assertEquals(parentRequest.getHeader(headerName), null); - } - - @Test - public void headers_can_be_added() { - String value = "h2"; - filterRequest.addHeader(headerName, value); - - List<String> expected = Arrays.asList(headerValue, value); - 
assertEquals(filterRequest.getHeadersAsList(headerName), expected); - assertEquals(Collections.list(parentRequest.getHeaders(headerName)), expected); - } - - @Test - public void cookies_can_be_added_and_removed() { - Cookie cookie = new Cookie("name", "value"); - filterRequest.addCookie(JDiscCookieWrapper.wrap(cookie)); - - assertEquals(filterRequest.getCookies(), Collections.singletonList(cookie)); - assertEquals(parentRequest.getCookies().length, 1); - - javax.servlet.http.Cookie servletCookie = parentRequest.getCookies()[0]; - assertEquals(servletCookie.getName(), cookie.getName()); - assertEquals(servletCookie.getValue(), cookie.getValue()); - - filterRequest.clearCookies(); - assertTrue(filterRequest.getCookies().isEmpty()); - assertEquals(parentRequest.getCookies().length, 0); - } - - @Test - public void character_encoding_can_be_set() throws Exception { - // ContentType must be non-null before setting character encoding - filterRequest.setHeaders(HttpHeaders.Names.CONTENT_TYPE, ""); - - String encoding = "myEncoding"; - filterRequest.setCharacterEncoding(encoding); - - assertTrue(filterRequest.getCharacterEncoding().contains(encoding)); - assertTrue(parentRequest.getCharacterEncoding().contains(encoding)); - } - -} diff --git a/container-core/src/test/java/com/yahoo/jdisc/http/filter/ServletFilterResponseTest.java b/container-core/src/test/java/com/yahoo/jdisc/http/filter/ServletFilterResponseTest.java deleted file mode 100644 index 1b49ad7ddd1..00000000000 --- a/container-core/src/test/java/com/yahoo/jdisc/http/filter/ServletFilterResponseTest.java +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
-package com.yahoo.jdisc.http.filter; - -import com.yahoo.jdisc.http.Cookie; -import com.yahoo.jdisc.http.HttpHeaders; -import com.yahoo.jdisc.http.servlet.ServletResponse; -import org.junit.Before; -import org.junit.Test; - -import java.util.Arrays; - -import static org.junit.Assert.assertEquals; - -/** - * @author gjoranv - * @since 5.27 - */ -public class ServletFilterResponseTest { - - private final String headerName = "header1"; - private final String headerValue = "h1"; - - private DiscFilterResponse filterResponse; - private ServletResponse parentResponse; - - @Before - public void init() throws Exception { - filterResponse = new ServletFilterResponse(newServletResponse()); - parentResponse = ((ServletFilterResponse)filterResponse).getServletResponse(); - - } - - private ServletResponse newServletResponse() throws Exception { - MockServletResponse parent = new MockServletResponse(); - parent.addHeader(headerName, headerValue); - return new ServletResponse(parent); - } - - - @Test - public void headers_can_be_set() throws Exception { - String name = "myHeader"; - String value = name + "Value"; - filterResponse.setHeaders(name, value); - - assertEquals(filterResponse.getHeader(name), value); - assertEquals(parentResponse.getHeader(name), value); - } - - @Test - public void headers_can_be_added() throws Exception { - String newValue = "h2"; - filterResponse.addHeader(headerName, newValue); - - // The DiscFilterResponse has no getHeaders() - assertEquals(filterResponse.getHeader(headerName), newValue); - - assertEquals(parentResponse.getHeaders(headerName), Arrays.asList(headerValue, newValue)); - } - - @Test - public void headers_can_be_removed() throws Exception { - filterResponse.removeHeaders(headerName); - - assertEquals(filterResponse.getHeader(headerName), null); - assertEquals(parentResponse.getHeader(headerName), null); - } - - @Test - public void set_cookie_overwrites_old_values() { - Cookie to_be_removed = new Cookie("to-be-removed", ""); - Cookie 
to_keep = new Cookie("to-keep", ""); - filterResponse.setCookie(to_be_removed.getName(), to_be_removed.getValue()); - filterResponse.setCookie(to_keep.getName(), to_keep.getValue()); - - assertEquals(filterResponse.getCookies(), Arrays.asList(to_keep)); - assertEquals(parentResponse.getHeaders(HttpHeaders.Names.SET_COOKIE), Arrays.asList(to_keep.toString())); - } - - - private static class MockServletResponse extends org.eclipse.jetty.server.Response { - private MockServletResponse() { - super(null, null); - } - } - -} diff --git a/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/HttpResponseStatisticsCollectorTest.java b/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/HttpResponseStatisticsCollectorTest.java index a08b9cfa96e..231a9f1384a 100644 --- a/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/HttpResponseStatisticsCollectorTest.java +++ b/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/HttpResponseStatisticsCollectorTest.java @@ -31,6 +31,7 @@ import static org.hamcrest.Matchers.equalTo; /** * @author ollivir + * @author bjorncs */ public class HttpResponseStatisticsCollectorTest { @@ -47,8 +48,9 @@ public class HttpResponseStatisticsCollectorTest { testRequest("http", 200, "GET"); var stats = collector.takeStatistics(); - assertStatisticsEntryPresent(stats, "http", "GET", MetricDefinitions.RESPONSES_2XX, 1L); - assertStatisticsEntryPresent(stats, "http", "GET", MetricDefinitions.RESPONSES_3XX, 2L); + assertStatisticsEntry(stats, "http", "GET", MetricDefinitions.RESPONSES_2XX, "read", 200, 1L); + assertStatisticsEntry(stats, "http", "GET", MetricDefinitions.RESPONSES_3XX, "read", 301, 1L); + assertStatisticsEntry(stats, "http", "GET", MetricDefinitions.RESPONSES_3XX, "read", 300, 1L); } @Test @@ -65,24 +67,27 @@ public class HttpResponseStatisticsCollectorTest { testRequest("https", 200, "POST"); var stats = collector.takeStatistics(); - assertStatisticsEntryPresent(stats, "http", "GET", 
MetricDefinitions.RESPONSES_2XX, 1L); - assertStatisticsEntryPresent(stats, "http", "GET", MetricDefinitions.RESPONSES_4XX, 1L); - assertStatisticsEntryPresent(stats, "http", "PUT", MetricDefinitions.RESPONSES_2XX, 1L); - assertStatisticsEntryPresent(stats, "http", "POST", MetricDefinitions.RESPONSES_2XX, 2L); - assertStatisticsEntryPresent(stats, "https", "GET", MetricDefinitions.RESPONSES_4XX, 1L); - assertStatisticsEntryPresent(stats, "https", "POST", MetricDefinitions.RESPONSES_2XX, 4L); + assertStatisticsEntry(stats, "http", "GET", MetricDefinitions.RESPONSES_2XX, "read", 200, 1L); + assertStatisticsEntry(stats, "http", "GET", MetricDefinitions.RESPONSES_4XX, "read", 404, 1L); + assertStatisticsEntry(stats, "http", "PUT", MetricDefinitions.RESPONSES_2XX, "write", 200, 1L); + assertStatisticsEntry(stats, "http", "POST", MetricDefinitions.RESPONSES_2XX, "write", 200, 2L); + assertStatisticsEntry(stats, "https", "GET", MetricDefinitions.RESPONSES_4XX, "read", 404, 1L); + assertStatisticsEntry(stats, "https", "POST", MetricDefinitions.RESPONSES_2XX, "write", 200, 4L); } @Test + @SuppressWarnings("removal") public void statistics_include_grouped_and_single_statuscodes() { testRequest("http", 401, "GET"); testRequest("http", 404, "GET"); testRequest("http", 403, "GET"); var stats = collector.takeStatistics(); - assertStatisticsEntryPresent(stats, "http", "GET", MetricDefinitions.RESPONSES_4XX, 3L); - assertStatisticsEntryPresent(stats, "http", "GET", MetricDefinitions.RESPONSES_401, 1L); - assertStatisticsEntryPresent(stats, "http", "GET", MetricDefinitions.RESPONSES_403, 1L); + assertStatisticsEntry(stats, "http", "GET", MetricDefinitions.RESPONSES_4XX, "read", 401, 1L); + assertStatisticsEntry(stats, "http", "GET", MetricDefinitions.RESPONSES_4XX, "read", 403, 1L); + assertStatisticsEntry(stats, "http", "GET", MetricDefinitions.RESPONSES_4XX, "read", 404, 1L); + assertStatisticsEntry(stats, "http", "GET", MetricDefinitions.RESPONSES_401, "read", 401, 1L); + 
assertStatisticsEntry(stats, "http", "GET", MetricDefinitions.RESPONSES_403, "read", 403, 1L); } @@ -92,12 +97,12 @@ public class HttpResponseStatisticsCollectorTest { testRequest("http", 200, "GET"); var stats = collector.takeStatistics(); - assertStatisticsEntryPresent(stats, "http", "GET", MetricDefinitions.RESPONSES_2XX, 2L); + assertStatisticsEntry(stats, "http", "GET", MetricDefinitions.RESPONSES_2XX, "read", 200, 2L); testRequest("http", 200, "GET"); stats = collector.takeStatistics(); - assertStatisticsEntryPresent(stats, "http", "GET", MetricDefinitions.RESPONSES_2XX, 1L); + assertStatisticsEntry(stats, "http", "GET", MetricDefinitions.RESPONSES_2XX, "read", 200, 1L); } @Test @@ -108,15 +113,15 @@ public class HttpResponseStatisticsCollectorTest { testRequest("http", 200, "GET", "/status.html?foo=bar"); var stats = collector.takeStatistics(); - assertStatisticsEntryWithRequestTypePresent(stats, "http", "GET", MetricDefinitions.RESPONSES_2XX, "monitoring", 1L); - assertStatisticsEntryWithRequestTypePresent(stats, "http", "GET", MetricDefinitions.RESPONSES_2XX, "read", 1L); - assertStatisticsEntryWithRequestTypePresent(stats, "http", "POST", MetricDefinitions.RESPONSES_2XX, "read", 1L); - assertStatisticsEntryWithRequestTypePresent(stats, "http", "POST", MetricDefinitions.RESPONSES_2XX, "write", 1L); + assertStatisticsEntry(stats, "http", "GET", MetricDefinitions.RESPONSES_2XX, "monitoring", 200, 1L); + assertStatisticsEntry(stats, "http", "GET", MetricDefinitions.RESPONSES_2XX, "read", 200, 1L); + assertStatisticsEntry(stats, "http", "POST", MetricDefinitions.RESPONSES_2XX, "read", 200, 1L); + assertStatisticsEntry(stats, "http", "POST", MetricDefinitions.RESPONSES_2XX, "write", 200, 1L); testRequest("http", 200, "GET"); stats = collector.takeStatistics(); - assertStatisticsEntryPresent(stats, "http", "GET", MetricDefinitions.RESPONSES_2XX, 1L); + assertStatisticsEntry(stats, "http", "GET", MetricDefinitions.RESPONSES_2XX, "read", 200, 1L); } @Test @@ 
-124,7 +129,7 @@ public class HttpResponseStatisticsCollectorTest { testRequest("http", 200, "GET", "/search", com.yahoo.jdisc.Request.RequestType.WRITE); var stats = collector.takeStatistics(); - assertStatisticsEntryWithRequestTypePresent(stats, "http", "GET", MetricDefinitions.RESPONSES_2XX, "write", 1L); + assertStatisticsEntry(stats, "http", "GET", MetricDefinitions.RESPONSES_2XX, "write", 200, 1L); } @Before @@ -172,18 +177,14 @@ public class HttpResponseStatisticsCollectorTest { return req; } - private static void assertStatisticsEntryPresent(List<StatisticsEntry> result, String scheme, String method, String name, long expectedValue) { + private static void assertStatisticsEntry(List<StatisticsEntry> result, String scheme, String method, String name, + String requestType, int statusCode, long expectedValue) { long value = result.stream() - .filter(entry -> entry.method.equals(method) && entry.scheme.equals(scheme) && entry.name.equals(name)) - .mapToLong(entry -> entry.value) - .findAny() - .orElseThrow(() -> new AssertionError(String.format("Not matching entry in result (scheme=%s, method=%s, name=%s)", scheme, method, name))); - assertThat(value, equalTo(expectedValue)); - } - - private static void assertStatisticsEntryWithRequestTypePresent(List<StatisticsEntry> result, String scheme, String method, String name, String requestType, long expectedValue) { - long value = result.stream() - .filter(entry -> entry.method.equals(method) && entry.scheme.equals(scheme) && entry.name.equals(name) && entry.requestType.equals(requestType)) + .filter(entry -> entry.dimensions.method.equals(method) + && entry.dimensions.scheme.equals(scheme) + && entry.name.equals(name) + && entry.dimensions.requestType.equals(requestType) + && entry.dimensions.statusCode == statusCode) .mapToLong(entry -> entry.value) .reduce(Long::sum) .orElseThrow(() -> new AssertionError(String.format("Not matching entry in result (scheme=%s, method=%s, name=%s, type=%s)", scheme, method, name, 
requestType))); diff --git a/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/HttpServerTest.java b/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/HttpServerTest.java index fc63e65c395..6fc4f531bee 100644 --- a/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/HttpServerTest.java +++ b/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/HttpServerTest.java @@ -590,20 +590,20 @@ public class HttpServerTest { { driver.client().newPost("/status.html").execute(); var entry = waitForStatistics(statisticsCollector); - assertEquals("http", entry.scheme); - assertEquals("POST", entry.method); + assertEquals("http", entry.dimensions.scheme); + assertEquals("POST", entry.dimensions.method); assertEquals("http.status.2xx", entry.name); - assertEquals("write", entry.requestType); + assertEquals("write", entry.dimensions.requestType); assertEquals(1, entry.value); } { driver.client().newGet("/status.html").execute(); var entry = waitForStatistics(statisticsCollector); - assertEquals("http", entry.scheme); - assertEquals("GET", entry.method); + assertEquals("http", entry.dimensions.scheme); + assertEquals("GET", entry.dimensions.method); assertEquals("http.status.2xx", entry.name); - assertEquals("read", entry.requestType); + assertEquals("read", entry.dimensions.requestType); assertEquals(1, entry.value); } @@ -611,7 +611,7 @@ public class HttpServerTest { handler.setRequestType(Request.RequestType.READ); driver.client().newPost("/status.html").execute(); var entry = waitForStatistics(statisticsCollector); - assertEquals("Handler overrides request type", "read", entry.requestType); + assertEquals("Handler overrides request type", "read", entry.dimensions.requestType); } assertTrue(driver.close()); diff --git a/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/SslHandshakeMetricsTest.java b/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/SslHandshakeMetricsTest.java index ef3e52304c6..677fb2dbf6d 
100644 --- a/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/SslHandshakeMetricsTest.java +++ b/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/SslHandshakeMetricsTest.java @@ -11,6 +11,7 @@ import javax.net.ssl.SSLContext; import javax.net.ssl.SSLException; import javax.net.ssl.SSLHandshakeException; import java.io.IOException; +import java.net.SocketException; import java.nio.file.Path; import java.time.Instant; import java.time.temporal.ChronoUnit; @@ -169,7 +170,7 @@ class SslHandshakeMetricsTest { fail("SSLHandshakeException expected"); } catch (SSLHandshakeException e) { assertThat(e.getMessage()).contains(expectedExceptionSubstring); - } catch (SSLException e) { + } catch (SocketException | SSLException e) { // This exception is thrown if Apache httpclient's write thread detects the handshake failure before the read thread. log.log(Level.WARNING, "Client failed to get a proper TLS handshake response: " + e.getMessage(), e); // Only ignore a subset of exceptions diff --git a/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/servlet/JDiscFilterForServletTest.java b/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/servlet/JDiscFilterForServletTest.java deleted file mode 100644 index c6d416b2b99..00000000000 --- a/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/servlet/JDiscFilterForServletTest.java +++ /dev/null @@ -1,165 +0,0 @@ -// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
-package com.yahoo.jdisc.http.server.jetty.servlet; - -import com.google.inject.AbstractModule; -import com.google.inject.Module; -import com.google.inject.util.Modules; -import com.yahoo.jdisc.AbstractResource; -import com.yahoo.jdisc.Request; -import com.yahoo.jdisc.Response; -import com.yahoo.jdisc.handler.ContentChannel; -import com.yahoo.jdisc.handler.ResponseHandler; -import com.yahoo.jdisc.http.HttpRequest; -import com.yahoo.jdisc.http.filter.RequestFilter; -import com.yahoo.jdisc.http.filter.ResponseFilter; -import com.yahoo.jdisc.http.server.jetty.FilterBindings; -import com.yahoo.jdisc.http.server.jetty.FilterInvoker; -import com.yahoo.jdisc.http.server.jetty.SimpleHttpClient.ResponseValidator; -import com.yahoo.jdisc.http.server.jetty.JettyTestDriver; -import org.junit.Test; - -import javax.servlet.http.HttpServletRequest; -import javax.servlet.http.HttpServletResponse; -import java.io.IOException; -import java.net.URI; -import java.nio.ByteBuffer; -import java.nio.charset.StandardCharsets; - -import static org.hamcrest.CoreMatchers.containsString; -import static org.hamcrest.CoreMatchers.is; - -/** - * @author Tony Vaagenes - * @author bjorncs - */ -public class JDiscFilterForServletTest extends ServletTestBase { - @Test - public void request_filter_can_return_response() throws IOException, InterruptedException { - JettyTestDriver testDriver = requestFilterTestDriver(); - ResponseValidator response = httpGet(testDriver, TestServlet.PATH).execute(); - - response.expectContent(containsString(TestRequestFilter.responseContent)); - } - - @Test - public void request_can_be_forwarded_through_request_filter_to_servlet() throws IOException { - JettyTestDriver testDriver = requestFilterTestDriver(); - ResponseValidator response = httpGet(testDriver, TestServlet.PATH). - addHeader(TestRequestFilter.BYPASS_FILTER_HEADER, Boolean.TRUE.toString()). 
- execute(); - - response.expectContent(containsString(TestServlet.RESPONSE_CONTENT)); - } - - @Test - public void response_filter_can_modify_response() throws IOException { - JettyTestDriver testDriver = responseFilterTestDriver(); - ResponseValidator response = httpGet(testDriver, TestServlet.PATH).execute(); - - response.expectHeader(TestResponseFilter.INVOKED_HEADER, is(Boolean.TRUE.toString())); - } - - @Test - public void response_filter_is_run_on_empty_sync_response() throws IOException { - JettyTestDriver testDriver = responseFilterTestDriver(); - ResponseValidator response = httpGet(testDriver, NoContentTestServlet.PATH).execute(); - - response.expectHeader(TestResponseFilter.INVOKED_HEADER, is(Boolean.TRUE.toString())); - } - - @Test - public void response_filter_is_run_on_empty_async_response() throws IOException { - JettyTestDriver testDriver = responseFilterTestDriver(); - ResponseValidator response = httpGet(testDriver, NoContentTestServlet.PATH). - addHeader(NoContentTestServlet.HEADER_ASYNC, Boolean.TRUE.toString()). 
- execute(); - - response.expectHeader(TestResponseFilter.INVOKED_HEADER, is(Boolean.TRUE.toString())); - } - - private JettyTestDriver requestFilterTestDriver() throws IOException { - FilterBindings filterBindings = new FilterBindings.Builder() - .addRequestFilter("my-request-filter", new TestRequestFilter()) - .addRequestFilterBinding("my-request-filter", "http://*/*") - .build(); - return JettyTestDriver.newInstance(dummyRequestHandler, bindings(filterBindings)); - } - - private JettyTestDriver responseFilterTestDriver() throws IOException { - FilterBindings filterBindings = new FilterBindings.Builder() - .addResponseFilter("my-response-filter", new TestResponseFilter()) - .addResponseFilterBinding("my-response-filter", "http://*/*") - .build(); - return JettyTestDriver.newInstance(dummyRequestHandler, bindings(filterBindings)); - } - - - - private Module bindings(FilterBindings filterBindings) { - return Modules.combine( - new AbstractModule() { - @Override - protected void configure() { - bind(FilterBindings.class).toInstance(filterBindings); - bind(FilterInvoker.class).toInstance(new FilterInvoker() { - @Override - public HttpServletRequest invokeRequestFilterChain( - RequestFilter requestFilter, - URI uri, - HttpServletRequest httpRequest, - ResponseHandler responseHandler) { - TestRequestFilter filter = (TestRequestFilter) requestFilter; - filter.runAsSecurityFilter(httpRequest, responseHandler); - return httpRequest; - } - - @Override - public void invokeResponseFilterChain( - ResponseFilter responseFilter, - URI uri, - HttpServletRequest request, - HttpServletResponse response) { - - TestResponseFilter filter = (TestResponseFilter) responseFilter; - filter.runAsSecurityFilter(request, response); - } - }); - } - }, - guiceModule()); - } - - static class TestRequestFilter extends AbstractResource implements RequestFilter { - static final String simpleName = TestRequestFilter.class.getSimpleName(); - static final String responseContent = "Rejected by " + 
simpleName; - static final String BYPASS_FILTER_HEADER = "BYPASS_HEADER" + simpleName; - - @Override - public void filter(HttpRequest request, ResponseHandler handler) { - throw new UnsupportedOperationException(); - } - - public void runAsSecurityFilter(HttpServletRequest request, ResponseHandler responseHandler) { - if (Boolean.parseBoolean(request.getHeader(BYPASS_FILTER_HEADER))) - return; - - ContentChannel contentChannel = responseHandler.handleResponse(new Response(500)); - contentChannel.write(ByteBuffer.wrap(responseContent.getBytes(StandardCharsets.UTF_8)), null); - contentChannel.close(null); - } - } - - - static class TestResponseFilter extends AbstractResource implements ResponseFilter { - static final String INVOKED_HEADER = TestResponseFilter.class.getSimpleName() + "_INVOKED_HEADER"; - - @Override - public void filter(Response response, Request request) { - throw new UnsupportedClassVersionError(); - } - - public void runAsSecurityFilter(HttpServletRequest request, HttpServletResponse response) { - response.addHeader(INVOKED_HEADER, Boolean.TRUE.toString()); - } - } -} diff --git a/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/servlet/ServletAccessLoggingTest.java b/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/servlet/ServletAccessLoggingTest.java deleted file mode 100644 index 17802b7f466..00000000000 --- a/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/servlet/ServletAccessLoggingTest.java +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
-package com.yahoo.jdisc.http.server.jetty.servlet; - -import com.google.inject.AbstractModule; -import com.google.inject.Module; -import com.google.inject.util.Modules; -import com.yahoo.container.logging.AccessLog; -import com.yahoo.container.logging.RequestLog; -import com.yahoo.container.logging.RequestLogEntry; -import com.yahoo.jdisc.http.server.jetty.JettyTestDriver; -import org.junit.Test; -import org.mockito.verification.VerificationMode; - -import java.io.IOException; -import java.util.concurrent.TimeUnit; - -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.timeout; -import static org.mockito.Mockito.verify; - -/** - * @author bakksjo - * @author bjorncs - */ -public class ServletAccessLoggingTest extends ServletTestBase { - private static final long MAX_LOG_WAIT_TIME_MILLIS = TimeUnit.SECONDS.toMillis(60); - - @Test - public void accessLogIsInvokedForNonJDiscServlet() throws Exception { - final AccessLog accessLog = mock(AccessLog.class); - final JettyTestDriver testDriver = newTestDriver(accessLog); - httpGet(testDriver, TestServlet.PATH).execute(); - verifyCallsLog(accessLog, timeout(MAX_LOG_WAIT_TIME_MILLIS).times(1)); - } - - @Test - public void accessLogIsInvokedForJDiscServlet() throws Exception { - final AccessLog accessLog = mock(AccessLog.class); - final JettyTestDriver testDriver = newTestDriver(accessLog); - testDriver.client().newGet("/status.html").execute(); - verifyCallsLog(accessLog, timeout(MAX_LOG_WAIT_TIME_MILLIS).times(1)); - } - - private void verifyCallsLog(RequestLog requestLog, final VerificationMode verificationMode) { - verify(requestLog, verificationMode).log(any(RequestLogEntry.class)); - } - - private JettyTestDriver newTestDriver(RequestLog requestLog) throws IOException { - return JettyTestDriver.newInstance(dummyRequestHandler, bindings(requestLog)); - } - - private Module bindings(RequestLog requestLog) { - return Modules.combine( - new 
AbstractModule() { - @Override - protected void configure() { - bind(RequestLog.class).toInstance(requestLog); - } - }, - guiceModule()); - } -} diff --git a/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/servlet/ServletTestBase.java b/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/servlet/ServletTestBase.java deleted file mode 100644 index f13769dec38..00000000000 --- a/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/servlet/ServletTestBase.java +++ /dev/null @@ -1,132 +0,0 @@ -// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -package com.yahoo.jdisc.http.server.jetty.servlet; - -import com.google.inject.AbstractModule; -import com.google.inject.Module; -import com.google.inject.TypeLiteral; -import com.yahoo.component.ComponentId; -import com.yahoo.component.provider.ComponentRegistry; -import com.yahoo.jdisc.Request; -import com.yahoo.jdisc.handler.AbstractRequestHandler; -import com.yahoo.jdisc.handler.ContentChannel; -import com.yahoo.jdisc.handler.RequestHandler; -import com.yahoo.jdisc.handler.ResponseHandler; -import com.yahoo.jdisc.http.ServletPathsConfig; -import com.yahoo.jdisc.http.ServletPathsConfig.Servlets.Builder; -import com.yahoo.jdisc.http.server.jetty.SimpleHttpClient.RequestExecutor; -import com.yahoo.jdisc.http.server.jetty.JettyTestDriver; -import org.eclipse.jetty.servlet.ServletHolder; - -import javax.servlet.ServletException; -import javax.servlet.annotation.WebServlet; -import javax.servlet.http.HttpServlet; -import javax.servlet.http.HttpServletRequest; -import javax.servlet.http.HttpServletResponse; -import java.io.IOException; -import java.io.PrintWriter; -import java.util.List; - -/** - * @author Tony Vaagenes - * @author bakksjo - */ -public class ServletTestBase { - - private static class ServletInstance { - final ComponentId componentId; final String path; final HttpServlet instance; - - ServletInstance(ComponentId componentId, 
String path, HttpServlet instance) { - this.componentId = componentId; - this.path = path; - this.instance = instance; - } - } - - private final List<ServletInstance> servlets = List.of( - new ServletInstance(TestServlet.ID, TestServlet.PATH, new TestServlet()), - new ServletInstance(NoContentTestServlet.ID, NoContentTestServlet.PATH, new NoContentTestServlet())); - - protected RequestExecutor httpGet(JettyTestDriver testDriver, String path) { - return testDriver.client().newGet("/" + path); - } - - protected ServletPathsConfig createServletPathConfig() { - ServletPathsConfig.Builder configBuilder = new ServletPathsConfig.Builder(); - - servlets.forEach(servlet -> - configBuilder.servlets( - servlet.componentId.stringValue(), - new Builder().path(servlet.path))); - - return new ServletPathsConfig(configBuilder); - } - - protected ComponentRegistry<ServletHolder> servlets() { - ComponentRegistry<ServletHolder> result = new ComponentRegistry<>(); - - servlets.forEach(servlet -> - result.register(servlet.componentId, new ServletHolder(servlet.instance))); - - result.freeze(); - return result; - } - - protected Module guiceModule() { - return new AbstractModule() { - @Override - protected void configure() { - bind(new TypeLiteral<ComponentRegistry<ServletHolder>>(){}).toInstance(servlets()); - bind(ServletPathsConfig.class).toInstance(createServletPathConfig()); - } - }; - } - - protected static class TestServlet extends HttpServlet { - static final String PATH = "servlet/test-servlet"; - static final ComponentId ID = ComponentId.fromString("test-servlet"); - static final String RESPONSE_CONTENT = "Response from " + TestServlet.class.getSimpleName(); - - @Override - protected void doGet(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException { - response.setContentType("text/plain"); - PrintWriter writer = response.getWriter(); - writer.write(RESPONSE_CONTENT); - writer.close(); - } - } - - @WebServlet(asyncSupported = true) - 
protected static class NoContentTestServlet extends HttpServlet { - static final String HEADER_ASYNC = "HEADER_ASYNC"; - - static final String PATH = "servlet/no-content-test-servlet"; - static final ComponentId ID = ComponentId.fromString("no-content-test-servlet"); - - @Override - protected void doGet(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException { - if (request.getHeader(HEADER_ASYNC) != null) { - asyncGet(request); - } - } - - private void asyncGet(HttpServletRequest request) { - request.startAsync().start(() -> { - try { - Thread.sleep(100); - } catch (InterruptedException e) { - log("Interrupted", e); - } finally { - request.getAsyncContext().complete(); - } - }); - } - } - - - protected static final RequestHandler dummyRequestHandler = new AbstractRequestHandler() { - @Override - public ContentChannel handleRequest(Request request, ResponseHandler handler) { - throw new UnsupportedOperationException(); - } - }; -} diff --git a/container-core/src/test/java/com/yahoo/processing/ResponseTestCase.java b/container-core/src/test/java/com/yahoo/processing/ResponseTestCase.java index 0f16aed3d0b..efcf608b6f0 100644 --- a/container-core/src/test/java/com/yahoo/processing/ResponseTestCase.java +++ b/container-core/src/test/java/com/yahoo/processing/ResponseTestCase.java @@ -22,7 +22,7 @@ public class ResponseTestCase { * Check the recursive toString printing along the way. * List variable names ends by numbers specifying the index of the list at each level. 
*/ - @SuppressWarnings("unchecked") + @SuppressWarnings({"unchecked", "removal"}) @Test public void testRecursiveCompletionAndToString() throws InterruptedException, ExecutionException { // create lists diff --git a/container-core/src/test/java/com/yahoo/processing/execution/test/FutureDataTestCase.java b/container-core/src/test/java/com/yahoo/processing/execution/test/FutureDataTestCase.java index 40e7384c745..2fb32271419 100644 --- a/container-core/src/test/java/com/yahoo/processing/execution/test/FutureDataTestCase.java +++ b/container-core/src/test/java/com/yahoo/processing/execution/test/FutureDataTestCase.java @@ -25,7 +25,7 @@ import static org.junit.Assert.assertEquals; public class FutureDataTestCase { /** Run a chain which ends in a processor which returns a response containing future data. */ - @SuppressWarnings("unchecked") + @SuppressWarnings({"unchecked", "removal"}) @Test public void testFutureDataPassThrough() throws InterruptedException, ExecutionException, TimeoutException { // Set up @@ -52,7 +52,7 @@ public class FutureDataTestCase { } /** Federate to one source which returns data immediately and one who return future data */ - @SuppressWarnings("unchecked") + @SuppressWarnings({"unchecked", "removal"}) @Test public void testFederateSyncAndAsyncData() throws InterruptedException, ExecutionException, TimeoutException { // Set up @@ -88,7 +88,7 @@ public class FutureDataTestCase { } /** Register a chain which will be called when some async data is available */ - @SuppressWarnings("unchecked") + @SuppressWarnings({"unchecked", "removal"}) @Test public void testAsyncDataProcessing() throws InterruptedException, ExecutionException, TimeoutException { // Set up @@ -120,7 +120,7 @@ public class FutureDataTestCase { * When the first of the futures are done one additional chain is to be run. * When both are done another chain is to be run. 
*/ - @SuppressWarnings("unchecked") + @SuppressWarnings({"unchecked", "removal"}) @Test public void testAsyncDataProcessingOfFederatedResult() throws InterruptedException, ExecutionException, TimeoutException { // Set up diff --git a/container-core/src/test/java/com/yahoo/processing/execution/test/StreamingTestCase.java b/container-core/src/test/java/com/yahoo/processing/execution/test/StreamingTestCase.java index 1ebf01c5f33..bd1307ff77c 100644 --- a/container-core/src/test/java/com/yahoo/processing/execution/test/StreamingTestCase.java +++ b/container-core/src/test/java/com/yahoo/processing/execution/test/StreamingTestCase.java @@ -13,7 +13,6 @@ import com.yahoo.processing.test.ProcessorLibrary; import org.junit.Test; import java.util.concurrent.ExecutionException; -import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; @@ -27,7 +26,7 @@ import static org.junit.Assert.assertEquals; public class StreamingTestCase { /** Tests adding a chain which is called every time new data is added to a data list */ - @SuppressWarnings("unchecked") + @SuppressWarnings({"unchecked", "removal"}) @Test public void testStreamingData() throws InterruptedException, ExecutionException, TimeoutException { // Set up diff --git a/container-core/src/test/java/com/yahoo/processing/rendering/AsynchronousSectionedRendererTest.java b/container-core/src/test/java/com/yahoo/processing/rendering/AsynchronousSectionedRendererTest.java index ce2b54ba6ff..627081e0d3b 100644 --- a/container-core/src/test/java/com/yahoo/processing/rendering/AsynchronousSectionedRendererTest.java +++ b/container-core/src/test/java/com/yahoo/processing/rendering/AsynchronousSectionedRendererTest.java @@ -15,7 +15,6 @@ import com.yahoo.processing.response.DataList; import com.yahoo.processing.response.IncomingData; import com.yahoo.text.Utf8; import org.junit.Test; -import static org.junit.Assert.*; import java.io.IOException; import 
java.io.OutputStream; @@ -23,10 +22,16 @@ import java.nio.ByteBuffer; import java.nio.charset.Charset; import java.util.ArrayList; import java.util.List; -import java.util.concurrent.*; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Executor; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; import static org.hamcrest.CoreMatchers.equalTo; +import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertThat; +import static org.junit.Assert.assertTrue; /** * @author <a href="mailto:einarmr@yahoo-inc.com">Einar M R Rosenvinge</a> @@ -222,7 +227,7 @@ public class AsynchronousSectionedRendererTest { return render(renderer, data); } - @SuppressWarnings("unchecked") + @SuppressWarnings({"unchecked", "removal"}) public String render(Renderer renderer, DataList data) throws InterruptedException, IOException { TestContentChannel contentChannel = new TestContentChannel(); @@ -403,6 +408,7 @@ public class AsynchronousSectionedRendererTest { } @Override + @SuppressWarnings("removal") public ListenableFuture<DataList<StringData>> complete() { return new ListenableFuture<DataList<StringData>>() { @Override @@ -438,6 +444,11 @@ public class AsynchronousSectionedRendererTest { } @Override + public CompletableFuture<DataList<StringData>> completeFuture() { + return CompletableFuture.completedFuture(this); + } + + @Override public String getString() { return list.toString(); } diff --git a/container-core/src/test/java/com/yahoo/processing/test/documentation/AsyncDataProcessingInitiator.java b/container-core/src/test/java/com/yahoo/processing/test/documentation/AsyncDataProcessingInitiator.java index 67a6634b659..21731f7d714 100644 --- a/container-core/src/test/java/com/yahoo/processing/test/documentation/AsyncDataProcessingInitiator.java +++ b/container-core/src/test/java/com/yahoo/processing/test/documentation/AsyncDataProcessingInitiator.java @@ 
-3,8 +3,12 @@ package com.yahoo.processing.test.documentation; import com.google.common.util.concurrent.MoreExecutors; import com.yahoo.component.chain.Chain; -import com.yahoo.processing.*; -import com.yahoo.processing.execution.*; +import com.yahoo.processing.Processor; +import com.yahoo.processing.Request; +import com.yahoo.processing.Response; +import com.yahoo.processing.execution.Execution; +import com.yahoo.processing.execution.ExecutionWithResponse; +import com.yahoo.processing.execution.RunnableExecution; /** * A processor which registers a listener on the future completion of @@ -18,6 +22,7 @@ public class AsyncDataProcessingInitiator extends Processor { this.asyncChain=asyncChain; } + @SuppressWarnings({"removal"}) @Override public Response process(Request request, Execution execution) { Response response=execution.process(request); diff --git a/container-disc/abi-spec.json b/container-disc/abi-spec.json index 735211ff47c..d924d5196b9 100644 --- a/container-disc/abi-spec.json +++ b/container-disc/abi-spec.json @@ -109,8 +109,10 @@ "public void <init>()", "public void <init>(com.yahoo.container.jdisc.secretstore.SecretStoreConfig)", "public com.yahoo.container.jdisc.secretstore.SecretStoreConfig$Builder groups(com.yahoo.container.jdisc.secretstore.SecretStoreConfig$Groups$Builder)", + "public com.yahoo.container.jdisc.secretstore.SecretStoreConfig$Builder groups(java.util.function.Consumer)", "public com.yahoo.container.jdisc.secretstore.SecretStoreConfig$Builder groups(java.util.List)", "public com.yahoo.container.jdisc.secretstore.SecretStoreConfig$Builder awsParameterStores(com.yahoo.container.jdisc.secretstore.SecretStoreConfig$AwsParameterStores$Builder)", + "public com.yahoo.container.jdisc.secretstore.SecretStoreConfig$Builder awsParameterStores(java.util.function.Consumer)", "public com.yahoo.container.jdisc.secretstore.SecretStoreConfig$Builder awsParameterStores(java.util.List)", "public final boolean 
dispatchGetConfig(com.yahoo.config.ConfigInstance$Producer)", "public final java.lang.String getDefMd5()", diff --git a/container-disc/src/main/java/com/yahoo/container/jdisc/ZoneInfoProvider.java b/container-disc/src/main/java/com/yahoo/container/jdisc/ZoneInfoProvider.java index 30a4c740ff0..864d44886cb 100644 --- a/container-disc/src/main/java/com/yahoo/container/jdisc/ZoneInfoProvider.java +++ b/container-disc/src/main/java/com/yahoo/container/jdisc/ZoneInfoProvider.java @@ -1,10 +1,12 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.container.jdisc; +import ai.vespa.cloud.ApplicationId; import ai.vespa.cloud.Environment; import ai.vespa.cloud.Zone; import ai.vespa.cloud.ZoneInfo; import com.google.inject.Inject; +import com.yahoo.cloud.config.ApplicationIdConfig; import com.yahoo.cloud.config.ConfigserverConfig; import com.yahoo.component.AbstractComponent; import com.yahoo.container.di.componentgraph.Provider; @@ -20,8 +22,11 @@ public class ZoneInfoProvider extends AbstractComponent implements Provider<Zone private final ZoneInfo instance; @Inject - public ZoneInfoProvider(ConfigserverConfig csConfig) { - this.instance = new ZoneInfo(new Zone(Environment.valueOf(csConfig.environment()), csConfig.region())); + public ZoneInfoProvider(ConfigserverConfig csConfig, ApplicationIdConfig applicationIdConfig) { + this.instance = new ZoneInfo(new ApplicationId(applicationIdConfig.tenant(), + applicationIdConfig.application(), + applicationIdConfig.instance()), + new Zone(Environment.valueOf(csConfig.environment()), csConfig.region())); } @Override diff --git a/container-disc/src/main/java/com/yahoo/container/jdisc/metric/GarbageCollectionMetrics.java b/container-disc/src/main/java/com/yahoo/container/jdisc/metric/GarbageCollectionMetrics.java index 7bb01e76b43..879778487f5 100644 --- a/container-disc/src/main/java/com/yahoo/container/jdisc/metric/GarbageCollectionMetrics.java +++ 
b/container-disc/src/main/java/com/yahoo/container/jdisc/metric/GarbageCollectionMetrics.java @@ -62,10 +62,10 @@ public class GarbageCollectionMetrics { for(Iterator<Map.Entry<String, LinkedList<GcStats>>> it = gcStatistics.entrySet().iterator(); it.hasNext(); ) { Map.Entry<String, LinkedList<GcStats>> entry = it.next(); LinkedList<GcStats> history = entry.getValue(); - while(history.isEmpty() == false && oldestToKeep.isAfter(history.getFirst().when)) { + while( ! history.isEmpty() && oldestToKeep.isAfter(history.getFirst().when)) { history.removeFirst(); } - if(history.isEmpty()) { + if (history.isEmpty()) { it.remove(); } } diff --git a/container-disc/src/main/sh/vespa-start-container-daemon.sh b/container-disc/src/main/sh/vespa-start-container-daemon.sh index d465edb3c39..5d47392292d 100755 --- a/container-disc/src/main/sh/vespa-start-container-daemon.sh +++ b/container-disc/src/main/sh/vespa-start-container-daemon.sh @@ -19,7 +19,12 @@ cd ${VESPA_HOME} || { echo "Cannot cd to ${VESPA_HOME}" 1>&2; exit 1; } . libexec/vespa/common-env.sh -DISCRIMINATOR=`echo ${VESPA_CONFIG_ID} | md5sum | cut -d' ' -f1` +if test "$(uname -s)" = Darwin +then + DISCRIMINATOR=`echo ${VESPA_CONFIG_ID} | md5 -r | cut -d' ' -f1` +else + DISCRIMINATOR=`echo ${VESPA_CONFIG_ID} | md5sum | cut -d' ' -f1` +fi CONTAINER_HOME="${VESPA_HOME}/var/jdisc_container/${DISCRIMINATOR}/" ZOOKEEPER_LOG_FILE_PREFIX="${VESPA_HOME}/logs/vespa/zookeeper.${VESPA_SERVICE_NAME}" @@ -39,10 +44,6 @@ CP="${VESPA_HOME}/lib/jars/jdisc_core-jar-with-dependencies.jar" mkdir -p $bundlecachedir || exit 1 printenv > $cfpfile || exit 1 -# ??? TODO ??? XXX ??? 
-# LANG=en_US.utf8 -# LC_ALL=C - getconfig() { qrstartcfg="" @@ -244,6 +245,13 @@ import_cfg_var () { fi } +# TODO Vespa 8: Remove when all containers use JDK 17 +configure_illegal_access() { + if [[ "$VESPA_JDK_VERSION" = "11" ]]; then + illegal_access_option="--illegal-access=debug" + fi +} + getconfig configure_memory configure_gcopts @@ -252,6 +260,7 @@ configure_classpath configure_numactl configure_cpu configure_preload +configure_illegal_access exec $numactlcmd $envcmd java \ -Dconfig.id="${VESPA_CONFIG_ID}" \ @@ -265,6 +274,7 @@ exec $numactlcmd $envcmd java \ -XX:HeapDumpPath="${VESPA_HOME}/var/crash" \ -XX:ErrorFile="${VESPA_HOME}/var/crash/hs_err_pid%p.log" \ -XX:+ExitOnOutOfMemoryError \ + ${illegal_access_option} \ --add-opens=java.base/java.io=ALL-UNNAMED \ --add-opens=java.base/java.lang=ALL-UNNAMED \ --add-opens=java.base/java.net=ALL-UNNAMED \ diff --git a/container-messagebus/src/test/java/com/yahoo/messagebus/jdisc/MbusRequestHandlerTestCase.java b/container-messagebus/src/test/java/com/yahoo/messagebus/jdisc/MbusRequestHandlerTestCase.java index 9e009d4a40d..64b01bd047f 100644 --- a/container-messagebus/src/test/java/com/yahoo/messagebus/jdisc/MbusRequestHandlerTestCase.java +++ b/container-messagebus/src/test/java/com/yahoo/messagebus/jdisc/MbusRequestHandlerTestCase.java @@ -1,7 +1,6 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.messagebus.jdisc; -import com.google.common.util.concurrent.ListenableFuture; import com.yahoo.jdisc.Request; import com.yahoo.jdisc.Response; import com.yahoo.jdisc.application.ContainerBuilder; @@ -14,6 +13,7 @@ import com.yahoo.messagebus.test.SimpleMessage; import org.junit.Test; import java.net.URI; +import java.util.concurrent.CompletableFuture; import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; @@ -81,7 +81,7 @@ public class MbusRequestHandlerTestCase { return driver; } - private static ListenableFuture<Response> dispatchMessage(final TestDriver driver, final Message msg) { + private static CompletableFuture<Response> dispatchMessage(final TestDriver driver, final Message msg) { return new RequestDispatch() { @Override diff --git a/container-search-and-docproc/pom.xml b/container-search-and-docproc/pom.xml index 056e746bf69..90c9c056131 100644 --- a/container-search-and-docproc/pom.xml +++ b/container-search-and-docproc/pom.xml @@ -199,6 +199,12 @@ <scope>provided</scope> </dependency> <dependency> + <groupId>com.yahoo.vespa</groupId> + <artifactId>hosted-zone-api</artifactId> + <version>${project.version}</version> + <scope>provided</scope> + </dependency> + <dependency> <groupId>org.json</groupId> <artifactId>json</artifactId> <scope>provided</scope> diff --git a/container-search/abi-spec.json b/container-search/abi-spec.json index 183bb33b4f4..6ed01c2a998 100644 --- a/container-search/abi-spec.json +++ b/container-search/abi-spec.json @@ -1867,6 +1867,8 @@ "public com.yahoo.search.query.profile.compiled.CompiledQueryProfile getQueryProfile()", "public com.yahoo.search.Query$Builder setEmbedder(com.yahoo.language.process.Embedder)", "public com.yahoo.language.process.Embedder getEmbedder()", + "public com.yahoo.search.Query$Builder setZoneInfo(ai.vespa.cloud.ZoneInfo)", + "public ai.vespa.cloud.ZoneInfo getZoneInfo()", "public com.yahoo.search.Query build()" ], "fields": [] @@ -4301,6 +4303,8 @@ "public void 
<init>(int, com.yahoo.search.Result, com.yahoo.search.Query, com.yahoo.processing.rendering.Renderer)", "public com.google.common.util.concurrent.ListenableFuture waitableRender(java.io.OutputStream)", "public static com.google.common.util.concurrent.ListenableFuture waitableRender(com.yahoo.search.Result, com.yahoo.search.Query, com.yahoo.processing.rendering.Renderer, java.io.OutputStream)", + "public java.util.concurrent.CompletableFuture asyncRender(java.io.OutputStream)", + "public static java.util.concurrent.CompletableFuture asyncRender(com.yahoo.search.Result, com.yahoo.search.Query, com.yahoo.processing.rendering.Renderer, java.io.OutputStream)", "public void render(java.io.OutputStream, com.yahoo.jdisc.handler.ContentChannel, com.yahoo.jdisc.handler.CompletionHandler)", "public void populateAccessLogEntry(com.yahoo.container.logging.AccessLogEntry)", "public java.lang.String getParsedQuery()", @@ -4322,6 +4326,7 @@ "public" ], "methods": [ + "public void <init>(com.yahoo.jdisc.Metric, com.yahoo.container.handler.threadpool.ContainerThreadPool, com.yahoo.search.query.profile.compiled.CompiledQueryProfileRegistry, com.yahoo.container.core.ContainerHttpConfig, com.yahoo.language.process.Embedder, com.yahoo.search.searchchain.ExecutionFactory, ai.vespa.cloud.ZoneInfo)", "public void <init>(com.yahoo.jdisc.Metric, com.yahoo.container.handler.threadpool.ContainerThreadPool, com.yahoo.search.query.profile.compiled.CompiledQueryProfileRegistry, com.yahoo.container.core.ContainerHttpConfig, com.yahoo.language.process.Embedder, com.yahoo.search.searchchain.ExecutionFactory)", "public void <init>(com.yahoo.statistics.Statistics, com.yahoo.jdisc.Metric, com.yahoo.container.handler.threadpool.ContainerThreadPool, com.yahoo.search.query.profile.compiled.CompiledQueryProfileRegistry, com.yahoo.container.core.ContainerHttpConfig, com.yahoo.language.process.Embedder, com.yahoo.search.searchchain.ExecutionFactory)", "public void <init>(com.yahoo.statistics.Statistics, 
com.yahoo.jdisc.Metric, com.yahoo.container.handler.threadpool.ContainerThreadPool, com.yahoo.container.logging.AccessLog, com.yahoo.search.query.profile.compiled.CompiledQueryProfileRegistry, com.yahoo.container.core.ContainerHttpConfig, com.yahoo.search.searchchain.ExecutionFactory)", @@ -4672,6 +4677,7 @@ "public void <init>()", "public void <init>(com.yahoo.search.pagetemplates.ResolversConfig)", "public com.yahoo.search.pagetemplates.ResolversConfig$Builder component(com.yahoo.search.pagetemplates.ResolversConfig$Component$Builder)", + "public com.yahoo.search.pagetemplates.ResolversConfig$Builder component(java.util.function.Consumer)", "public com.yahoo.search.pagetemplates.ResolversConfig$Builder component(java.util.List)", "public final boolean dispatchGetConfig(com.yahoo.config.ConfigInstance$Producer)", "public final java.lang.String getDefMd5()", @@ -6631,7 +6637,7 @@ "public" ], "methods": [ - "public void <init>(java.util.Map)", + "public void <init>(java.util.Map, ai.vespa.cloud.ZoneInfo)", "public java.lang.Object get(com.yahoo.processing.request.CompoundName, java.util.Map, com.yahoo.processing.request.Properties)", "public void set(com.yahoo.processing.request.CompoundName, java.lang.Object, java.util.Map)", "public java.util.Map listProperties(com.yahoo.processing.request.CompoundName, java.util.Map, com.yahoo.processing.request.Properties)" @@ -6963,6 +6969,7 @@ "public void <init>()", "public void <init>(com.yahoo.search.query.rewrite.RewritesConfig)", "public com.yahoo.search.query.rewrite.RewritesConfig$Builder fsaDict(com.yahoo.search.query.rewrite.RewritesConfig$FsaDict$Builder)", + "public com.yahoo.search.query.rewrite.RewritesConfig$Builder fsaDict(java.util.function.Consumer)", "public com.yahoo.search.query.rewrite.RewritesConfig$Builder fsaDict(java.util.List)", "public final boolean dispatchGetConfig(com.yahoo.config.ConfigInstance$Producer)", "public final java.lang.String getDefMd5()", @@ -7201,13 +7208,13 @@ ], "methods": [ 
"public void <init>()", - "public final com.google.common.util.concurrent.ListenableFuture render(java.io.OutputStream, com.yahoo.search.Result, com.yahoo.processing.execution.Execution, com.yahoo.processing.Request)", + "public final java.util.concurrent.CompletableFuture renderResponse(java.io.OutputStream, com.yahoo.search.Result, com.yahoo.processing.execution.Execution, com.yahoo.processing.Request)", "protected abstract void render(java.io.Writer, com.yahoo.search.Result)", "public java.lang.String getCharacterEncoding(com.yahoo.search.Result)", "public java.lang.String getDefaultSummaryClass()", "public final java.lang.String getRequestedEncoding(com.yahoo.search.Query)", "public com.yahoo.search.rendering.Renderer clone()", - "public bridge synthetic com.google.common.util.concurrent.ListenableFuture render(java.io.OutputStream, com.yahoo.processing.Response, com.yahoo.processing.execution.Execution, com.yahoo.processing.Request)", + "public bridge synthetic java.util.concurrent.CompletableFuture renderResponse(java.io.OutputStream, com.yahoo.processing.Response, com.yahoo.processing.execution.Execution, com.yahoo.processing.Request)", "public bridge synthetic com.yahoo.processing.rendering.Renderer clone()", "public bridge synthetic com.yahoo.component.AbstractComponent clone()", "public bridge synthetic java.lang.Object clone()" @@ -7703,6 +7710,7 @@ "public java.util.Set getFilled()", "public com.yahoo.processing.response.IncomingData incoming()", "public com.google.common.util.concurrent.ListenableFuture complete()", + "public java.util.concurrent.CompletableFuture completeFuture()", "public void addDataListener(java.lang.Runnable)", "public void close()", "public bridge synthetic com.yahoo.search.result.Hit clone()", @@ -8423,6 +8431,7 @@ "public void <init>()", "public void <init>(com.yahoo.search.statistics.TimingSearcherConfig)", "public com.yahoo.search.statistics.TimingSearcherConfig$Builder 
timer(com.yahoo.search.statistics.TimingSearcherConfig$Timer$Builder)", + "public com.yahoo.search.statistics.TimingSearcherConfig$Builder timer(java.util.function.Consumer)", "public com.yahoo.search.statistics.TimingSearcherConfig$Builder timer(java.util.List)", "public final boolean dispatchGetConfig(com.yahoo.config.ConfigInstance$Producer)", "public final java.lang.String getDefMd5()", diff --git a/container-search/pom.xml b/container-search/pom.xml index 6417d164019..67400208db1 100644 --- a/container-search/pom.xml +++ b/container-search/pom.xml @@ -50,6 +50,12 @@ </dependency> <dependency> <groupId>com.yahoo.vespa</groupId> + <artifactId>hosted-zone-api</artifactId> + <version>${project.version}</version> + <scope>provided</scope> + </dependency> + <dependency> + <groupId>com.yahoo.vespa</groupId> <artifactId>config-lib</artifactId> <version>${project.version}</version> <scope>provided</scope> diff --git a/container-search/src/main/java/com/yahoo/prelude/query/NullItem.java b/container-search/src/main/java/com/yahoo/prelude/query/NullItem.java index 1c7716be295..49c7b4d8ff6 100644 --- a/container-search/src/main/java/com/yahoo/prelude/query/NullItem.java +++ b/container-search/src/main/java/com/yahoo/prelude/query/NullItem.java @@ -1,12 +1,10 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.prelude.query; - import java.nio.ByteBuffer; - /** - * A place holder for null queries to make searchers easier to write. + * A placeholder for null queries to make searchers easier to write. * * @author Steinar Knutsen */ diff --git a/container-search/src/main/java/com/yahoo/prelude/query/ToolBox.java b/container-search/src/main/java/com/yahoo/prelude/query/ToolBox.java index 03c3f7e8b2c..7f37b77919b 100644 --- a/container-search/src/main/java/com/yahoo/prelude/query/ToolBox.java +++ b/container-search/src/main/java/com/yahoo/prelude/query/ToolBox.java @@ -1,7 +1,7 @@ // Copyright Yahoo. 
Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.prelude.query; -import com.google.common.annotations.Beta; +import com.yahoo.api.annotations.Beta; /** * Query tree helper methods and factories. diff --git a/container-search/src/main/java/com/yahoo/search/Query.java b/container-search/src/main/java/com/yahoo/search/Query.java index ddb8a392730..786a0d0e04f 100644 --- a/container-search/src/main/java/com/yahoo/search/Query.java +++ b/container-search/src/main/java/com/yahoo/search/Query.java @@ -1,6 +1,9 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.search; +import ai.vespa.cloud.Environment; +import ai.vespa.cloud.Zone; +import ai.vespa.cloud.ZoneInfo; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import com.yahoo.collections.Tuple2; @@ -334,22 +337,30 @@ public class Query extends com.yahoo.processing.Request implements Cloneable { public Query(HttpRequest request, Map<String, String> requestMap, CompiledQueryProfile queryProfile) { super(new QueryPropertyAliases(propertyAliases)); this.httpRequest = request; - init(requestMap, queryProfile, Embedder.throwsOnUse); + init(requestMap, queryProfile, Embedder.throwsOnUse, ZoneInfo.defaultInfo()); } // TODO: Deprecate most constructors above here private Query(Builder builder) { - this(builder.getRequest(), builder.getRequestMap(), builder.getQueryProfile(), builder.getEmbedder()); + this(builder.getRequest(), + builder.getRequestMap(), + builder.getQueryProfile(), + builder.getEmbedder(), + builder.getZoneInfo()); } - private Query(HttpRequest request, Map<String, String> requestMap, CompiledQueryProfile queryProfile, Embedder embedder) { + private Query(HttpRequest request, Map<String, String> requestMap, CompiledQueryProfile queryProfile, Embedder embedder, + ZoneInfo zoneInfo) { super(new QueryPropertyAliases(propertyAliases)); 
this.httpRequest = request; - init(requestMap, queryProfile, embedder); + init(requestMap, queryProfile, embedder, zoneInfo); } - private void init(Map<String, String> requestMap, CompiledQueryProfile queryProfile, Embedder embedder) { + private void init(Map<String, String> requestMap, + CompiledQueryProfile queryProfile, + Embedder embedder, + ZoneInfo zoneInfo) { startTime = httpRequest.getJDiscRequest().creationTime(TimeUnit.MILLISECONDS); if (queryProfile != null) { // Move all request parameters to the query profile just to validate that the parameter settings are legal @@ -361,7 +372,7 @@ public class Query extends com.yahoo.processing.Request implements Cloneable { // Create the full chain properties().chain(new QueryProperties(this, queryProfile.getRegistry(), embedder)). chain(new ModelObjectMap()). - chain(new RequestContextProperties(requestMap)). + chain(new RequestContextProperties(requestMap, zoneInfo)). chain(queryProfileProperties). chain(new DefaultProperties()); @@ -1131,6 +1142,7 @@ public class Query extends com.yahoo.processing.Request implements Cloneable { private Map<String, String> requestMap = null; private CompiledQueryProfile queryProfile = null; private Embedder embedder = Embedder.throwsOnUse; + private ZoneInfo zoneInfo = ZoneInfo.defaultInfo(); public Builder setRequest(String query) { request = HttpRequest.createTestRequest(query, com.yahoo.jdisc.http.HttpRequest.Method.GET); @@ -1175,6 +1187,13 @@ public class Query extends com.yahoo.processing.Request implements Cloneable { public Embedder getEmbedder() { return embedder; } + public Builder setZoneInfo(ZoneInfo zoneInfo) { + this.zoneInfo = zoneInfo; + return this; + } + + public ZoneInfo getZoneInfo() { return zoneInfo; } + /** Creates a new query from this builder. No properties are required to before calling this. 
*/ public Query build() { return new Query(this); } diff --git a/container-search/src/main/java/com/yahoo/search/grouping/request/ArrayAtLookup.java b/container-search/src/main/java/com/yahoo/search/grouping/request/ArrayAtLookup.java index 43ca1df70d8..d3f239ac924 100644 --- a/container-search/src/main/java/com/yahoo/search/grouping/request/ArrayAtLookup.java +++ b/container-search/src/main/java/com/yahoo/search/grouping/request/ArrayAtLookup.java @@ -1,7 +1,7 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.search.grouping.request; -import com.google.common.annotations.Beta; +import com.yahoo.api.annotations.Beta; /** * Represents access of array element in a document attribute in a {@link GroupingExpression}. diff --git a/container-search/src/main/java/com/yahoo/search/grouping/request/InterpolatedLookup.java b/container-search/src/main/java/com/yahoo/search/grouping/request/InterpolatedLookup.java index ffe835946f8..519f1c99d92 100644 --- a/container-search/src/main/java/com/yahoo/search/grouping/request/InterpolatedLookup.java +++ b/container-search/src/main/java/com/yahoo/search/grouping/request/InterpolatedLookup.java @@ -1,7 +1,7 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.search.grouping.request; -import com.google.common.annotations.Beta; +import com.yahoo.api.annotations.Beta; /** * This class represents a lookup in a multivalue document diff --git a/container-search/src/main/java/com/yahoo/search/handler/HttpSearchResponse.java b/container-search/src/main/java/com/yahoo/search/handler/HttpSearchResponse.java index 5c897245e64..64e7403fa1a 100644 --- a/container-search/src/main/java/com/yahoo/search/handler/HttpSearchResponse.java +++ b/container-search/src/main/java/com/yahoo/search/handler/HttpSearchResponse.java @@ -3,6 +3,7 @@ package com.yahoo.search.handler; import com.google.common.util.concurrent.ListenableFuture; import com.yahoo.collections.ListMap; +import com.yahoo.concurrent.CompletableFutures; import com.yahoo.container.handler.Coverage; import com.yahoo.container.handler.Timing; import com.yahoo.container.jdisc.ExtendedResponse; @@ -25,6 +26,7 @@ import java.io.OutputStream; import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.concurrent.CompletableFuture; /** * Wrap the result of a query as an HTTP response. 
@@ -75,20 +77,36 @@ public class HttpSearchResponse extends ExtendedResponse { } } + /** @deprecated Use {@link #asyncRender(OutputStream)} instead */ + @Deprecated(forRemoval = true, since = "7") public ListenableFuture<Boolean> waitableRender(OutputStream stream) throws IOException { return waitableRender(result, query, rendererCopy, stream); } + /** @deprecated Use {@link #asyncRender(Result, Query, Renderer, OutputStream)} instead */ + @Deprecated(forRemoval = true, since = "7") + @SuppressWarnings("removal") public static ListenableFuture<Boolean> waitableRender(Result result, Query query, Renderer<Result> renderer, OutputStream stream) throws IOException { + return CompletableFutures.toGuavaListenableFuture(asyncRender(result, query, renderer, stream)); + } + + public CompletableFuture<Boolean> asyncRender(OutputStream stream) { + return asyncRender(result, query, rendererCopy, stream); + } + + public static CompletableFuture<Boolean> asyncRender(Result result, + Query query, + Renderer<Result> renderer, + OutputStream stream) { SearchResponse.trimHits(result); SearchResponse.removeEmptySummaryFeatureFields(result); - return renderer.render(stream, result, query.getModel().getExecution(), query); - + return renderer.renderResponse(stream, result, query.getModel().getExecution(), query); } + @Override public void render(OutputStream output, ContentChannel networkChannel, CompletionHandler handler) throws IOException { if (rendererCopy instanceof AsynchronousSectionedRenderer) { @@ -98,9 +116,9 @@ public class HttpSearchResponse extends ExtendedResponse { try { try { long nanoStart = System.nanoTime(); - ListenableFuture<Boolean> promise = waitableRender(output); + CompletableFuture<Boolean> promise = asyncRender(output); if (metric != null) { - promise.addListener(new RendererLatencyReporter(nanoStart), Runnable::run); + promise.whenComplete((__, ___) -> new RendererLatencyReporter(nanoStart).run()); } } finally { if (!(rendererCopy instanceof 
AsynchronousSectionedRenderer)) { diff --git a/container-search/src/main/java/com/yahoo/search/handler/SearchHandler.java b/container-search/src/main/java/com/yahoo/search/handler/SearchHandler.java index 52a3672e7a2..8925c647ad2 100644 --- a/container-search/src/main/java/com/yahoo/search/handler/SearchHandler.java +++ b/container-search/src/main/java/com/yahoo/search/handler/SearchHandler.java @@ -1,6 +1,8 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.search.handler; +import ai.vespa.cloud.Environment; +import ai.vespa.cloud.Zone; import com.google.inject.Inject; import com.yahoo.collections.Tuple2; import com.yahoo.component.ComponentSpecification; @@ -53,6 +55,7 @@ import com.yahoo.statistics.Statistics; import com.yahoo.vespa.configdefinition.SpecialtokensConfig; import com.yahoo.yolean.Exceptions; import com.yahoo.yolean.trace.TraceNode; +import ai.vespa.cloud.ZoneInfo; import java.io.IOException; import java.nio.charset.StandardCharsets; @@ -70,6 +73,7 @@ import java.util.logging.Logger; * Handles search request. 
* * @author Steinar Knutsen + * @author bratseth */ public class SearchHandler extends LoggingRequestHandler { @@ -103,22 +107,46 @@ public class SearchHandler extends LoggingRequestHandler { private final ExecutionFactory executionFactory; private final AtomicLong numRequestsLeftToTrace; + private final ZoneInfo zoneInfo; + private final static RequestHandlerSpec REQUEST_HANDLER_SPEC = RequestHandlerSpec.builder() .withAclMapping(SearchHandler.aclRequestMapper()).build(); + @Inject public SearchHandler(Metric metric, ContainerThreadPool threadpool, CompiledQueryProfileRegistry queryProfileRegistry, ContainerHttpConfig config, Embedder embedder, + ExecutionFactory executionFactory, + ZoneInfo zoneInfo) { + this(metric, threadpool.executor(), queryProfileRegistry, embedder, executionFactory, + config.numQueriesToTraceOnDebugAfterConstruction(), + config.hostResponseHeaderKey().equals("") ? Optional.empty() : Optional.of(config.hostResponseHeaderKey()), + zoneInfo); + } + + /** + * @deprecated Use the @Inject annotated constructor instead. + */ + @Deprecated // Vespa 8 + public SearchHandler(Metric metric, + ContainerThreadPool threadpool, + CompiledQueryProfileRegistry queryProfileRegistry, + ContainerHttpConfig config, + Embedder embedder, ExecutionFactory executionFactory) { this(metric, threadpool.executor(), queryProfileRegistry, embedder, executionFactory, - config.numQueriesToTraceOnDebugAfterConstruction(), - config.hostResponseHeaderKey().equals("") ? Optional.empty() : Optional.of(config.hostResponseHeaderKey())); + config.numQueriesToTraceOnDebugAfterConstruction(), + config.hostResponseHeaderKey().equals("") ? Optional.empty() : Optional.of(config.hostResponseHeaderKey()), + ZoneInfo.defaultInfo()); } - @Deprecated + /** + * @deprecated Use the @Inject annotated constructor instead. 
+ */ + @Deprecated // Vespa 8 public SearchHandler(Statistics statistics, Metric metric, ContainerThreadPool threadpool, @@ -128,7 +156,8 @@ public class SearchHandler extends LoggingRequestHandler { ExecutionFactory executionFactory) { this(metric, threadpool.executor(), queryProfileRegistry, embedder, executionFactory, config.numQueriesToTraceOnDebugAfterConstruction(), - config.hostResponseHeaderKey().equals("") ? Optional.empty() : Optional.of(config.hostResponseHeaderKey())); + config.hostResponseHeaderKey().equals("") ? Optional.empty() : Optional.of(config.hostResponseHeaderKey()), + ZoneInfo.defaultInfo()); } /** @@ -158,8 +187,9 @@ public class SearchHandler extends LoggingRequestHandler { ExecutionFactory executionFactory) { this(metric, executor, queryProfileRegistry, Embedder.throwsOnUse, executionFactory, containerHttpConfig.numQueriesToTraceOnDebugAfterConstruction(), - containerHttpConfig.hostResponseHeaderKey().equals("") ? - Optional.empty() : Optional.of(containerHttpConfig.hostResponseHeaderKey())); + containerHttpConfig.hostResponseHeaderKey().equals("") ? Optional.empty() + : Optional.of(containerHttpConfig.hostResponseHeaderKey()), + ZoneInfo.defaultInfo()); } /** @@ -176,8 +206,9 @@ public class SearchHandler extends LoggingRequestHandler { this(metric, executor, QueryProfileConfigurer.createFromConfig(queryProfileConfig).compile(), Embedder.throwsOnUse, executionFactory, containerHttpConfig.numQueriesToTraceOnDebugAfterConstruction(), - containerHttpConfig.hostResponseHeaderKey().equals("") ? - Optional.empty() : Optional.of( containerHttpConfig.hostResponseHeaderKey())); + containerHttpConfig.hostResponseHeaderKey().equals("") ? 
Optional.empty() + : Optional.of( containerHttpConfig.hostResponseHeaderKey()), + ZoneInfo.defaultInfo()); } /** @@ -192,7 +223,8 @@ public class SearchHandler extends LoggingRequestHandler { ExecutionFactory executionFactory, Optional<String> hostResponseHeaderKey) { this(metric, executor, queryProfileRegistry, Embedder.throwsOnUse, - executionFactory, 0, hostResponseHeaderKey); + executionFactory, 0, hostResponseHeaderKey, + ZoneInfo.defaultInfo()); } private SearchHandler(Metric metric, @@ -201,7 +233,8 @@ public class SearchHandler extends LoggingRequestHandler { Embedder embedder, ExecutionFactory executionFactory, long numQueriesToTraceOnDebugAfterStartup, - Optional<String> hostResponseHeaderKey) { + Optional<String> hostResponseHeaderKey, + ZoneInfo zoneInfo) { super(executor, metric, true); log.log(Level.FINE, () -> "SearchHandler.init " + System.identityHashCode(this)); this.queryProfileRegistry = queryProfileRegistry; @@ -213,6 +246,7 @@ public class SearchHandler extends LoggingRequestHandler { this.hostResponseHeaderKey = hostResponseHeaderKey; this.numRequestsLeftToTrace = new AtomicLong(numQueriesToTraceOnDebugAfterStartup); metric.set(SEARCH_CONNECTIONS, 0.0d, null); + this.zoneInfo = zoneInfo; } /** @deprecated use the other constructor */ @@ -308,6 +342,7 @@ public class SearchHandler extends LoggingRequestHandler { .setRequestMap(requestMap) .setQueryProfile(queryProfile) .setEmbedder(embedder) + .setZoneInfo(zoneInfo) .build(); boolean benchmarking = VespaHeaders.benchmarkOutput(request); @@ -476,7 +511,7 @@ public class SearchHandler extends LoggingRequestHandler { + Exceptions.toMessageString(e)); log.log(Level.FINE, error::getDetailedMessage); return new Result(query, error); - } catch (IllegalArgumentException e) { + } catch (Exception e) { log(request, query, e); return new Result(query, ErrorMessage.createUnspecifiedError("Failed: " + Exceptions.toMessageString(e), e)); @@ -487,10 +522,6 @@ public class SearchHandler extends 
LoggingRequestHandler { Exceptions.toMessageString(e), e); log(request, query, e); return new Result(query, error); - } catch (Exception e) { - log(request, query, e); - return new Result(query, ErrorMessage.createUnspecifiedError("Failed: " + - Exceptions.toMessageString(e), e)); } } diff --git a/container-search/src/main/java/com/yahoo/search/query/properties/RequestContextProperties.java b/container-search/src/main/java/com/yahoo/search/query/properties/RequestContextProperties.java index 06d827faf68..46aafcb11e3 100644 --- a/container-search/src/main/java/com/yahoo/search/query/properties/RequestContextProperties.java +++ b/container-search/src/main/java/com/yahoo/search/query/properties/RequestContextProperties.java @@ -1,13 +1,17 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.search.query.properties; +import ai.vespa.cloud.ZoneInfo; import com.yahoo.processing.request.CompoundName; import com.yahoo.search.query.Properties; +import java.util.Collections; +import java.util.HashMap; import java.util.Map; /** - * Turns get(name) into get(name, request) using the request given at construction time. + * Turns get(name) into get(name, context) using the request given at construction time + * and the zone info. * This is used to allow the query's request to be supplied to all property requests * without forcing users of the query.properties() to supply this explicitly. 
* @@ -15,27 +19,36 @@ import java.util.Map; */ public class RequestContextProperties extends Properties { - private final Map<String, String> requestMap; - - public RequestContextProperties(Map<String, String> properties) { - this.requestMap = properties; + private final Map<String, String> context; + + public RequestContextProperties(Map<String, String> properties, ZoneInfo zoneInfo) { + if (zoneInfo == ZoneInfo.defaultInfo()) { + context = properties; + } + else { + Map<String, String> context = new HashMap<>(properties); + context.putIfAbsent("environment", zoneInfo.zone().environment().name()); + context.putIfAbsent("region", zoneInfo.zone().region()); + context.putIfAbsent("instance", zoneInfo.application().instance()); + this.context = Collections.unmodifiableMap(context); + } } @Override public Object get(CompoundName name, Map<String,String> context, com.yahoo.processing.request.Properties substitution) { - return super.get(name, context == null ? requestMap : context, substitution); + return super.get(name, context == null ? this.context : context, substitution); } @Override public void set(CompoundName name, Object value, Map<String,String> context) { - super.set(name, value, context == null ? requestMap : context); + super.set(name, value, context == null ? this.context : context); } @Override public Map<String, Object> listProperties(CompoundName path, Map<String,String> context, com.yahoo.processing.request.Properties substitution) { - return super.listProperties(path, context == null ? requestMap : context, substitution); + return super.listProperties(path, context == null ? 
this.context : context, substitution); } } diff --git a/container-search/src/main/java/com/yahoo/search/rendering/Renderer.java b/container-search/src/main/java/com/yahoo/search/rendering/Renderer.java index b8a7f0d1978..6ff8f003f7e 100644 --- a/container-search/src/main/java/com/yahoo/search/rendering/Renderer.java +++ b/container-search/src/main/java/com/yahoo/search/rendering/Renderer.java @@ -1,19 +1,18 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.search.rendering; -import com.yahoo.search.Query; -import com.yahoo.search.Result; -import com.google.common.util.concurrent.ListenableFuture; -import com.google.common.util.concurrent.SettableFuture; import com.yahoo.io.ByteWriter; import com.yahoo.processing.Request; import com.yahoo.processing.execution.Execution; +import com.yahoo.search.Query; +import com.yahoo.search.Result; import java.io.IOException; import java.io.OutputStream; import java.io.Writer; import java.nio.charset.Charset; import java.nio.charset.CharsetEncoder; +import java.util.concurrent.CompletableFuture; /** * Renders a search result to a writer synchronously @@ -37,7 +36,7 @@ abstract public class Renderer extends com.yahoo.processing.rendering.Renderer<R * @return a future which is always completed to true */ @Override - public final ListenableFuture<Boolean> render(OutputStream stream, Result response, Execution execution, Request request) { + public final CompletableFuture<Boolean> renderResponse(OutputStream stream, Result response, Execution execution, Request request) { Writer writer = null; try { writer = createWriter(stream, response); @@ -50,8 +49,8 @@ abstract public class Renderer extends com.yahoo.processing.rendering.Renderer<R if (writer != null) try { writer.close(); } catch (IOException e2) {}; } - SettableFuture<Boolean> completed = SettableFuture.create(); - completed.set(true); + CompletableFuture<Boolean> completed = new CompletableFuture<>(); 
+ completed.complete(true); return completed; } diff --git a/container-search/src/main/java/com/yahoo/search/result/Coverage.java b/container-search/src/main/java/com/yahoo/search/result/Coverage.java index dd01494879d..5074a520a4e 100644 --- a/container-search/src/main/java/com/yahoo/search/result/Coverage.java +++ b/container-search/src/main/java/com/yahoo/search/result/Coverage.java @@ -1,7 +1,7 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.search.result; -import com.google.common.annotations.Beta; +import com.yahoo.api.annotations.Beta; /** * The coverage report for a result set. diff --git a/container-search/src/main/java/com/yahoo/search/result/HitGroup.java b/container-search/src/main/java/com/yahoo/search/result/HitGroup.java index 1ae3f4e60cc..6d09bf66175 100644 --- a/container-search/src/main/java/com/yahoo/search/result/HitGroup.java +++ b/container-search/src/main/java/com/yahoo/search/result/HitGroup.java @@ -5,6 +5,7 @@ import com.google.common.base.Predicate; import com.google.common.collect.Iterables; import com.google.common.util.concurrent.ListenableFuture; import com.yahoo.collections.ListenableArrayList; +import com.yahoo.concurrent.CompletableFutures; import com.yahoo.net.URI; import com.yahoo.prelude.fastsearch.SortDataHitSorter; import com.yahoo.processing.response.ArrayDataList; @@ -19,6 +20,7 @@ import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Set; +import java.util.concurrent.CompletableFuture; import java.util.stream.Collectors; /** @@ -84,7 +86,7 @@ public class HitGroup extends Hit implements DataList<Hit>, Cloneable, Iterable< */ private DefaultErrorHit errorHit = null; - private final ListenableFuture<DataList<Hit>> completedFuture; + private final CompletableFuture<DataList<Hit>> completedFuture; private final IncomingData<Hit> incomingHits; @@ -965,7 +967,13 @@ public class HitGroup extends Hit implements 
DataList<Hit>, Cloneable, Iterable< public IncomingData<Hit> incoming() { return incomingHits; } @Override - public ListenableFuture<DataList<Hit>> complete() { return completedFuture; } + @SuppressWarnings("removal") + @Deprecated(forRemoval = true, since = "7") + public ListenableFuture<DataList<Hit>> complete() { + return CompletableFutures.toGuavaListenableFuture(completedFuture); + } + + @Override public CompletableFuture<DataList<Hit>> completeFuture() { return completedFuture; } @Override public void addDataListener(Runnable runnable) { diff --git a/container-search/src/main/java/com/yahoo/search/searchers/ValidateNearestNeighborSearcher.java b/container-search/src/main/java/com/yahoo/search/searchers/ValidateNearestNeighborSearcher.java index 27f34914ee9..f81083221a8 100644 --- a/container-search/src/main/java/com/yahoo/search/searchers/ValidateNearestNeighborSearcher.java +++ b/container-search/src/main/java/com/yahoo/search/searchers/ValidateNearestNeighborSearcher.java @@ -2,7 +2,7 @@ package com.yahoo.search.searchers; -import com.google.common.annotations.Beta; +import com.yahoo.api.annotations.Beta; import com.yahoo.prelude.query.Item; import com.yahoo.prelude.query.NearestNeighborItem; diff --git a/container-search/src/main/java/com/yahoo/search/yql/CaseInsensitiveCharStream.java b/container-search/src/main/java/com/yahoo/search/yql/CaseInsensitiveCharStream.java new file mode 100644 index 00000000000..1784aa77966 --- /dev/null +++ b/container-search/src/main/java/com/yahoo/search/yql/CaseInsensitiveCharStream.java @@ -0,0 +1,77 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. + +package com.yahoo.search.yql; + +import org.antlr.v4.runtime.misc.Interval; +import org.antlr.v4.runtime.CharStream; + +/** + * This class supports case-insensitive lexing by wrapping an existing + * {@link CharStream} and forcing the lexer to see only lowercase characters. 
+ * Grammar literals should then be only lower case such as 'begin'. The text of the character + * stream is unaffected. Example: input 'BeGiN' would match lexer rule + * 'begin', but getText() would return 'BeGiN'. + * It is based on https://github.com/antlr/antlr4/blob/master/doc/resources/CaseChangingCharStream.java + */ +class CaseInsensitiveCharStream implements CharStream { + + final CharStream stream; + + /** + * Constructs a new CaseChangingCharStream wrapping the given {@link CharStream} forcing + * all characters lower case. + * @param stream The stream to wrap. + */ + CaseInsensitiveCharStream(CharStream stream) { + this.stream = stream; + } + + @Override + public String getText(Interval interval) { + return stream.getText(interval); + } + + @Override + public void consume() { + stream.consume(); + } + + @Override + public int LA(int i) { + int c = stream.LA(i); + if (c <= 0) { + return c; + } + return Character.toLowerCase(c); + } + + @Override + public int mark() { + return stream.mark(); + } + + @Override + public void release(int marker) { + stream.release(marker); + } + + @Override + public int index() { + return stream.index(); + } + + @Override + public void seek(int index) { + stream.seek(index); + } + + @Override + public int size() { + return stream.size(); + } + + @Override + public String getSourceName() { + return stream.getSourceName(); + } +} diff --git a/container-search/src/main/java/com/yahoo/search/yql/CaseInsensitiveFileStream.java b/container-search/src/main/java/com/yahoo/search/yql/CaseInsensitiveFileStream.java deleted file mode 100644 index 5e21ce234d8..00000000000 --- a/container-search/src/main/java/com/yahoo/search/yql/CaseInsensitiveFileStream.java +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
-package com.yahoo.search.yql; - -import org.antlr.v4.runtime.ANTLRFileStream; -import org.antlr.v4.runtime.CharStream; - -import java.io.IOException; - -/** - * Enable ANTLR to do case insensitive comparisons when reading from files without throwing away the case in the token. - */ - -class CaseInsensitiveFileStream extends ANTLRFileStream { - - public CaseInsensitiveFileStream(String fileName) throws IOException { - super(fileName); - } - - public CaseInsensitiveFileStream(String fileName, String encoding) throws IOException { - super(fileName, encoding); - } - - @Override - public int LA(int i) { - if (i == 0) { - return 0; - } - if (i < 0) { - i++; // e.g., translate LA(-1) to use offset 0 - } - - if ((p + i - 1) >= n) { - return CharStream.EOF; - } - return Character.toLowerCase(data[p + i - 1]); - } - -} diff --git a/container-search/src/main/java/com/yahoo/search/yql/CaseInsensitiveInputStream.java b/container-search/src/main/java/com/yahoo/search/yql/CaseInsensitiveInputStream.java deleted file mode 100644 index ed071370f4e..00000000000 --- a/container-search/src/main/java/com/yahoo/search/yql/CaseInsensitiveInputStream.java +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -package com.yahoo.search.yql; - -import org.antlr.v4.runtime.ANTLRInputStream; -import org.antlr.v4.runtime.CharStream; - -import java.io.IOException; -import java.io.InputStream; - -/** - * Enable ANTLR to do case insensitive comparisons when reading from files without throwing away the case in the token. 
- */ -class CaseInsensitiveInputStream extends ANTLRInputStream { - - public CaseInsensitiveInputStream() { - super(); - } - - public CaseInsensitiveInputStream(InputStream input) throws IOException { - super(input); - } - - public CaseInsensitiveInputStream(InputStream input, int size) throws IOException { - super(input, size); - } - - public CaseInsensitiveInputStream(char[] data, int numberOfActualCharsInArray) throws IOException { - super(data, numberOfActualCharsInArray); - } - - public CaseInsensitiveInputStream(String input) throws IOException { - super(input); - } - - @Override - public int LA(int i) { - if (i == 0) { - return 0; - } - if (i < 0) { - i++; // e.g., translate LA(-1) to use offset 0 - } - - if ((p + i - 1) >= n) { - return CharStream.EOF; - } - return Character.toLowerCase(data[p + i - 1]); - } - -} diff --git a/container-search/src/main/java/com/yahoo/search/yql/FieldFiller.java b/container-search/src/main/java/com/yahoo/search/yql/FieldFiller.java index 343f1b06e84..13a9f9510cd 100644 --- a/container-search/src/main/java/com/yahoo/search/yql/FieldFiller.java +++ b/container-search/src/main/java/com/yahoo/search/yql/FieldFiller.java @@ -6,7 +6,7 @@ import java.util.HashSet; import java.util.Map; import java.util.Set; -import com.google.common.annotations.Beta; +import com.yahoo.api.annotations.Beta; import com.yahoo.component.chain.dependencies.After; import com.yahoo.prelude.fastsearch.DocumentdbInfoConfig; import com.yahoo.prelude.fastsearch.DocumentdbInfoConfig.Documentdb; diff --git a/container-search/src/main/java/com/yahoo/search/yql/FieldFilter.java b/container-search/src/main/java/com/yahoo/search/yql/FieldFilter.java index c4ab612651d..d5603ff0171 100644 --- a/container-search/src/main/java/com/yahoo/search/yql/FieldFilter.java +++ b/container-search/src/main/java/com/yahoo/search/yql/FieldFilter.java @@ -5,7 +5,7 @@ import java.util.Iterator; import java.util.Map.Entry; import java.util.Set; -import 
com.google.common.annotations.Beta; +import com.yahoo.api.annotations.Beta; import com.yahoo.component.chain.dependencies.After; import com.yahoo.component.chain.dependencies.Before; import com.yahoo.prelude.fastsearch.FastHit; diff --git a/container-search/src/main/java/com/yahoo/search/yql/MinimalQueryInserter.java b/container-search/src/main/java/com/yahoo/search/yql/MinimalQueryInserter.java index 8223cb2cba2..649d678db55 100644 --- a/container-search/src/main/java/com/yahoo/search/yql/MinimalQueryInserter.java +++ b/container-search/src/main/java/com/yahoo/search/yql/MinimalQueryInserter.java @@ -1,7 +1,7 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.search.yql; -import com.google.common.annotations.Beta; +import com.yahoo.api.annotations.Beta; import com.google.inject.Inject; import com.yahoo.language.Linguistics; import com.yahoo.language.simple.SimpleLinguistics; diff --git a/container-search/src/main/java/com/yahoo/search/yql/ProgramParser.java b/container-search/src/main/java/com/yahoo/search/yql/ProgramParser.java index d32033249f1..c84ed0a0565 100644 --- a/container-search/src/main/java/com/yahoo/search/yql/ProgramParser.java +++ b/container-search/src/main/java/com/yahoo/search/yql/ProgramParser.java @@ -56,6 +56,7 @@ import com.yahoo.search.yql.yqlplusParser.TimeoutContext; import com.yahoo.search.yql.yqlplusParser.UnaryExpressionContext; import com.yahoo.search.yql.yqlplusParser.WhereContext; +import org.antlr.v4.runtime.ANTLRInputStream; import org.antlr.v4.runtime.BaseErrorListener; import org.antlr.v4.runtime.CharStream; import org.antlr.v4.runtime.CommonTokenStream; @@ -69,7 +70,6 @@ import org.antlr.v4.runtime.tree.ParseTree; import org.antlr.v4.runtime.tree.RuleNode; import org.antlr.v4.runtime.tree.TerminalNode; -import java.io.File; import java.io.IOException; import java.io.InputStream; import java.util.Iterator; @@ -83,50 +83,39 @@ import java.util.Set; final 
class ProgramParser { public yqlplusParser prepareParser(String programName, InputStream input) throws IOException { - return prepareParser(programName, new CaseInsensitiveInputStream(input)); + //TODO ANTLRInputStream goes away on 4.7, so must use CharStreams.fromXXX() when upgrading + return prepareParser(programName, new CaseInsensitiveCharStream(new ANTLRInputStream(input))); } public yqlplusParser prepareParser(String programName, String input) throws IOException { - return prepareParser(programName, new CaseInsensitiveInputStream(input)); + //TODO ANTLRInputStream goes away on 4.7, so must use CharStreams.fromXXX() when upgrading + return prepareParser(programName, new CaseInsensitiveCharStream(new ANTLRInputStream(input))); } - public yqlplusParser prepareParser(File file) throws IOException { - return prepareParser(file.getAbsoluteFile().toString(), new CaseInsensitiveFileStream(file.getAbsolutePath())); + private static class ErrorListener extends BaseErrorListener { + private final String programName; + ErrorListener(String programName) { this.programName = programName; } + @Override + public void syntaxError(Recognizer<?, ?> recognizer, + Object offendingSymbol, + int line, + int charPositionInLine, + String msg, + RecognitionException e) { + throw new ProgramCompileException(new Location(programName, line, charPositionInLine), "%s", msg); + } } private yqlplusParser prepareParser(String programName, CharStream input) { + ErrorListener errorListener = new ErrorListener(programName); yqlplusLexer lexer = new yqlplusLexer(input); lexer.removeErrorListeners(); - lexer.addErrorListener(new BaseErrorListener() { - - @Override - public void syntaxError(Recognizer<?, ?> recognizer, - Object offendingSymbol, - int line, - int charPositionInLine, - String msg, - RecognitionException e) { - throw new ProgramCompileException(new Location(programName, line, charPositionInLine), "%s", msg); - } - - }); + lexer.addErrorListener(errorListener); TokenStream tokens = new 
CommonTokenStream(lexer); yqlplusParser parser = new yqlplusParser(tokens); parser.removeErrorListeners(); - parser.addErrorListener(new BaseErrorListener() { - - @Override - public void syntaxError(Recognizer<?, ?> recognizer, - Object offendingSymbol, - int line, - int charPositionInLine, - String msg, - RecognitionException e) { - throw new ProgramCompileException(new Location(programName, line, charPositionInLine), "%s", msg); - } - - }); + parser.addErrorListener(errorListener); parser.getInterpreter().setPredictionMode(PredictionMode.SLL); return parser; } diff --git a/container-search/src/main/java/com/yahoo/search/yql/YqlParser.java b/container-search/src/main/java/com/yahoo/search/yql/YqlParser.java index c688f61add0..8334775b8e2 100644 --- a/container-search/src/main/java/com/yahoo/search/yql/YqlParser.java +++ b/container-search/src/main/java/com/yahoo/search/yql/YqlParser.java @@ -15,7 +15,7 @@ import java.util.Set; import java.util.StringTokenizer; import java.util.function.Supplier; -import com.google.common.annotations.Beta; +import com.yahoo.api.annotations.Beta; import com.google.common.base.Preconditions; import com.yahoo.collections.LazyMap; import com.yahoo.collections.LazySet; diff --git a/container-search/src/test/java/com/yahoo/prelude/semantics/test/rulebases/expansion.sr b/container-search/src/test/java/com/yahoo/prelude/semantics/test/rulebases/expansion.sr index 32f8e86b59f..9a147887207 100644 --- a/container-search/src/test/java/com/yahoo/prelude/semantics/test/rulebases/expansion.sr +++ b/container-search/src/test/java/com/yahoo/prelude/semantics/test/rulebases/expansion.sr @@ -5,4 +5,6 @@ equiv1 +> =equiv2 =equiv3; testfield:[test] -> =testfield:e1 =testfield:e2 =testfield:e3; +synonymfield:[test] -> =[test]; + [test] :- foo, bar, baz; diff --git a/container-search/src/test/java/com/yahoo/search/handler/SearchHandlerTest.java b/container-search/src/test/java/com/yahoo/search/handler/SearchHandlerTest.java index 2c76383bff4..e1987aa50ca 
100644 --- a/container-search/src/test/java/com/yahoo/search/handler/SearchHandlerTest.java +++ b/container-search/src/test/java/com/yahoo/search/handler/SearchHandlerTest.java @@ -36,6 +36,7 @@ import java.io.IOException; import java.net.URI; import java.util.concurrent.Executors; +import static com.yahoo.yolean.Exceptions.uncheckInterrupted; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.is; import static org.junit.Assert.assertEquals; @@ -45,6 +46,7 @@ import static org.junit.Assert.assertNull; import static org.junit.Assert.assertSame; import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; /** * @author bratseth @@ -184,7 +186,7 @@ public class SearchHandlerTest { RequestHandlerTestDriver.MockResponseHandler responseHandler = newDriver.sendRequest( "http://localhost/search/?yql=select%20*%20from%20foo%20where%20bar%20%3E%201453501295%27%3B"); responseHandler.readAll(); - assertThat(responseHandler.getStatus(), is(400)); + assertEquals(400, responseHandler.getStatus()); assertEquals(Request.RequestType.READ, responseHandler.getResponse().getRequestType()); } } @@ -304,22 +306,19 @@ public class SearchHandlerTest { assertOkResult(driver.sendRequest(request), jsonResult); } - private boolean waitForMetric(String key) { - try { - for (int i = 0; i < 10; i++) { - if (metric.metrics().containsKey(key)) return true; - Thread.sleep(20); - } - } catch (InterruptedException e) { + private void assertMetricPresent(String key) { + for (int i = 0; i < 200; i++) { + if (metric.metrics().containsKey(key)) return; + uncheckInterrupted(() -> Thread.sleep(1)); } - return false; + fail(String.format("Could not find metric with key '%s' in '%s'", key, metric)); } private void assertOkResult(RequestHandlerTestDriver.MockResponseHandler response, String expected) { assertEquals(expected, response.readAll()); assertEquals(200, response.getStatus()); 
assertEquals(selfHostname, response.getResponse().headers().get(myHostnameHeader).get(0)); - assertTrue(waitForMetric(SearchHandler.RENDER_LATENCY_METRIC)); + assertMetricPresent(SearchHandler.RENDER_LATENCY_METRIC); } @Test diff --git a/container-search/src/test/java/com/yahoo/search/pagetemplates/engine/test/ExecutionAbstractTestCase.java b/container-search/src/test/java/com/yahoo/search/pagetemplates/engine/test/ExecutionAbstractTestCase.java index 0819cbd72b4..b39c170c6a3 100644 --- a/container-search/src/test/java/com/yahoo/search/pagetemplates/engine/test/ExecutionAbstractTestCase.java +++ b/container-search/src/test/java/com/yahoo/search/pagetemplates/engine/test/ExecutionAbstractTestCase.java @@ -53,7 +53,7 @@ public class ExecutionAbstractTestCase { assertRendered(result,resultFileName,false); } - @SuppressWarnings("deprecation") + @SuppressWarnings({"deprecation", "removal"}) protected void assertRendered(Result result, String resultFileName, boolean print) { try { PageTemplatesXmlRenderer renderer = new PageTemplatesXmlRenderer(); diff --git a/container-search/src/test/java/com/yahoo/search/query/profile/test/QueryProfileVariantsTestCase.java b/container-search/src/test/java/com/yahoo/search/query/profile/test/QueryProfileVariantsTestCase.java index 231020c4fa5..3542e1413eb 100644 --- a/container-search/src/test/java/com/yahoo/search/query/profile/test/QueryProfileVariantsTestCase.java +++ b/container-search/src/test/java/com/yahoo/search/query/profile/test/QueryProfileVariantsTestCase.java @@ -1,6 +1,11 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.search.query.profile.test; +import ai.vespa.cloud.ApplicationId; +import ai.vespa.cloud.Environment; +import ai.vespa.cloud.Zone; +import ai.vespa.cloud.ZoneInfo; +import com.yahoo.jdisc.application.Application; import com.yahoo.jdisc.http.HttpRequest.Method; import com.yahoo.container.jdisc.HttpRequest; import com.yahoo.processing.request.CompoundName; @@ -1331,6 +1336,53 @@ public class QueryProfileVariantsTestCase { assertEquals("yahoo/alerts", cAlert.get("vertical.custid", toMap("entry=alert", "intl=us", "lang=en-US"))); } + @Test + public void testZoneInfoInContext() { + QueryProfileRegistry registry = new QueryProfileRegistry(); + QueryProfile profile = new QueryProfile("test"); + profile.setDimensions(new String[] { "environment", "region", "instance" }); + profile.set("value", "default", registry); + profile.set("value", "prod-region1-instance1", + toMap("environment=prod", "region=region1", "instance=instance1"), + registry); + profile.set("value", "prod-instance2", + toMap("environment=prod", "instance=instance2"), + registry); + profile.set("value", "prod-region3", + toMap("environment=prod", "region=region3"), + registry); + profile.set("value", "dev", + toMap("environment=dev"), + registry); + registry.register(profile); + + CompiledQueryProfileRegistry cRegistry = registry.compile(); + CompiledQueryProfile cTest = cRegistry.findQueryProfile("test"); + + assertValueForZone("default", ZoneInfo.defaultInfo(), cTest); + assertValueForZone("prod-region1-instance1", + new ZoneInfo(new ApplicationId("tenant1", "application1", "instance1"), + new Zone(Environment.prod, "region1")), + cTest); + assertValueForZone("prod-instance2", + new ZoneInfo(new ApplicationId("tenant2", "application2", "instance2"), + new Zone(Environment.prod, "region1")), + cTest); + assertValueForZone("prod-region3", + new ZoneInfo(new ApplicationId("tenant3", "application3", "instance3"), + new Zone(Environment.prod, "region3")), + cTest); + assertValueForZone("dev", 
+ new ZoneInfo(new ApplicationId("tenant4", "application4", "instance4"), + new Zone(Environment.dev, "region4")), + cTest); + } + + private void assertValueForZone(String expected, ZoneInfo zoneInfo, CompiledQueryProfile cTest) { + assertEquals(expected, + new Query.Builder().setQueryProfile(cTest).setZoneInfo(zoneInfo).build().properties().get("value")); + } + private void assertGet(String expectedValue, String parameter, String[] dimensionValues, QueryProfile profile, CompiledQueryProfile cprofile) { Map<String,String> context=toMap(profile,dimensionValues); assertEquals("Looking up '" + parameter + "' for '" + Arrays.toString(dimensionValues) + "'",expectedValue,cprofile.get(parameter,context)); diff --git a/container-search/src/test/java/com/yahoo/search/rendering/AsyncGroupPopulationTestCase.java b/container-search/src/test/java/com/yahoo/search/rendering/AsyncGroupPopulationTestCase.java index 359aed85d30..7db29568d5b 100644 --- a/container-search/src/test/java/com/yahoo/search/rendering/AsyncGroupPopulationTestCase.java +++ b/container-search/src/test/java/com/yahoo/search/rendering/AsyncGroupPopulationTestCase.java @@ -1,23 +1,7 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.search.rendering; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; - -import java.io.ByteArrayOutputStream; -import java.io.IOException; -import java.util.Map; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.Executor; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; - -import org.junit.Test; - -import com.fasterxml.jackson.core.JsonParseException; -import com.fasterxml.jackson.databind.JsonMappingException; import com.fasterxml.jackson.databind.ObjectMapper; -import com.google.common.util.concurrent.ListenableFuture; import com.yahoo.concurrent.Receiver; import com.yahoo.processing.response.Data; import com.yahoo.processing.response.DataList; @@ -29,6 +13,20 @@ import com.yahoo.search.result.HitGroup; import com.yahoo.search.result.Relevance; import com.yahoo.search.searchchain.Execution; import com.yahoo.text.Utf8; +import org.junit.Test; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.util.Map; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Executor; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.function.BiConsumer; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; /** * Test adding hits to a hit group during rendering. 
@@ -36,18 +34,20 @@ import com.yahoo.text.Utf8; * @author <a href="mailto:steinar@yahoo-inc.com">Steinar Knutsen</a> */ public class AsyncGroupPopulationTestCase { - private static class WrappedFuture<F> implements ListenableFuture<F> { + private static class WrappedFuture<F> extends CompletableFuture<F> { Receiver<Boolean> isListening = new Receiver<>(); - private ListenableFuture<F> wrapped; + private final CompletableFuture<F> wrapped; - WrappedFuture(ListenableFuture<F> wrapped) { + WrappedFuture(CompletableFuture<F> wrapped) { this.wrapped = wrapped; } - public void addListener(Runnable listener, Executor executor) { - wrapped.addListener(listener, executor); + @Override + public CompletableFuture<F> whenCompleteAsync(BiConsumer<? super F, ? super Throwable> action, Executor executor) { + wrapped.whenCompleteAsync(action); isListening.put(Boolean.TRUE); + return this; } public boolean cancel(boolean mayInterruptIfRunning) { @@ -73,14 +73,14 @@ public class AsyncGroupPopulationTestCase { } private static class ObservableIncoming<DATATYPE extends Data> extends DefaultIncomingData<DATATYPE> { - WrappedFuture<DataList<DATATYPE>> waitForIt = null; + volatile WrappedFuture<DataList<DATATYPE>> waitForIt = null; private final Object lock = new Object(); @Override - public ListenableFuture<DataList<DATATYPE>> completed() { + public CompletableFuture<DataList<DATATYPE>> completedFuture() { synchronized (lock) { if (waitForIt == null) { - waitForIt = new WrappedFuture<>(super.completed()); + waitForIt = new WrappedFuture<>(super.completedFuture()); } } return waitForIt; @@ -99,7 +99,7 @@ public class AsyncGroupPopulationTestCase { @Test public final void test() throws InterruptedException, ExecutionException, - JsonParseException, JsonMappingException, IOException { + IOException { String rawExpected = "{" + " \"root\": {" + " \"children\": [" @@ -125,10 +125,10 @@ public class AsyncGroupPopulationTestCase { JsonRenderer renderer = new JsonRenderer(); Result result = new 
Result(new Query(), h); renderer.init(); - ListenableFuture<Boolean> f = renderer.render(out, result, + CompletableFuture<Boolean> f = renderer.renderResponse(out, result, new Execution(Execution.Context.createContextStub()), result.getQuery()); - WrappedFuture<DataList<Hit>> x = (WrappedFuture<DataList<Hit>>) h.incoming().completed(); + WrappedFuture<DataList<Hit>> x = (WrappedFuture<DataList<Hit>>) h.incoming().completedFuture(); x.isListening.get(86_400_000); h.incoming().add(new Hit("yahoo2")); h.incoming().markComplete(); diff --git a/container-search/src/test/java/com/yahoo/search/rendering/JsonRendererTestCase.java b/container-search/src/test/java/com/yahoo/search/rendering/JsonRendererTestCase.java index 7395b4802a0..f3a71af0b9e 100644 --- a/container-search/src/test/java/com/yahoo/search/rendering/JsonRendererTestCase.java +++ b/container-search/src/test/java/com/yahoo/search/rendering/JsonRendererTestCase.java @@ -364,6 +364,7 @@ public class JsonRendererTestCase { } @Test + @SuppressWarnings("removal") public void testEmptyTracing() throws IOException, InterruptedException, ExecutionException { String expected = "{" + " \"root\": {" @@ -391,7 +392,7 @@ public class JsonRendererTestCase { assertEqualJson(expected, summary); } - @SuppressWarnings("unchecked") + @SuppressWarnings({"unchecked", "removal"}) @Test public void testTracingWithEmptySubtree() throws IOException, InterruptedException, ExecutionException { String expected = "{" @@ -1372,6 +1373,7 @@ public class JsonRendererTestCase { return render(execution, r); } + @SuppressWarnings("removal") private String render(Execution execution, Result r) throws InterruptedException, ExecutionException { ByteArrayOutputStream bs = new ByteArrayOutputStream(); ListenableFuture<Boolean> f = renderer.render(bs, r, execution, null); diff --git a/container-search/src/test/java/com/yahoo/search/rendering/SyncDefaultRendererTestCase.java 
b/container-search/src/test/java/com/yahoo/search/rendering/SyncDefaultRendererTestCase.java index ae1eade12d3..99911276f50 100644 --- a/container-search/src/test/java/com/yahoo/search/rendering/SyncDefaultRendererTestCase.java +++ b/container-search/src/test/java/com/yahoo/search/rendering/SyncDefaultRendererTestCase.java @@ -1,17 +1,6 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.search.rendering; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; - -import java.io.ByteArrayOutputStream; -import java.io.IOException; -import java.util.concurrent.ExecutionException; - -import org.junit.After; -import org.junit.Before; -import org.junit.Test; - import com.google.common.util.concurrent.ListenableFuture; import com.yahoo.component.chain.Chain; import com.yahoo.prelude.fastsearch.FastHit; @@ -26,6 +15,15 @@ import com.yahoo.search.statistics.ElapsedTimeTestCase.CreativeTimeSource; import com.yahoo.search.statistics.ElapsedTimeTestCase.UselessSearcher; import com.yahoo.search.statistics.TimeTracker; import com.yahoo.text.Utf8; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import java.io.ByteArrayOutputStream; +import java.util.concurrent.ExecutionException; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; /** * Check the legacy sync default renderer doesn't spontaneously combust. 
@@ -56,7 +54,7 @@ public class SyncDefaultRendererTestCase { assertEquals("text/xml", d.getMimeType()); } - @SuppressWarnings("deprecation") + @SuppressWarnings({"deprecation", "removal"}) @Test public void testRenderWriterResult() throws InterruptedException, ExecutionException { Query q = new Query("/?query=a&tracelevel=5"); diff --git a/container-search/src/test/java/com/yahoo/search/rendering/XMLRendererTestCase.java b/container-search/src/test/java/com/yahoo/search/rendering/XMLRendererTestCase.java index 0fad449763f..b3534d580d8 100644 --- a/container-search/src/test/java/com/yahoo/search/rendering/XMLRendererTestCase.java +++ b/container-search/src/test/java/com/yahoo/search/rendering/XMLRendererTestCase.java @@ -1,39 +1,36 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.search.rendering; -import static org.junit.Assert.*; - -import java.io.ByteArrayOutputStream; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; - +import com.google.common.util.concurrent.ListenableFuture; import com.yahoo.component.ComponentId; +import com.yahoo.component.chain.Chain; import com.yahoo.container.QrSearchersConfig; import com.yahoo.prelude.Index; import com.yahoo.prelude.IndexFacts; import com.yahoo.prelude.IndexModel; import com.yahoo.prelude.SearchDefinition; -import com.yahoo.prelude.searcher.JuniperSearcher; -import com.yahoo.search.result.Hit; -import com.yahoo.search.result.Relevance; -import com.yahoo.search.searchchain.Execution; -import com.yahoo.search.searchchain.testutil.DocumentSourceSearcher; -import org.junit.Test; - -import com.google.common.util.concurrent.ListenableFuture; -import com.yahoo.component.chain.Chain; import com.yahoo.prelude.fastsearch.FastHit; +import com.yahoo.prelude.searcher.JuniperSearcher; import com.yahoo.search.Query; import com.yahoo.search.Result; import com.yahoo.search.Searcher; import com.yahoo.search.result.Coverage; import 
com.yahoo.search.result.ErrorMessage; +import com.yahoo.search.result.Hit; import com.yahoo.search.result.HitGroup; +import com.yahoo.search.result.Relevance; +import com.yahoo.search.searchchain.Execution; +import com.yahoo.search.searchchain.testutil.DocumentSourceSearcher; import com.yahoo.search.statistics.ElapsedTimeTestCase; -import com.yahoo.search.statistics.TimeTracker; import com.yahoo.search.statistics.ElapsedTimeTestCase.CreativeTimeSource; +import com.yahoo.search.statistics.TimeTracker; import com.yahoo.text.Utf8; +import org.junit.Test; + +import java.io.ByteArrayOutputStream; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; /** * Test the XML renderer @@ -158,6 +155,7 @@ public class XMLRendererTestCase { assertTrue(summary.contains("<meta type=\"context\">")); } + @SuppressWarnings("removal") private String render(Result result) throws Exception { XmlRenderer renderer = new XmlRenderer(); renderer.init(); diff --git a/container-search/src/test/java/com/yahoo/search/searchchain/config/test/SearchChainConfigurerTestCase.java b/container-search/src/test/java/com/yahoo/search/searchchain/config/test/SearchChainConfigurerTestCase.java index a642d14b368..d6e99ec3a44 100644 --- a/container-search/src/test/java/com/yahoo/search/searchchain/config/test/SearchChainConfigurerTestCase.java +++ b/container-search/src/test/java/com/yahoo/search/searchchain/config/test/SearchChainConfigurerTestCase.java @@ -5,6 +5,7 @@ import com.yahoo.config.search.IntConfig; import com.yahoo.config.search.StringConfig; import com.yahoo.container.core.config.HandlersConfigurerDi; import com.yahoo.container.core.config.testutil.HandlersConfigurerTestWrapper; +import com.yahoo.lang.MutableInteger; import com.yahoo.search.Query; import com.yahoo.search.Result; import com.yahoo.search.Searcher; @@ -38,6 +39,7 @@ import java.util.regex.Matcher; import java.util.regex.Pattern; import static org.hamcrest.CoreMatchers.*; +import static 
org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; import static org.junit.Assert.assertThat; @@ -129,7 +131,7 @@ public class SearchChainConfigurerTestCase { @Test public void testConfigurableSearcher() { - HandlersConfigurerTestWrapper configurer=new HandlersConfigurerTestWrapper("dir:" + testDir); + HandlersConfigurerTestWrapper configurer = new HandlersConfigurerTestWrapper("dir:" + testDir); SearchChain configurable = getSearchChainRegistryFrom(configurer).getComponent("configurable"); assertNotNull(configurable); @@ -137,9 +139,8 @@ public class SearchChainConfigurerTestCase { Searcher s = configurable.searchers().get(0); assertThat(s, instanceOf(ConfigurableSearcher.class)); ConfigurableSearcher searcher = (ConfigurableSearcher)s; - assertThat("Value from int.cfg file", searcher.intConfig.intVal(), is(7)); - assertThat("Value from string.cfg file", searcher.stringConfig.stringVal(), - is("com.yahoo.search.searchchain.config.test")); + assertEquals("Value from int.cfg file", 7, searcher.intConfig.intVal()); + assertEquals("Value from string.cfg file", "com.yahoo.search.searchchain.config.test", searcher.stringConfig.stringVal()); configurer.shutdown(); } @@ -342,7 +343,7 @@ public class SearchChainConfigurerTestCase { if (append) { Pattern p = Pattern.compile("^[a-z]+" + "\\[\\d+\\]\\.id (.+)"); BufferedReader reader = new BufferedReader(new InputStreamReader( - new FileInputStream(new File(componentsFile)), StandardCharsets.UTF_8)); + new FileInputStream(componentsFile), StandardCharsets.UTF_8)); while ((line = reader.readLine()) != null) { Matcher m = p.matcher(line); if (m.matches() && !m.group(1).equals(HandlersConfigurerDi.RegistriesHack.class.getName())) { @@ -353,7 +354,7 @@ public class SearchChainConfigurerTestCase { reader.close(); } BufferedReader reader = new BufferedReader(new InputStreamReader( - new FileInputStream(new File(configFile)), StandardCharsets.UTF_8)); + new 
FileInputStream(configFile), StandardCharsets.UTF_8)); Pattern component = Pattern.compile("^" + componentType + "\\[\\d+\\]\\.id (.+)"); while ((line = reader.readLine()) != null) { Matcher m = component.matcher(line); @@ -366,13 +367,22 @@ public class SearchChainConfigurerTestCase { buf.append("components[").append(i++).append("].id ").append(HandlersConfigurerDi.RegistriesHack.class.getName()).append("\n"); if (componentType.equals("components")) - buf.append("components[").append(i++).append("].id ").append(ExecutionFactory.class.getName()).append("\n"); + i = addStandardComponents(i, buf); buf.insert(0, "components["+i+"]\n"); - Writer writer = new OutputStreamWriter(new FileOutputStream(new File(componentsFile)), StandardCharsets.UTF_8); + Writer writer = new OutputStreamWriter(new FileOutputStream(componentsFile), StandardCharsets.UTF_8); writer.write(buf.toString()); writer.flush(); writer.close(); } + private static int addStandardComponents(int i, StringBuilder builder) { + addComponent(ExecutionFactory.class.getName(), i++, builder); + return i; + } + + private static void addComponent(String component, int i, StringBuilder builder) { + builder.append("components[").append(i).append("].id ").append(component).append("\n"); + } + } diff --git a/container-search/src/test/java/com/yahoo/search/searchchain/test/FutureDataTestCase.java b/container-search/src/test/java/com/yahoo/search/searchchain/test/FutureDataTestCase.java index 9c36971f688..2426b18f018 100644 --- a/container-search/src/test/java/com/yahoo/search/searchchain/test/FutureDataTestCase.java +++ b/container-search/src/test/java/com/yahoo/search/searchchain/test/FutureDataTestCase.java @@ -2,7 +2,8 @@ package com.yahoo.search.searchchain.test; import com.yahoo.component.ComponentId; -import com.yahoo.processing.response.*; +import com.yahoo.component.chain.Chain; +import com.yahoo.processing.response.IncomingData; import com.yahoo.search.Query; import com.yahoo.search.Result; import 
com.yahoo.search.Searcher; @@ -11,18 +12,18 @@ import com.yahoo.search.federation.sourceref.SearchChainResolver; import com.yahoo.search.result.Hit; import com.yahoo.search.result.HitGroup; import com.yahoo.search.searchchain.Execution; - import com.yahoo.search.searchchain.SearchChainRegistry; import com.yahoo.search.searchchain.model.federation.FederationOptions; import org.junit.Test; -import static org.junit.Assert.*; import java.util.Collections; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; -import com.yahoo.component.chain.Chain; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; /** * Tests using the async capabilities of the Processing parent framework of searchers. @@ -31,6 +32,7 @@ import com.yahoo.component.chain.Chain; */ public class FutureDataTestCase { + @SuppressWarnings("removal") @Test public void testAsyncFederation() throws InterruptedException, ExecutionException { // Setup environment @@ -77,6 +79,7 @@ public class FutureDataTestCase { assertEquals("async:1", asyncGroup.get(1).getId().toString()); } + @SuppressWarnings("removal") @Test public void testFutureData() throws InterruptedException, ExecutionException, TimeoutException { // Set up diff --git a/container-search/src/test/java/com/yahoo/search/yql/YqlParserTestCase.java b/container-search/src/test/java/com/yahoo/search/yql/YqlParserTestCase.java index f9005387716..ab99118b179 100644 --- a/container-search/src/test/java/com/yahoo/search/yql/YqlParserTestCase.java +++ b/container-search/src/test/java/com/yahoo/search/yql/YqlParserTestCase.java @@ -4,15 +4,14 @@ package com.yahoo.search.yql; import com.yahoo.component.chain.Chain; import com.yahoo.container.QrSearchersConfig; import com.yahoo.language.Language; -import com.yahoo.language.simple.SimpleLinguistics; import com.yahoo.prelude.Index; import 
com.yahoo.prelude.IndexFacts; import com.yahoo.prelude.IndexModel; import com.yahoo.prelude.SearchDefinition; import com.yahoo.prelude.query.AndItem; import com.yahoo.prelude.query.BoolItem; -import com.yahoo.prelude.query.IndexedItem; import com.yahoo.prelude.query.ExactStringItem; +import com.yahoo.prelude.query.IndexedItem; import com.yahoo.prelude.query.Item; import com.yahoo.prelude.query.MarkerWordItem; import com.yahoo.prelude.query.PhraseItem; @@ -28,6 +27,7 @@ import com.yahoo.prelude.query.WeakAndItem; import com.yahoo.prelude.query.WordAlternativesItem; import com.yahoo.prelude.query.WordItem; import com.yahoo.prelude.querytransform.QueryRewrite; +import com.yahoo.processing.IllegalInputException; import com.yahoo.search.Query; import com.yahoo.search.Searcher; import com.yahoo.search.config.IndexInfoConfig; @@ -42,7 +42,6 @@ import com.yahoo.search.query.Sorting.Order; import com.yahoo.search.query.Sorting.UcaSorter; import com.yahoo.search.query.parser.Parsable; import com.yahoo.search.query.parser.ParserEnvironment; - import com.yahoo.search.searchchain.Execution; import org.junit.Test; @@ -71,6 +70,13 @@ public class YqlParserTestCase { private final YqlParser parser = new YqlParser(new ParserEnvironment()); @Test + public void failsGracefullyOnMissingQuoteEscapingAndSubsequentUnicodeCharacter() { + assertParseFail("select * from bar where rank(ids contains 'http://en.wikipedia.org/wiki/Hors_d'Å“uvre') limit 10;", + new IllegalInputException("com.yahoo.search.yql.ProgramCompileException: query:L1:79 " + + "no viable alternative at input 'rank(ids contains 'http://en.wikipedia.org/wiki/Hors_d''")); + } + + @Test public void testParserDefaults() { assertTrue(parser.isQueryParser()); assertNull(parser.getDocTypes()); diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/application/v4/model/EndpointStatus.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/application/v4/model/EndpointStatus.java index 
55a7af45fd2..4b326bc7430 100644 --- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/application/v4/model/EndpointStatus.java +++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/application/v4/model/EndpointStatus.java @@ -1,58 +1,45 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.vespa.hosted.controller.api.application.v4.model; +import java.time.Instant; +import java.util.Objects; + /** - * Represent the operational status of a service endpoint (where the endpoint itself - * is identified by the container cluster id). - * - * The status of an endpoint may be assigned from the controller. + * Represent the routing status for all endpoints of a deployment. * * @author smorgrav */ public class EndpointStatus { + private final String agent; - private final String reason; private final Status status; - private final long epoch; - - public enum Status { - in, - out, - unknown; - } + private final Instant changedAt; - public EndpointStatus(Status status, String reason, String agent, long epoch) { - this.status = status; - this.reason = reason; - this.agent = agent; - this.epoch = epoch; + public EndpointStatus(Status status, String agent, Instant changedAt) { + this.status = Objects.requireNonNull(status); + this.agent = Objects.requireNonNull(agent); + this.changedAt = Objects.requireNonNull(changedAt); } - /** - * @return The agent responsible setting this status - */ - public String getAgent() { + /** Returns the agent responsible setting this status */ + public String agent() { return agent; } - /** - * @return The reason for this status (e.g. 
'incident INCXXX') - */ - public String getReason() { - return reason; + /** Returns the current status */ + public Status status() { + return status; } - /** - * @return The current status - */ - public Status getStatus() { - return status; + /** Returns when this was last changed */ + public Instant changedAt() { + return changedAt; } - /** - * @return The epoch for when this status became active - */ - public long getEpoch() { - return epoch; + public enum Status { + in, + out, + unknown; } + } diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/ServiceRegistry.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/ServiceRegistry.java index b9cb0d773c6..d4e11163343 100644 --- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/ServiceRegistry.java +++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/ServiceRegistry.java @@ -30,6 +30,7 @@ import com.yahoo.vespa.hosted.controller.api.integration.resource.MeteringClient import com.yahoo.vespa.hosted.controller.api.integration.resource.ResourceDatabaseClient; import com.yahoo.vespa.hosted.controller.api.integration.routing.GlobalRoutingService; import com.yahoo.vespa.hosted.controller.api.integration.secrets.TenantSecretService; +import com.yahoo.vespa.hosted.controller.api.integration.user.RoleMaintainer; import com.yahoo.vespa.hosted.controller.api.integration.vcmr.ChangeRequestClient; import com.yahoo.vespa.hosted.controller.api.integration.zone.ZoneRegistry; @@ -108,4 +109,6 @@ public interface ServiceRegistry { HorizonClient horizonClient(); PlanRegistry planRegistry(); + + RoleMaintainer roleMaintainer(); } diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/athenz/ZmsClientMock.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/athenz/ZmsClientMock.java index 561475caa54..4679f660319 100644 --- 
a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/athenz/ZmsClientMock.java +++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/athenz/ZmsClientMock.java @@ -252,6 +252,10 @@ public class ZmsClientMock implements ZmsClient { } @Override + public void deleteRole(AthenzRole athenzRole) { + athenz.domains.get(athenzRole.domain()).roles.removeIf(role -> role.name().equals(athenzRole.roleName())); + } + @Override public void close() {} private static AthenzDomain getTenantDomain(AthenzResourceName resource) { diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/billing/Plan.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/billing/Plan.java index 84e36ea75d1..bd4c3c1a56f 100644 --- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/billing/Plan.java +++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/billing/Plan.java @@ -23,4 +23,7 @@ public interface Plan { /** Is this a plan that is billed */ boolean isBilled(); + + /** Is this a plan that gets on-call support */ + boolean isSupported(); } diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/billing/PlanRegistryMock.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/billing/PlanRegistryMock.java index 60eddbd24ff..5fb4d853e67 100644 --- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/billing/PlanRegistryMock.java +++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/billing/PlanRegistryMock.java @@ -11,9 +11,9 @@ import java.util.stream.Stream; public class PlanRegistryMock implements PlanRegistry { - public static final Plan freeTrial = new MockPlan("trial", false, 0, 0, 0, 200, "Free Trial - for testing purposes"); - public static final Plan paidPlan = new MockPlan("paid", true, 
"0.09", "0.009", "0.0003", 500, "Paid Plan - for testing purposes"); - public static final Plan nonePlan = new MockPlan("none", false, 0, 0, 0, 0, "None Plan - for testing purposes"); + public static final Plan freeTrial = new MockPlan("trial", false, false, 0, 0, 0, 200, "Free Trial - for testing purposes"); + public static final Plan paidPlan = new MockPlan("paid", true, true, "0.09", "0.009", "0.0003", 500, "Paid Plan - for testing purposes"); + public static final Plan nonePlan = new MockPlan("none", false, false, 0, 0, 0, 0, "None Plan - for testing purposes"); @Override public Plan defaultPlan() { @@ -33,18 +33,20 @@ public class PlanRegistryMock implements PlanRegistry { private final CostCalculator costCalculator; private final QuotaCalculator quotaCalculator; private final boolean billed; + private final boolean supported; - public MockPlan(String planId, boolean billed, double cpuPrice, double memPrice, double dgbPrice, int quota, String description) { - this(PlanId.from(planId), billed, new MockCostCalculator(cpuPrice, memPrice, dgbPrice), () -> Quota.unlimited().withBudget(quota), description); + public MockPlan(String planId, boolean billed, boolean supported, double cpuPrice, double memPrice, double dgbPrice, int quota, String description) { + this(PlanId.from(planId), billed, supported, new MockCostCalculator(cpuPrice, memPrice, dgbPrice), () -> Quota.unlimited().withBudget(quota), description); } - public MockPlan(String planId, boolean billed, String cpuPrice, String memPrice, String dgbPrice, int quota, String description) { - this(PlanId.from(planId), billed, new MockCostCalculator(cpuPrice, memPrice, dgbPrice), () -> Quota.unlimited().withBudget(quota), description); + public MockPlan(String planId, boolean billed, boolean supported, String cpuPrice, String memPrice, String dgbPrice, int quota, String description) { + this(PlanId.from(planId), billed, supported, new MockCostCalculator(cpuPrice, memPrice, dgbPrice), () -> 
Quota.unlimited().withBudget(quota), description); } - public MockPlan(PlanId planId, boolean billed, MockCostCalculator calculator, QuotaCalculator quota, String description) { + public MockPlan(PlanId planId, boolean billed, boolean supported, MockCostCalculator calculator, QuotaCalculator quota, String description) { this.planId = planId; this.billed = billed; + this.supported = supported; this.costCalculator = calculator; this.quotaCalculator = quota; this.description = description; @@ -74,6 +76,11 @@ public class PlanRegistryMock implements PlanRegistry { public boolean isBilled() { return billed; } + + @Override + public boolean isSupported() { + return supported; + } } private static class MockCostCalculator implements CostCalculator { diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/ConfigServer.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/ConfigServer.java index 122f836c908..7f9b693f413 100644 --- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/ConfigServer.java +++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/ConfigServer.java @@ -3,7 +3,6 @@ package com.yahoo.vespa.hosted.controller.api.integration.configserver; import com.yahoo.component.Version; import com.yahoo.config.provision.ApplicationId; -import com.yahoo.config.provision.TenantName; import com.yahoo.config.provision.zone.ZoneId; import com.yahoo.vespa.flags.json.FlagData; import com.yahoo.vespa.hosted.controller.api.application.v4.model.ClusterMetrics; @@ -85,12 +84,12 @@ public interface ConfigServer { /** * Set new status for a endpoint of a single deployment. * - * @param deployment The deployment to change - * @param upstreamName The upstream to modify. 
Upstream name is a unique identifier for the global route of a - * deployment in the shared routing layer - * @param status The new status + * @param deployment The deployment to change + * @param upstreamNames The upstream names to modify. Upstream name is a unique identifier for the routing status + * of a cluster in a deployment + * @param status The new status */ - void setGlobalRotationStatus(DeploymentId deployment, String upstreamName, EndpointStatus status); + void setGlobalRotationStatus(DeploymentId deployment, List<String> upstreamNames, EndpointStatus status); /** * Set the new status for an entire zone. diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/ContainerEndpoint.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/ContainerEndpoint.java index bac34e73dc5..7246903a51b 100644 --- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/ContainerEndpoint.java +++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/ContainerEndpoint.java @@ -1,8 +1,11 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.vespa.hosted.controller.api.integration.configserver; +import com.yahoo.config.provision.zone.RoutingMethod; + import java.util.List; import java.util.Objects; +import java.util.OptionalInt; /** * This represents a list of one or more names for a container cluster. 
@@ -14,11 +17,15 @@ public class ContainerEndpoint { private final String clusterId; private final String scope; private final List<String> names; + private final OptionalInt weight; + private final RoutingMethod routingMethod; - public ContainerEndpoint(String clusterId, String scope, List<String> names) { - this.clusterId = nonEmpty(clusterId, "message must be non-empty"); + public ContainerEndpoint(String clusterId, String scope, List<String> names, OptionalInt weight, RoutingMethod routingMethod) { + this.clusterId = nonEmpty(clusterId, "clusterId must be non-empty"); this.scope = Objects.requireNonNull(scope, "scope must be non-null"); this.names = List.copyOf(Objects.requireNonNull(names, "names must be non-null")); + this.weight = Objects.requireNonNull(weight, "weight must be non-null"); + this.routingMethod = Objects.requireNonNull(routingMethod, "routingMethod must be non-null"); } /** ID of the cluster to which this points */ @@ -39,22 +46,34 @@ public class ContainerEndpoint { return names; } + /** The relative weight of this endpoint */ + public OptionalInt weight() { + return weight; + } + + /** The routing method used by this endpoint */ + public RoutingMethod routingMethod() { + return routingMethod; + } + @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; ContainerEndpoint that = (ContainerEndpoint) o; - return clusterId.equals(that.clusterId) && scope.equals(that.scope) && names.equals(that.names); + return clusterId.equals(that.clusterId) && scope.equals(that.scope) && names.equals(that.names) && weight.equals(that.weight) && routingMethod == that.routingMethod; } @Override public int hashCode() { - return Objects.hash(clusterId, scope, names); + return Objects.hash(clusterId, scope, names, weight, routingMethod); } @Override public String toString() { - return "container endpoint for " + clusterId + ": " + names + " [scope=" + scope + "]"; + return "container 
endpoint for cluster " + clusterId + ": " + String.join(", ", names) + + " [method=" + routingMethod + ",scope=" + scope + ",weight=" + + weight.stream().boxed().map(Object::toString).findFirst().orElse("<none>") + "]"; } private static String nonEmpty(String s, String message) { diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/deployment/ApplicationStore.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/deployment/ApplicationStore.java index 9b44c984324..a5938c3b6b5 100644 --- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/deployment/ApplicationStore.java +++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/deployment/ApplicationStore.java @@ -27,9 +27,14 @@ public interface ApplicationStore { /** Removes diffs for packages before the given build number */ void pruneDiffs(TenantName tenantName, ApplicationName applicationName, long beforeBuildNumber); - /** Find application package by given build number */ + /** Find prod application package by given build number */ Optional<byte[]> find(TenantName tenant, ApplicationName application, long buildNumber); + /** Whether the prod application package with the given number is stored. */ + default boolean hasBuild(TenantName tenant, ApplicationName application, long buildNumber) { + return find(tenant, application, buildNumber).isPresent(); + } + /** Stores the given tenant application package of the given version and diff since previous version. 
*/ void put(TenantName tenant, ApplicationName application, ApplicationVersion applicationVersion, byte[] applicationPackage, byte[] diff); diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/deployment/ApplicationVersion.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/deployment/ApplicationVersion.java index 1f387a49a68..a0dee6c059f 100644 --- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/deployment/ApplicationVersion.java +++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/deployment/ApplicationVersion.java @@ -73,21 +73,21 @@ public class ApplicationVersion implements Comparable<ApplicationVersion> { Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty(), false); } - /** Creates an version from a completed build, an author email, and build meta data. */ + /** Creates a version from a completed build, an author email, and build meta data. */ public static ApplicationVersion from(SourceRevision source, long buildNumber, String authorEmail, Version compileVersion, Instant buildTime) { return new ApplicationVersion(Optional.of(source), OptionalLong.of(buildNumber), Optional.of(authorEmail), Optional.of(compileVersion), Optional.of(buildTime), Optional.empty(), Optional.empty(), false); } - /** Creates an version from a completed build, an author email, and build meta data. */ + /** Creates a version from a completed build, an author email, and build meta data. 
*/ public static ApplicationVersion from(Optional<SourceRevision> source, long buildNumber, Optional<String> authorEmail, Optional<Version> compileVersion, Optional<Instant> buildTime, Optional<String> sourceUrl, Optional<String> commit, boolean deployedDirectly) { return new ApplicationVersion(source, OptionalLong.of(buildNumber), authorEmail, compileVersion, buildTime, sourceUrl, commit, deployedDirectly); } - /** Returns an unique identifier for this version or "unknown" if version is not known */ + /** Returns a unique identifier for this version or "unknown" if version is not known */ public String id() { if (isUnknown()) return "unknown"; diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/noderepository/NodeRepositoryNode.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/noderepository/NodeRepositoryNode.java index 1551ecbb7ca..71f08f7318d 100644 --- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/noderepository/NodeRepositoryNode.java +++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/noderepository/NodeRepositoryNode.java @@ -37,8 +37,6 @@ public class NodeRepositoryNode { private Set<String> additionalIpAddresses; @JsonProperty("additionalHostnames") private List<String> additionalHostnames; - @JsonProperty("openStackId") - private String openStackId; @JsonProperty("flavor") private String flavor; @JsonProperty("resources") @@ -162,14 +160,6 @@ public class NodeRepositoryNode { this.additionalHostnames = additionalHostnames; } - public String getOpenStackId() { - return openStackId; - } - - public void setOpenStackId(String openStackId) { - this.openStackId = openStackId; - } - public String getFlavor() { return flavor; } @@ -348,6 +338,10 @@ public class NodeRepositoryNode { return orchestratorStatus; } + public void setOrchestratorStatus(String orchestratorStatus) { + this.orchestratorStatus = orchestratorStatus; 
+ } + public Long suspendedSinceMillis() { return suspendedSinceMillis; } @@ -447,7 +441,6 @@ public class NodeRepositoryNode { ", ipAddresses=" + ipAddresses + ", additionalIpAddresses=" + additionalIpAddresses + ", additionalHostnames=" + additionalHostnames + - ", openStackId='" + openStackId + '\'' + ", flavor='" + flavor + '\'' + ", resources=" + resources + ", requestedResources=" + requestedResources + diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/user/RoleMaintainer.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/user/RoleMaintainer.java new file mode 100644 index 00000000000..97a15b421c5 --- /dev/null +++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/user/RoleMaintainer.java @@ -0,0 +1,20 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +package com.yahoo.vespa.hosted.controller.api.integration.user; + +import com.yahoo.config.provision.ApplicationId; +import com.yahoo.vespa.hosted.controller.tenant.Tenant; + +import java.util.List; + +/** + * @author olaa + */ +public interface RoleMaintainer { + + /** Given the set of all existing tenants and applications, delete any superflous roles */ + void deleteLeftoverRoles(List<Tenant> tenants, List<ApplicationId> applications); + + /** Finds the subset of tenants that should be deleted based on role/domain existence */ + List<Tenant> tenantsToDelete(List<Tenant> tenants); + +} diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/user/RoleMaintainerMock.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/user/RoleMaintainerMock.java new file mode 100644 index 00000000000..a2b6ad612da --- /dev/null +++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/user/RoleMaintainerMock.java @@ -0,0 +1,30 @@ +// Copyright Yahoo. 
Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +package com.yahoo.vespa.hosted.controller.api.integration.user; + +import com.yahoo.config.provision.ApplicationId; +import com.yahoo.vespa.hosted.controller.tenant.Tenant; + +import java.util.ArrayList; +import java.util.List; + +/** + * @author olaa + */ +public class RoleMaintainerMock implements RoleMaintainer { + + private List<Tenant> tenantsToDelete = new ArrayList<>(); + + @Override + public void deleteLeftoverRoles(List<Tenant> tenants, List<ApplicationId> applications) { + + } + + @Override + public List<Tenant> tenantsToDelete(List<Tenant> tenants) { + return tenantsToDelete; + } + + public void mockTenantToDelete(Tenant tenant) { + tenantsToDelete.add(tenant); + } +} diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/systemflags/v1/SystemFlagsDataArchive.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/systemflags/v1/SystemFlagsDataArchive.java index f1e5f4ebd9d..a739a8e2b01 100644 --- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/systemflags/v1/SystemFlagsDataArchive.java +++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/systemflags/v1/SystemFlagsDataArchive.java @@ -25,7 +25,6 @@ import java.nio.file.Path; import java.nio.file.Paths; import java.util.ArrayList; import java.util.Arrays; -import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Objects; @@ -73,7 +72,7 @@ public class SystemFlagsDataArchive { if (!entry.isDirectory() && name.startsWith("flags/")) { Path filePath = Paths.get(name); String rawData = new String(zipIn.readAllBytes(), StandardCharsets.UTF_8); - addFile(builder, rawData, filePath, Set.of()); + addFile(builder, rawData, filePath, Set.of(), null); } } return builder.build(); @@ -102,7 +101,7 @@ public class SystemFlagsDataArchive { if (!Files.isDirectory(absolutePath) && relativePath.startsWith("flags")) { String 
rawData = uncheck(() -> Files.readString(absolutePath, StandardCharsets.UTF_8)); - addFile(builder, rawData, relativePath, filenamesForSystem); + addFile(builder, rawData, relativePath, filenamesForSystem, systemDefinition); } }); return builder.build(); @@ -169,12 +168,17 @@ public class SystemFlagsDataArchive { .collect(Collectors.toSet()); } - private static void addFile(Builder builder, String rawData, Path filePath, Set<String> filenamesForSystem) { + private static void addFile(Builder builder, String rawData, Path filePath, Set<String> filenamesForSystem, + ZoneRegistry systemDefinition) { String filename = filePath.getFileName().toString(); if (filename.startsWith(".")) { return; // Ignore files starting with '.' } if (!filenamesForSystem.isEmpty() && !filenamesForSystem.contains(filename)) { + if (systemDefinition != null && filename.startsWith(systemDefinition.system().value() + '.')) { + throw new IllegalArgumentException(String.format( + "Environment or zone in filename '%s' is does not exist", filename)); + } return; // Ignore files irrelevant for system } if (!filename.endsWith(".json")) { diff --git a/controller-api/src/test/java/com/yahoo/vespa/hosted/controller/api/systemflags/v1/SystemFlagsDataArchiveTest.java b/controller-api/src/test/java/com/yahoo/vespa/hosted/controller/api/systemflags/v1/SystemFlagsDataArchiveTest.java index 6564ddef81f..d1df9b095d5 100644 --- a/controller-api/src/test/java/com/yahoo/vespa/hosted/controller/api/systemflags/v1/SystemFlagsDataArchiveTest.java +++ b/controller-api/src/test/java/com/yahoo/vespa/hosted/controller/api/systemflags/v1/SystemFlagsDataArchiveTest.java @@ -130,6 +130,15 @@ public class SystemFlagsDataArchiveTest { } @Test + public void throws_exception_on_unknown_region() { + Path directory = Paths.get("src/test/resources/system-flags-with-unknown-file-name/"); + expectedException.expect(IllegalArgumentException.class); + expectedException.expectMessage( + "Environment or zone in filename 
'main.prod.unknown-region.json' is does not exist"); + SystemFlagsDataArchive.fromDirectoryAndSystem(directory, createZoneRegistryMock()); + } + + @Test public void throws_on_unknown_field() { expectedException.expect(IllegalArgumentException.class); expectedException.expectMessage( diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/ApplicationController.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/ApplicationController.java index df0c727ec24..49939f4bfd2 100644 --- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/ApplicationController.java +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/ApplicationController.java @@ -351,6 +351,7 @@ public class ApplicationController { TenantAndApplicationId applicationId = TenantAndApplicationId.from(job.application()); ZoneId zone = job.type().zone(controller.system()); + DeploymentId deployment = new DeploymentId(job.application(), zone); try (Lock deploymentLock = lockForDeployment(job.application(), zone)) { Set<ContainerEndpoint> containerEndpoints; @@ -364,7 +365,7 @@ public class ApplicationController { Version platform = run.versions().sourcePlatform().filter(__ -> deploySourceVersions).orElse(run.versions().targetPlatform()); ApplicationVersion revision = run.versions().sourceApplication().filter(__ -> deploySourceVersions).orElse(run.versions().targetApplication()); - ApplicationPackage applicationPackage = new ApplicationPackage(applicationStore.get(new DeploymentId(job.application(), zone), revision)); + ApplicationPackage applicationPackage = new ApplicationPackage(applicationStore.get(deployment, revision)); try (Lock lock = lock(applicationId)) { LockedApplication application = new LockedApplication(requireApplication(applicationId), lock); @@ -376,8 +377,7 @@ public class ApplicationController { applicationPackage = applicationPackage.withTrustedCertificate(run.testerCertificate().get()); endpointCertificateMetadata 
= endpointCertificates.getMetadata(instance, zone, applicationPackage.deploymentSpec()); - - containerEndpoints = controller.routing().containerEndpointsOf(application, job.application().instance(), zone); + containerEndpoints = controller.routing().of(deployment).prepare(application); } // Release application lock while doing the deployment, which is a lengthy task. @@ -391,7 +391,7 @@ public class ApplicationController { // For direct deployments use the full deployment ID, but otherwise use just the tenant and application as // the source since it's the same application, so it should have the same warnings NotificationSource source = zone.environment().isManuallyDeployed() ? - NotificationSource.from(new DeploymentId(job.application(), zone)) : NotificationSource.from(applicationId); + NotificationSource.from(deployment) : NotificationSource.from(applicationId); List<String> warnings = Optional.ofNullable(result.prepareResponse().log) .map(logs -> logs.stream() .filter(log -> log.applicationPackage) @@ -476,6 +476,7 @@ public class ApplicationController { ZoneId zone, Version platform, Set<ContainerEndpoint> endpoints, Optional<EndpointCertificateMetadata> endpointCertificateMetadata, boolean dryRun) { + DeploymentId deployment = new DeploymentId(application, zone); try { Optional<DockerImage> dockerImageRepo = Optional.ofNullable( dockerImageRepoFlag @@ -490,7 +491,7 @@ public class ApplicationController { .map(tenant -> ((AthenzTenant)tenant).domain()); if (zone.environment().isManuallyDeployed()) - controller.applications().applicationStore().putMeta(new DeploymentId(application, zone), + controller.applications().applicationStore().putMeta(deployment, clock.instant(), applicationPackage.metaDataZip()); @@ -502,9 +503,9 @@ public class ApplicationController { .filter(tenant-> tenant instanceof CloudTenant) .map(tenant -> ((CloudTenant) tenant).tenantSecretStores()) .orElse(List.of()); - List<X509Certificate> operatorCertificates = 
controller.supportAccess().activeGrantsFor(new DeploymentId(application, zone)).stream() - .map(SupportAccessGrant::certificate) - .collect(toList()); + List<X509Certificate> operatorCertificates = controller.supportAccess().activeGrantsFor(deployment).stream() + .map(SupportAccessGrant::certificate) + .collect(toList()); ConfigServer.PreparedApplication preparedApplication = configServer.deploy(new DeploymentData(application, zone, applicationPackage.zippedContent(), platform, @@ -515,10 +516,10 @@ public class ApplicationController { return new ActivateResult(new RevisionId(applicationPackage.hash()), preparedApplication.prepareResponse(), applicationPackage.zippedContent().length); } finally { - // Even if prepare fails, a load balancer may have been provisioned. Always refresh routing policies so that - // any DNS updates can be propagated as early as possible. - if ( ! application.instance().isTester()) - controller.routing().policies().refresh(application, applicationPackage.deploymentSpec(), zone); + // Even if prepare fails, routing configuration may need to be updated + if ( ! application.instance().isTester()) { + controller.routing().of(deployment).configure(applicationPackage.deploymentSpec()); + } } } @@ -560,6 +561,10 @@ public class ApplicationController { * @throws IllegalArgumentException if the application has deployments or the caller is not authorized */ public void deleteApplication(TenantAndApplicationId id, Credentials credentials) { + deleteApplication(id, Optional.of(credentials)); + } + + public void deleteApplication(TenantAndApplicationId id, Optional<Credentials> credentials) { lockApplicationOrThrow(id, application -> { var deployments = application.get().instances().values().stream() .filter(instance -> ! 
instance.deployments().isEmpty()) @@ -579,7 +584,7 @@ public class ApplicationController { applicationStore.removeAllTesters(id.tenant(), id.application()); applicationStore.putMetaTombstone(id.tenant(), id.application(), clock.instant()); - accessControl.deleteApplication(id, credentials); + credentials.ifPresent(creds -> accessControl.deleteApplication(id, creds)); curator.removeApplication(id); controller.jobController().collectGarbage(); @@ -702,7 +707,7 @@ public class ApplicationController { try { configServer.deactivate(id); } finally { - controller.routing().policies().refresh(application.get().id().instance(instanceName), application.get().deploymentSpec(), zone); + controller.routing().of(id).configure(application.get().deploymentSpec()); if (zone.environment().isManuallyDeployed()) applicationStore.putMetaTombstone(id, clock.instant()); if (!zone.environment().isTest()) diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/Instance.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/Instance.java index ea2bcfcac4b..6e31c93dbdd 100644 --- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/Instance.java +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/Instance.java @@ -14,7 +14,7 @@ import com.yahoo.vespa.hosted.controller.application.Deployment; import com.yahoo.vespa.hosted.controller.application.DeploymentActivity; import com.yahoo.vespa.hosted.controller.application.DeploymentMetrics; import com.yahoo.vespa.hosted.controller.application.QuotaUsage; -import com.yahoo.vespa.hosted.controller.rotation.RotationStatus; +import com.yahoo.vespa.hosted.controller.routing.rotation.RotationStatus; import java.time.Instant; import java.util.Collection; diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/RoutingController.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/RoutingController.java index 2f5b92ca4c1..943d6ac7b18 100644 
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/RoutingController.java +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/RoutingController.java @@ -18,7 +18,6 @@ import com.yahoo.config.provision.zone.ZoneId; import com.yahoo.vespa.flags.BooleanFlag; import com.yahoo.vespa.flags.FetchVector; import com.yahoo.vespa.flags.Flags; -import com.yahoo.vespa.hosted.controller.api.application.v4.model.EndpointStatus; import com.yahoo.vespa.hosted.controller.api.identifiers.DeploymentId; import com.yahoo.vespa.hosted.controller.api.integration.configserver.ContainerEndpoint; import com.yahoo.vespa.hosted.controller.api.integration.dns.Record; @@ -31,10 +30,17 @@ import com.yahoo.vespa.hosted.controller.application.EndpointList; import com.yahoo.vespa.hosted.controller.application.SystemApplication; import com.yahoo.vespa.hosted.controller.application.TenantAndApplicationId; import com.yahoo.vespa.hosted.controller.dns.NameServiceQueue.Priority; -import com.yahoo.vespa.hosted.controller.rotation.RotationLock; -import com.yahoo.vespa.hosted.controller.rotation.RotationRepository; import com.yahoo.vespa.hosted.controller.routing.RoutingId; import com.yahoo.vespa.hosted.controller.routing.RoutingPolicies; +import com.yahoo.vespa.hosted.controller.routing.context.DeploymentRoutingContext; +import com.yahoo.vespa.hosted.controller.routing.context.DeploymentRoutingContext.ExclusiveDeploymentRoutingContext; +import com.yahoo.vespa.hosted.controller.routing.context.DeploymentRoutingContext.SharedDeploymentRoutingContext; +import com.yahoo.vespa.hosted.controller.routing.context.ExclusiveZoneRoutingContext; +import com.yahoo.vespa.hosted.controller.routing.context.RoutingContext; +import com.yahoo.vespa.hosted.controller.routing.context.SharedZoneRoutingContext; +import com.yahoo.vespa.hosted.controller.routing.rotation.Rotation; +import com.yahoo.vespa.hosted.controller.routing.rotation.RotationLock; +import 
com.yahoo.vespa.hosted.controller.routing.rotation.RotationRepository; import com.yahoo.vespa.hosted.rotation.config.RotationsConfig; import java.nio.charset.StandardCharsets; @@ -44,11 +50,12 @@ import java.util.Collections; import java.util.Comparator; import java.util.HashMap; import java.util.HashSet; -import java.util.LinkedHashMap; import java.util.LinkedHashSet; import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.Optional; +import java.util.OptionalInt; import java.util.Set; import java.util.TreeMap; import java.util.stream.Collectors; @@ -77,6 +84,25 @@ public class RoutingController { this.hideSharedRoutingEndpoint = Flags.HIDE_SHARED_ROUTING_ENDPOINT.bindTo(controller.flagSource()); } + /** Create a routing context for given deployment */ + public DeploymentRoutingContext of(DeploymentId deployment) { + if (usesSharedRouting(deployment.zoneId())) { + return new SharedDeploymentRoutingContext(deployment, + this, + controller.serviceRegistry().configServer(), + controller.clock()); + } + return new ExclusiveDeploymentRoutingContext(deployment, this); + } + + /** Create a routing context for given zone */ + public RoutingContext of(ZoneId zone) { + if (usesSharedRouting(zone)) { + return new SharedZoneRoutingContext(zone, controller.serviceRegistry().configServer()); + } + return new ExclusiveZoneRoutingContext(zone, routingPolicies); + } + public RoutingPolicies policies() { return routingPolicies; } @@ -92,7 +118,7 @@ public class RoutingController { // Avoid reading application more than once per call to this Supplier<DeploymentSpec> deploymentSpec = Suppliers.memoize(() -> controller.applications().requireApplication(TenantAndApplicationId.from(deployment.applicationId())).deploymentSpec()); // To discover the cluster name for a zone-scoped endpoint, we need to read routing policies - for (var policy : routingPolicies.get(deployment).values()) { + for (var policy : routingPolicies.read(deployment)) { if 
(!policy.status().isActive()) continue; for (var routingMethod : controller.zoneRegistry().routingMethods(policy.id().zone())) { if (routingMethod.isDirect() && !isSystemApplication && !canRouteDirectlyTo(deployment, deploymentSpec.get())) continue; @@ -217,43 +243,6 @@ public class RoutingController { return Collections.unmodifiableList(endpointDnsNames); } - /** Change status of all global endpoints for given deployment */ - public void setGlobalRotationStatus(DeploymentId deployment, EndpointStatus status) { - readDeclaredEndpointsOf(deployment.applicationId()).requiresRotation().primary().ifPresent(endpoint -> { - try { - controller.serviceRegistry().configServer().setGlobalRotationStatus(deployment, endpoint.upstreamIdOf(deployment), status); - } catch (Exception e) { - throw new RuntimeException("Failed to set rotation status of " + endpoint + " in " + deployment, e); - } - }); - } - - /** Get global endpoint status for given deployment */ - public Map<Endpoint, EndpointStatus> globalRotationStatus(DeploymentId deployment) { - var routingEndpoints = new LinkedHashMap<Endpoint, EndpointStatus>(); - readDeclaredEndpointsOf(deployment.applicationId()).requiresRotation().primary().ifPresent(endpoint -> { - var upstreamName = endpoint.upstreamIdOf(deployment); - var status = controller.serviceRegistry().configServer().getGlobalRotationStatus(deployment, upstreamName); - routingEndpoints.put(endpoint, status); - }); - return Collections.unmodifiableMap(routingEndpoints); - } - - /** - * Assigns one or more global rotations to given application, if eligible. The given application is implicitly - * stored, ensuring that the assigned rotation(s) are persisted when this returns. 
- */ - private LockedApplication assignRotations(LockedApplication application, InstanceName instanceName) { - try (RotationLock rotationLock = rotationRepository.lock()) { - var rotations = rotationRepository.getOrAssignRotations(application.get().deploymentSpec(), - application.get().require(instanceName), - rotationLock); - application = application.with(instanceName, instance -> instance.with(rotations)); - controller.applications().store(application); // store assigned rotation even if deployment fails - } - return application; - } - /** Returns the global and application-level endpoints for given deployment, as container endpoints */ public Set<ContainerEndpoint> containerEndpointsOf(LockedApplication application, InstanceName instanceName, ZoneId zone) { // Assign rotations to application @@ -271,7 +260,6 @@ public class RoutingController { EndpointList endpoints = declaredEndpointsOf(application.get()).targets(deployment); EndpointList globalEndpoints = endpoints.scope(Endpoint.Scope.global); for (var assignedRotation : instance.rotations()) { - var names = new ArrayList<String>(); EndpointList rotationEndpoints = globalEndpoints.named(assignedRotation.endpointId()) .requiresRotation(); @@ -286,21 +274,21 @@ public class RoutingController { } // Register names in DNS - var rotation = rotationRepository.getRotation(assignedRotation.rotationId()); - if (rotation.isPresent()) { - rotationEndpoints.forEach(endpoint -> { - controller.nameServiceForwarder().createCname(RecordName.from(endpoint.dnsName()), - RecordData.fqdn(rotation.get().name()), - Priority.normal); - names.add(endpoint.dnsName()); - }); + Rotation rotation = rotationRepository.requireRotation(assignedRotation.rotationId()); + for (var endpoint : rotationEndpoints) { + controller.nameServiceForwarder().createCname(RecordName.from(endpoint.dnsName()), + RecordData.fqdn(rotation.name()), + Priority.normal); + List<String> names = List.of(endpoint.dnsName(), + // Include rotation ID as a valid name 
of this container endpoint + // (required by global routing health checks) + assignedRotation.rotationId().asString()); + containerEndpoints.add(new ContainerEndpoint(assignedRotation.clusterId().value(), + asString(Endpoint.Scope.global), + names, + OptionalInt.empty(), + endpoint.routingMethod())); } - - // Include rotation ID as a valid name of this container endpoint (required by global routing health checks) - names.add(assignedRotation.rotationId().asString()); - containerEndpoints.add(new ContainerEndpoint(assignedRotation.clusterId().value(), - asString(Endpoint.Scope.global), - names)); } // Add endpoints not backed by a rotation (i.e. other routing methods so that the config server always knows // about global names, even when not using rotations) @@ -309,7 +297,9 @@ public class RoutingController { .forEach((clusterId, clusterEndpoints) -> { containerEndpoints.add(new ContainerEndpoint(clusterId.value(), asString(Endpoint.Scope.global), - clusterEndpoints.mapToList(Endpoint::dnsName))); + clusterEndpoints.mapToList(Endpoint::dnsName), + OptionalInt.empty(), + RoutingMethod.exclusive)); }); // Add application endpoints EndpointList applicationEndpoints = endpoints.scope(Endpoint.Scope.application); @@ -327,12 +317,22 @@ public class RoutingController { RecordData.fqdn(vipHostname), Priority.normal); } - applicationEndpoints.groupingBy(Endpoint::cluster) - .forEach((clusterId, clusterEndpoints) -> { - containerEndpoints.add(new ContainerEndpoint(clusterId.value(), - asString(Endpoint.Scope.application), - clusterEndpoints.mapToList(Endpoint::dnsName))); - }); + Map<ClusterSpec.Id, EndpointList> applicationEndpointsByCluster = applicationEndpoints.groupingBy(Endpoint::cluster); + for (var kv : applicationEndpointsByCluster.entrySet()) { + ClusterSpec.Id clusterId = kv.getKey(); + EndpointList clusterEndpoints = kv.getValue(); + for (var endpoint : clusterEndpoints) { + Optional<Endpoint.Target> matchingTarget = endpoint.targets().stream() + .filter(t -> 
t.routesTo(deployment)) + .findFirst(); + if (matchingTarget.isEmpty()) throw new IllegalStateException("No target found routing to " + deployment + " in " + endpoint); + containerEndpoints.add(new ContainerEndpoint(clusterId.value(), + asString(Endpoint.Scope.application), + List.of(endpoint.dnsName()), + OptionalInt.of(matchingTarget.get().weight()), + endpoint.routingMethod())); + } + } return Collections.unmodifiableSet(containerEndpoints); } @@ -355,6 +355,32 @@ public class RoutingController { Priority.normal)); } + /** Returns direct routing endpoints if any exist and feature flag is set for given application */ + // TODO: Remove this when feature flag is removed, and in-line .direct() filter where relevant + public EndpointList directEndpoints(EndpointList endpoints, ApplicationId application) { + boolean hideSharedEndpoint = hideSharedRoutingEndpoint.with(FetchVector.Dimension.APPLICATION_ID, application.serializedForm()).value(); + EndpointList directEndpoints = endpoints.direct(); + if (hideSharedEndpoint && !directEndpoints.isEmpty()) { + return directEndpoints; + } + return endpoints; + } + + /** + * Assigns one or more global rotations to given application, if eligible. The given application is implicitly + * stored, ensuring that the assigned rotation(s) are persisted when this returns. 
+ */ + private LockedApplication assignRotations(LockedApplication application, InstanceName instanceName) { + try (RotationLock rotationLock = rotationRepository.lock()) { + var rotations = rotationRepository.getOrAssignRotations(application.get().deploymentSpec(), + application.get().require(instanceName), + rotationLock); + application = application.with(instanceName, instance -> instance.with(rotations)); + controller.applications().store(application); // store assigned rotation even if deployment fails + } + return application; + } + private boolean usesSharedRouting(ZoneId zone) { return controller.zoneRegistry().routingMethods(zone).stream().anyMatch(RoutingMethod::isShared); } @@ -364,8 +390,8 @@ public class RoutingController { var deploymentsByMethod = new HashMap<RoutingMethod, Set<DeploymentId>>(); for (var deployment : deployments) { for (var method : controller.zoneRegistry().routingMethods(deployment.zoneId())) { - deploymentsByMethod.putIfAbsent(method, new LinkedHashSet<>()); - deploymentsByMethod.get(method).add(deployment); + deploymentsByMethod.computeIfAbsent(method, k -> new LinkedHashSet<>()) + .add(deployment); } } var routingMethods = new ArrayList<RoutingMethod>(); @@ -442,23 +468,12 @@ public class RoutingController { } /** Create a common name based on a hash of given application. This must be less than 64 characters long. 
*/ - private String commonNameHashOf(ApplicationId application, SystemName system) { + private static String commonNameHashOf(ApplicationId application, SystemName system) { HashCode sha1 = Hashing.sha1().hashString(application.serializedForm(), StandardCharsets.UTF_8); String base32 = BaseEncoding.base32().omitPadding().lowerCase().encode(sha1.asBytes()); return 'v' + base32 + Endpoint.internalDnsSuffix(system); } - /** Returns direct routing endpoints if any exist and feature flag is set for given application */ - // TODO: Remove this when feature flag is removed, and in-line .direct() filter where relevant - public EndpointList directEndpoints(EndpointList endpoints, ApplicationId application) { - boolean hideSharedEndpoint = hideSharedRoutingEndpoint.with(FetchVector.Dimension.APPLICATION_ID, application.serializedForm()).value(); - EndpointList directEndpoints = endpoints.direct(); - if (hideSharedEndpoint && !directEndpoints.isEmpty()) { - return directEndpoints; - } - return endpoints; - } - private static String asString(Endpoint.Scope scope) { switch (scope) { case application: return "application"; diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/TenantController.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/TenantController.java index 537603427f5..59877fce634 100644 --- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/TenantController.java +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/TenantController.java @@ -161,7 +161,7 @@ public class TenantController { } /** Deletes the given tenant. 
*/ - public void delete(TenantName tenant, Supplier<Credentials> credentials, boolean forget) { + public void delete(TenantName tenant, Optional<Credentials> credentials, boolean forget) { try (Lock lock = lock(tenant)) { Tenant oldTenant = get(tenant, true) .orElseThrow(() -> new NotExistsException("Could not delete tenant '" + tenant + "': Tenant not found")); @@ -171,7 +171,7 @@ public class TenantController { throw new IllegalArgumentException("Could not delete tenant '" + tenant.value() + "': This tenant has active applications"); - accessControl.deleteTenant(tenant, credentials.get()); + credentials.ifPresent(creds -> accessControl.deleteTenant(tenant, creds)); controller.notificationsDb().removeNotifications(NotificationSource.from(tenant)); } diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/AssignedRotation.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/AssignedRotation.java index 1596456b7cc..ab9304e75f3 100644 --- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/AssignedRotation.java +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/AssignedRotation.java @@ -3,7 +3,7 @@ package com.yahoo.vespa.hosted.controller.application; import com.yahoo.config.provision.ClusterSpec; import com.yahoo.config.provision.RegionName; -import com.yahoo.vespa.hosted.controller.rotation.RotationId; +import com.yahoo.vespa.hosted.controller.routing.rotation.RotationId; import java.util.Collection; import java.util.Objects; diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/Endpoint.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/Endpoint.java index aee7c1052be..8f37d287c1a 100644 --- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/Endpoint.java +++ 
b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/Endpoint.java @@ -133,10 +133,10 @@ public class Endpoint { return routingMethod.isShared() && scope == Scope.global; } - /** Returns the upstream ID of given deployment. This *must* match what the routing layer generates */ - public String upstreamIdOf(DeploymentId deployment) { + /** Returns the upstream name of given deployment. This *must* match what the routing layer generates */ + public String upstreamName(DeploymentId deployment) { if (!routingMethod.isShared()) throw new IllegalArgumentException("Routing method " + routingMethod + " does not have upstream name"); - return upstreamIdOf(cluster.value(), deployment.applicationId(), deployment.zoneId()); + return upstreamName(cluster.value(), deployment.applicationId(), deployment.zoneId()); } @Override @@ -269,7 +269,7 @@ public class Endpoint { return suffix; } - private static String upstreamIdOf(String name, ApplicationId application, ZoneId zone) { + private static String upstreamName(String name, ApplicationId application, ZoneId zone) { return Stream.of(namePart(name, ""), instancePart(Optional.of(application.instance()), ""), application.application().value(), @@ -482,6 +482,11 @@ public class Endpoint { return weight; } + /** Returns whether this routes to given deployment */ + public boolean routesTo(DeploymentId deployment) { + return this.deployment.equals(deployment); + } + } public static class EndpointBuilder { diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/EndpointList.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/EndpointList.java index f9fd02fbf56..7fe8d554998 100644 --- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/EndpointList.java +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/EndpointList.java @@ -53,7 +53,7 @@ public class EndpointList extends 
AbstractFilteringList<Endpoint, EndpointList> return matching(endpoint -> endpoint.deployments().containsAll(deployments)); } - /** Returns the subset of endpoints which target the given deployments */ + /** Returns the subset of endpoints which target the given deployment */ public EndpointList targets(DeploymentId deployment) { return targets(List.of(deployment)); } diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/pkg/ApplicationPackage.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/pkg/ApplicationPackage.java index 9767ef59252..b088c2fd0fd 100644 --- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/pkg/ApplicationPackage.java +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/pkg/ApplicationPackage.java @@ -45,7 +45,7 @@ import static java.util.stream.Collectors.toMap; /** * A representation of the content of an application package. - * Only the deployment.xml content can be accessed as anything other than compressed data. + * Only meta-data content can be accessed as anything other than compressed data. * A package is identified by a hash of the content. * * This is immutable. 
@@ -98,8 +98,6 @@ public class ApplicationPackage { this.validationOverrides = files.get(validationOverridesFile).map(bytes -> new String(bytes, UTF_8)).map(ValidationOverrides::fromXml).orElse(ValidationOverrides.empty); Optional<Inspector> buildMetaObject = files.get(buildMetaFile).map(SlimeUtils::jsonToSlime).map(Slime::get); - if (requireFiles && buildMetaObject.isEmpty()) - throw new IllegalArgumentException("Missing required file '" + buildMetaFile + "'"); this.compileVersion = buildMetaObject.flatMap(object -> parse(object, "compileVersion", field -> Version.fromString(field.asString()))); this.buildTime = buildMetaObject.flatMap(object -> parse(object, "buildTime", field -> Instant.ofEpochMilli(field.asLong()))); diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTrigger.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTrigger.java index 3892ceeddf9..4e8f17b6098 100644 --- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTrigger.java +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTrigger.java @@ -277,8 +277,13 @@ public class DeploymentTrigger { /** Overrides the given instance's platform and application changes with any contained in the given change. 
*/ public void forceChange(ApplicationId instanceId, Change change) { applications().lockApplicationOrThrow(TenantAndApplicationId.from(instanceId), application -> { - applications().store(application.with(instanceId.instance(), - instance -> instance.withChange(change.onTopOf(application.get().require(instanceId.instance()).change())))); + Change newChange = change.onTopOf(application.get().require(instanceId.instance()).change()); + application = application.with(instanceId.instance(), + instance -> instance.withChange(newChange)); + DeploymentStatus newStatus = jobs.deploymentStatus(application.get()); + application = application.with(instanceId.instance(), + instance -> instance.withChange(remainingChange(instance, newStatus))); + applications().store(application); }); } diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunner.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunner.java index 94f6cccb3a5..e28273870d7 100644 --- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunner.java +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunner.java @@ -41,15 +41,16 @@ import com.yahoo.vespa.hosted.controller.api.integration.deployment.TesterId; import com.yahoo.vespa.hosted.controller.api.integration.organization.DeploymentFailureMails; import com.yahoo.vespa.hosted.controller.api.integration.organization.Mail; import com.yahoo.vespa.hosted.controller.application.ActivateResult; -import com.yahoo.vespa.hosted.controller.application.pkg.ApplicationPackage; import com.yahoo.vespa.hosted.controller.application.Deployment; import com.yahoo.vespa.hosted.controller.application.Endpoint; import com.yahoo.vespa.hosted.controller.application.TenantAndApplicationId; +import com.yahoo.vespa.hosted.controller.application.pkg.ApplicationPackage; import 
com.yahoo.vespa.hosted.controller.config.ControllerConfig; import com.yahoo.vespa.hosted.controller.maintenance.JobRunner; import com.yahoo.vespa.hosted.controller.notification.Notification; import com.yahoo.vespa.hosted.controller.notification.NotificationSource; -import com.yahoo.vespa.hosted.controller.routing.RoutingPolicyId; +import com.yahoo.vespa.hosted.controller.routing.RoutingPolicy; +import com.yahoo.vespa.hosted.controller.routing.context.DeploymentRoutingContext; import com.yahoo.yolean.Exceptions; import javax.security.auth.x500.X500Principal; @@ -69,6 +70,7 @@ import java.util.List; import java.util.Map; import java.util.Optional; import java.util.Set; +import java.util.UUID; import java.util.function.Consumer; import java.util.function.Supplier; import java.util.logging.Level; @@ -347,9 +349,15 @@ public class InternalStepRunner implements StepRunner { String failureReason = null; - NodeList suspendedTooLong = nodeList.suspendedSince(controller.clock().instant().minus(timeouts.nodesDown())); + NodeList suspendedTooLong = nodeList + .isStateful() + .suspendedSince(controller.clock().instant().minus(timeouts.statefulNodesDown())) + .and(nodeList + .not().isStateful() + .suspendedSince(controller.clock().instant().minus(timeouts.statelessNodesDown())) + ); if ( ! 
suspendedTooLong.isEmpty()) { - failureReason = "Some nodes have been suspended for more than " + timeouts.nodesDown().toMinutes() + " minutes:\n" + + failureReason = "Some nodes have been suspended for more than the allowed threshold:\n" + suspendedTooLong.asList().stream().map(node -> node.node().hostname().value()).collect(joining("\n")); } @@ -477,12 +485,12 @@ public class InternalStepRunner implements StepRunner { } private boolean endpointsAvailable(ApplicationId id, ZoneId zone, DualLogger logger) { - var endpoints = controller.routing().readZoneEndpointsOf(Set.of(new DeploymentId(id, zone))); + DeploymentId deployment = new DeploymentId(id, zone); + Map<ZoneId, List<Endpoint>> endpoints = controller.routing().readZoneEndpointsOf(Set.of(deployment)); if ( ! endpoints.containsKey(zone)) { logger.log("Endpoints not yet ready."); return false; } - var policies = controller.routing().policies().get(new DeploymentId(id, zone)); for (var endpoint : endpoints.get(zone)) { HostName endpointName = HostName.from(endpoint.dnsName()); var ipAddress = controller.jobController().cloud().resolveHostName(endpointName); @@ -490,10 +498,10 @@ public class InternalStepRunner implements StepRunner { logger.log(INFO, "DNS lookup yielded no IP address for '" + endpointName + "'."); return false; } - if (endpoint.routingMethod() == RoutingMethod.exclusive) { - var policy = policies.get(new RoutingPolicyId(id, ClusterSpec.Id.from(endpoint.name()), zone)); - if (policy == null) - throw new IllegalStateException(endpoint + " has no matching policy in " + policies); + DeploymentRoutingContext context = controller.routing().of(deployment); + if (context.routingMethod() == RoutingMethod.exclusive) { + RoutingPolicy policy = context.routingPolicy(ClusterSpec.Id.from(endpoint.name())) + .orElseThrow(() -> new IllegalStateException(endpoint + " has no matching policy")); var cNameValue = controller.jobController().cloud().resolveCname(endpointName); if ( ! 
cNameValue.map(policy.canonicalName()::equals).orElse(false)) { @@ -858,8 +866,12 @@ public class InternalStepRunner implements StepRunner { spec.athenzDomain(), spec.requireInstance(id.application().instance()).athenzService(zone.environment(), zone.region())); - try (ZipBuilder zipBuilder = new ZipBuilder(testPackage.length + servicesXml.length + 1000)) { + try (ZipBuilder zipBuilder = new ZipBuilder(testPackage.length + servicesXml.length + deploymentXml.length + 1000)) { + // Copy contents of submitted application-test.zip, and ensure required directories exist within the zip. zipBuilder.add(testPackage); + zipBuilder.add("artifacts/.ignore-" + UUID.randomUUID(), new byte[0]); + zipBuilder.add("tests/.ignore-" + UUID.randomUUID(), new byte[0]); + zipBuilder.add("services.xml", servicesXml); zipBuilder.add("deployment.xml", deploymentXml); if (useTesterCertificate) @@ -947,6 +959,7 @@ public class InternalStepRunner implements StepRunner { " <component id=\"com.yahoo.vespa.testrunner.VespaCliTestRunner\" bundle=\"vespa-osgi-testrunner\">\n" + " <config name=\"com.yahoo.vespa.testrunner.vespa-cli-test-runner\">\n" + " <artifactsPath>artifacts</artifactsPath>\n" + + " <testsPath>tests</testsPath>\n" + " <useAthenzCredentials>" + systemUsesAthenz + "</useAthenzCredentials>\n" + " </config>\n" + " </component>\n" + @@ -1035,7 +1048,8 @@ public class InternalStepRunner implements StepRunner { Duration endpoint() { return Duration.ofMinutes(15); } Duration endpointCertificate() { return Duration.ofMinutes(20); } Duration tester() { return Duration.ofMinutes(30); } - Duration nodesDown() { return Duration.ofMinutes(system.isCd() ? 30 : 60); } + Duration statelessNodesDown() { return Duration.ofMinutes(system.isCd() ? 30 : 60); } + Duration statefulNodesDown() { return Duration.ofMinutes(system.isCd() ? 30 : 720); } Duration noNodesDown() { return Duration.ofMinutes(system.isCd() ? 
30 : 240); } Duration testerCertificate() { return Duration.ofMinutes(300); } diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/JobController.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/JobController.java index ebda767d79e..fe4b3956f9b 100644 --- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/JobController.java +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/JobController.java @@ -565,13 +565,18 @@ public class JobController { for (JobType type : jobs(id)) locked(id, type, deactivateTester, __ -> { try (Lock ___ = curator.lock(id, type)) { - deactivateTester(tester, type); + try { + deactivateTester(tester, type); + } + catch (Exception e) { + // It's probably already deleted, so if we fail, that's OK. + } curator.deleteRunData(id, type); logs.delete(id); } }); } - catch (TimeoutException e) { + catch (Exception e) { return; // Don't remove the data if we couldn't clean up all resources. } curator.deleteRunData(id); diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/NodeList.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/NodeList.java index 12c226241e1..cb0ff0644fa 100644 --- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/NodeList.java +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/NodeList.java @@ -87,6 +87,10 @@ public class NodeList extends AbstractFilteringList<NodeWithServices, NodeList> return matching(NodeWithServices::needsNewConfig); } + public NodeList isStateful() { + return matching(NodeWithServices::isStateful); + } + /** The nodes that are retiring. 
*/ public NodeList retiring() { return matching(node -> node.node().retired()); diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/NodeWithServices.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/NodeWithServices.java index bd589af190e..d8f88d31759 100644 --- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/NodeWithServices.java +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/NodeWithServices.java @@ -82,6 +82,10 @@ public class NodeWithServices { return services.stream().anyMatch(service -> wantedConfigGeneration > service.currentGeneration()); } + public boolean isStateful() { + return node.clusterType() == Node.ClusterType.content || node.clusterType() == Node.ClusterType.combined; + } + @Override public boolean equals(Object o) { if (this == o) return true; diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/CloudEventTracker.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/CloudEventTracker.java index 98e9fc7c159..021c02fb6a0 100644 --- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/CloudEventTracker.java +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/CloudEventTracker.java @@ -3,10 +3,9 @@ package com.yahoo.vespa.hosted.controller.maintenance; import com.yahoo.config.provision.CloudName; import com.yahoo.config.provision.zone.ZoneApi; -import com.yahoo.text.Text; import com.yahoo.vespa.hosted.controller.Controller; -import com.yahoo.vespa.hosted.controller.api.integration.aws.CloudEventFetcher; import com.yahoo.vespa.hosted.controller.api.integration.aws.CloudEvent; +import com.yahoo.vespa.hosted.controller.api.integration.aws.CloudEventFetcher; import com.yahoo.vespa.hosted.controller.api.integration.configserver.Node; import 
com.yahoo.vespa.hosted.controller.api.integration.configserver.NodeFilter; import com.yahoo.vespa.hosted.controller.api.integration.configserver.NodeRepository; @@ -40,11 +39,7 @@ public class CloudEventTracker extends ControllerMaintainer { @Override protected double maintain() { for (var region : zonesByCloudNativeRegion.keySet()) { - List<CloudEvent> events = eventFetcher.getEvents(region); - for (var event : events) { - log.info(Text.format("Retrieved event %s, affecting the following instances: %s", - event.instanceEventId, - event.affectedInstances)); + for (var event : eventFetcher.getEvents(region)) { deprovisionAffectedHosts(region, event); } } diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ControllerMaintenance.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ControllerMaintenance.java index f11cd78c303..913d6dfeab8 100644 --- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ControllerMaintenance.java +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ControllerMaintenance.java @@ -74,7 +74,7 @@ public class ControllerMaintenance extends AbstractComponent { maintainers.add(new VcmrMaintainer(controller, intervals.vcmrMaintainer)); maintainers.add(new CloudTrialExpirer(controller, intervals.defaultInterval)); maintainers.add(new RetriggerMaintainer(controller, intervals.retriggerMaintainer)); - maintainers.add(new UserManagementMaintainer(controller, intervals.userManagementMaintainer, userManagement)); + maintainers.add(new UserManagementMaintainer(controller, intervals.userManagementMaintainer, controller.serviceRegistry().roleMaintainer())); } public Upgrader upgrader() { return upgrader; } diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/MetricsReporter.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/MetricsReporter.java index 
47df7a9da92..2939d10f99e 100644 --- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/MetricsReporter.java +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/MetricsReporter.java @@ -16,7 +16,7 @@ import com.yahoo.vespa.hosted.controller.application.DeploymentMetrics; import com.yahoo.vespa.hosted.controller.auditlog.AuditLog; import com.yahoo.vespa.hosted.controller.deployment.DeploymentStatusList; import com.yahoo.vespa.hosted.controller.deployment.JobList; -import com.yahoo.vespa.hosted.controller.rotation.RotationLock; +import com.yahoo.vespa.hosted.controller.routing.rotation.RotationLock; import com.yahoo.vespa.hosted.controller.versions.NodeVersion; import com.yahoo.vespa.hosted.controller.versions.VersionStatus; import com.yahoo.vespa.hosted.controller.versions.VespaVersion; diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/SystemRoutingPolicyMaintainer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/SystemRoutingPolicyMaintainer.java index 1d5d444a32c..5acb21917eb 100644 --- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/SystemRoutingPolicyMaintainer.java +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/SystemRoutingPolicyMaintainer.java @@ -3,6 +3,7 @@ package com.yahoo.vespa.hosted.controller.maintenance; import com.yahoo.config.application.api.DeploymentSpec; import com.yahoo.vespa.hosted.controller.Controller; +import com.yahoo.vespa.hosted.controller.api.identifiers.DeploymentId; import com.yahoo.vespa.hosted.controller.application.SystemApplication; import com.yahoo.vespa.hosted.controller.routing.RoutingPolicy; @@ -25,7 +26,8 @@ public class SystemRoutingPolicyMaintainer extends ControllerMaintainer { for (var zone : controller().zoneRegistry().zones().reachable().ids()) { for (var application : SystemApplication.values()) { if 
(!application.hasEndpoint()) continue; - controller().routing().policies().refresh(application.id(), DeploymentSpec.empty, zone); + DeploymentId deployment = new DeploymentId(application.id(), zone); + controller().routing().of(deployment).configure(DeploymentSpec.empty); } } return 1.0; diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/UserManagementMaintainer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/UserManagementMaintainer.java index 5f6f917bc75..05a7e2368d1 100644 --- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/UserManagementMaintainer.java +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/UserManagementMaintainer.java @@ -1,65 +1,53 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.vespa.hosted.controller.maintenance; +import com.yahoo.config.provision.ApplicationId; +import com.yahoo.config.provision.InstanceName; import com.yahoo.config.provision.SystemName; -import com.yahoo.vespa.hosted.controller.Application; import com.yahoo.vespa.hosted.controller.Controller; -import com.yahoo.vespa.hosted.controller.api.integration.user.Roles; -import com.yahoo.vespa.hosted.controller.api.integration.user.UserManagement; -import com.yahoo.vespa.hosted.controller.api.role.ApplicationRole; -import com.yahoo.vespa.hosted.controller.api.role.Role; -import com.yahoo.vespa.hosted.controller.api.role.TenantRole; +import com.yahoo.vespa.hosted.controller.api.integration.user.RoleMaintainer; import java.time.Duration; -import java.util.List; +import java.util.Optional; import java.util.logging.Logger; import java.util.stream.Collectors; /** * Maintains user management resources. 
- * For now, ensures there's no discrepnacy between expected tenant/application roles and Auth0 roles + * For now, ensures there's no discrepnacy between expected tenant/application roles and auth0/athenz roles * * @author olaa */ public class UserManagementMaintainer extends ControllerMaintainer { - private final UserManagement userManagement; - + private final RoleMaintainer roleMaintainer; private static final Logger logger = Logger.getLogger(UserManagementMaintainer.class.getName()); - public UserManagementMaintainer(Controller controller, Duration interval, UserManagement userManagement) { - super(controller, interval, UserManagementMaintainer.class.getSimpleName(), SystemName.allOf(SystemName::isPublic)); - this.userManagement = userManagement; - + public UserManagementMaintainer(Controller controller, Duration interval, RoleMaintainer roleMaintainer) { + super(controller, interval); + this.roleMaintainer = roleMaintainer; } @Override protected double maintain() { - findLeftoverRoles().forEach(role -> { - logger.warning(String.format("Found unexpected %s - Deleting", role.toString())); - userManagement.deleteRole(role); - }); - return 1.0; - } - - // protected for testing - protected List<Role> findLeftoverRoles() { - var tenantRoles = controller().tenants().asList() + var tenants = controller().tenants().asList(); + var applications = controller().applications().idList() .stream() - .flatMap(tenant -> Roles.tenantRoles(tenant.name()).stream()) + .map(appId -> ApplicationId.from(appId.tenant(), appId.application(), InstanceName.defaultName())) .collect(Collectors.toList()); + roleMaintainer.deleteLeftoverRoles(tenants, applications); + + if (!controller().system().isPublic()) { + roleMaintainer.tenantsToDelete(tenants) + .forEach(tenant -> { + logger.warning(tenant.name() + " has a non-existing Athenz domain. 
Deleting"); + controller().applications().asList(tenant.name()) + .forEach(application -> controller().applications().deleteApplication(application.id(), Optional.empty())); + controller().tenants().delete(tenant.name(), Optional.empty(), false); + }); + } - var applicationRoles = controller().applications().asList() - .stream() - .map(Application::id) - .flatMap(applicationId -> Roles.applicationRoles(applicationId.tenant(), applicationId.application()).stream()) - .collect(Collectors.toList()); - - return userManagement.listRoles().stream() - .peek(role -> logger.fine(role::toString)) - .filter(role -> role instanceof TenantRole || role instanceof ApplicationRole) - .filter(role -> !tenantRoles.contains(role) && !applicationRoles.contains(role)) - .collect(Collectors.toList()); + return 1.0; } } diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializer.java index e8a7f7729fb..4b060846090 100644 --- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializer.java +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializer.java @@ -31,9 +31,9 @@ import com.yahoo.vespa.hosted.controller.application.EndpointId; import com.yahoo.vespa.hosted.controller.application.QuotaUsage; import com.yahoo.vespa.hosted.controller.application.TenantAndApplicationId; import com.yahoo.vespa.hosted.controller.metric.ApplicationMetrics; -import com.yahoo.vespa.hosted.controller.rotation.RotationId; -import com.yahoo.vespa.hosted.controller.rotation.RotationState; -import com.yahoo.vespa.hosted.controller.rotation.RotationStatus; +import com.yahoo.vespa.hosted.controller.routing.rotation.RotationId; +import com.yahoo.vespa.hosted.controller.routing.rotation.RotationState; +import 
com.yahoo.vespa.hosted.controller.routing.rotation.RotationStatus; import java.security.PublicKey; import java.time.Instant; diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/CuratorDb.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/CuratorDb.java index e10dcfd3b3b..c6dd8bab309 100644 --- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/CuratorDb.java +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/CuratorDb.java @@ -20,6 +20,7 @@ import com.yahoo.vespa.hosted.controller.api.integration.archive.ArchiveBucket; import com.yahoo.vespa.hosted.controller.api.integration.certificates.EndpointCertificateMetadata; import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType; import com.yahoo.vespa.hosted.controller.api.integration.deployment.RunId; +import com.yahoo.vespa.hosted.controller.api.integration.vcmr.VespaChangeRequest; import com.yahoo.vespa.hosted.controller.application.TenantAndApplicationId; import com.yahoo.vespa.hosted.controller.auditlog.AuditLog; import com.yahoo.vespa.hosted.controller.deployment.RetriggerEntry; @@ -27,11 +28,9 @@ import com.yahoo.vespa.hosted.controller.deployment.RetriggerEntrySerializer; import com.yahoo.vespa.hosted.controller.deployment.Run; import com.yahoo.vespa.hosted.controller.deployment.Step; import com.yahoo.vespa.hosted.controller.dns.NameServiceQueue; -import com.yahoo.vespa.hosted.controller.api.integration.vcmr.VespaChangeRequest; import com.yahoo.vespa.hosted.controller.notification.Notification; -import com.yahoo.vespa.hosted.controller.routing.RoutingStatus; import com.yahoo.vespa.hosted.controller.routing.RoutingPolicy; -import com.yahoo.vespa.hosted.controller.routing.RoutingPolicyId; +import com.yahoo.vespa.hosted.controller.routing.RoutingStatus; import com.yahoo.vespa.hosted.controller.routing.ZoneRoutingPolicy; import 
com.yahoo.vespa.hosted.controller.support.access.SupportAccess; import com.yahoo.vespa.hosted.controller.tenant.Tenant; @@ -514,19 +513,31 @@ public class CuratorDb { // -------------- Routing policies ---------------------------------------- - public void writeRoutingPolicies(ApplicationId application, Map<RoutingPolicyId, RoutingPolicy> policies) { + public void writeRoutingPolicies(ApplicationId application, List<RoutingPolicy> policies) { + for (var policy : policies) { + if (!policy.id().owner().equals(application)) { + throw new IllegalArgumentException(policy.id() + " does not belong to the application being written: " + + application.toShortString()); + } + } curator.set(routingPolicyPath(application), asJson(routingPolicySerializer.toSlime(policies))); } - public Map<ApplicationId, Map<RoutingPolicyId, RoutingPolicy>> readRoutingPolicies() { + public Map<ApplicationId, List<RoutingPolicy>> readRoutingPolicies() { + return readRoutingPolicies((instance) -> true); + } + + public Map<ApplicationId, List<RoutingPolicy>> readRoutingPolicies(Predicate<ApplicationId> filter) { return curator.getChildren(routingPoliciesRoot).stream() .map(ApplicationId::fromSerializedForm) - .collect(Collectors.toUnmodifiableMap(Function.identity(), this::readRoutingPolicies)); + .filter(filter) + .collect(Collectors.toUnmodifiableMap(Function.identity(), + this::readRoutingPolicies)); } - public Map<RoutingPolicyId, RoutingPolicy> readRoutingPolicies(ApplicationId application) { + public List<RoutingPolicy> readRoutingPolicies(ApplicationId application) { return readSlime(routingPolicyPath(application)).map(slime -> routingPolicySerializer.fromSlime(application, slime)) - .orElseGet(Map::of); + .orElseGet(List::of); } public void writeZoneRoutingPolicy(ZoneRoutingPolicy policy) { diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/RoutingPolicySerializer.java 
b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/RoutingPolicySerializer.java index 04d1a4c7433..17337f823c0 100644 --- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/RoutingPolicySerializer.java +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/RoutingPolicySerializer.java @@ -11,15 +11,15 @@ import com.yahoo.slime.Inspector; import com.yahoo.slime.Slime; import com.yahoo.slime.SlimeUtils; import com.yahoo.vespa.hosted.controller.application.EndpointId; -import com.yahoo.vespa.hosted.controller.routing.RoutingStatus; import com.yahoo.vespa.hosted.controller.routing.RoutingPolicy; import com.yahoo.vespa.hosted.controller.routing.RoutingPolicyId; +import com.yahoo.vespa.hosted.controller.routing.RoutingStatus; import java.time.Instant; +import java.util.ArrayList; import java.util.Collections; -import java.util.LinkedHashMap; import java.util.LinkedHashSet; -import java.util.Map; +import java.util.List; import java.util.Set; /** @@ -50,11 +50,11 @@ public class RoutingPolicySerializer { private static final String changedAtField = "changedAt"; private static final String statusField = "status"; - public Slime toSlime(Map<RoutingPolicyId, RoutingPolicy> routingPolicies) { + public Slime toSlime(List<RoutingPolicy> routingPolicies) { var slime = new Slime(); var root = slime.setObject(); var policyArray = root.setArray(routingPoliciesField); - routingPolicies.values().forEach(policy -> { + routingPolicies.forEach(policy -> { var policyObject = policyArray.addObject(); policyObject.setString(clusterField, policy.id().cluster().value()); policyObject.setString(zoneField, policy.id().zone().value()); @@ -70,8 +70,8 @@ public class RoutingPolicySerializer { return slime; } - public Map<RoutingPolicyId, RoutingPolicy> fromSlime(ApplicationId owner, Slime slime) { - var policies = new LinkedHashMap<RoutingPolicyId, RoutingPolicy>(); + public List<RoutingPolicy> 
fromSlime(ApplicationId owner, Slime slime) { + List<RoutingPolicy> policies = new ArrayList<>(); var root = slime.get(); var field = root.field(routingPoliciesField); field.traverse((ArrayTraverser) (i, inspect) -> { @@ -82,15 +82,15 @@ public class RoutingPolicySerializer { RoutingPolicyId id = new RoutingPolicyId(owner, ClusterSpec.Id.from(inspect.field(clusterField).asString()), ZoneId.from(inspect.field(zoneField).asString())); - policies.put(id, new RoutingPolicy(id, - HostName.from(inspect.field(canonicalNameField).asString()), - SlimeUtils.optionalString(inspect.field(dnsZoneField)), - instanceEndpoints, - applicationEndpoints, - new RoutingPolicy.Status(inspect.field(loadBalancerActiveField).asBool(), - globalRoutingFromSlime(inspect.field(globalRoutingField))))); + policies.add(new RoutingPolicy(id, + HostName.from(inspect.field(canonicalNameField).asString()), + SlimeUtils.optionalString(inspect.field(dnsZoneField)), + instanceEndpoints, + applicationEndpoints, + new RoutingPolicy.Status(inspect.field(loadBalancerActiveField).asBool(), + globalRoutingFromSlime(inspect.field(globalRoutingField))))); }); - return Collections.unmodifiableMap(policies); + return Collections.unmodifiableList(policies); } public void globalRoutingToSlime(RoutingStatus routingStatus, Cursor object) { diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java index d9f0358e3b5..8af26f564a6 100644 --- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java @@ -44,7 +44,6 @@ import com.yahoo.vespa.hosted.controller.Instance; import com.yahoo.vespa.hosted.controller.LockedTenant; import 
com.yahoo.vespa.hosted.controller.NotExistsException; import com.yahoo.vespa.hosted.controller.api.application.v4.EnvironmentResource; -import com.yahoo.vespa.hosted.controller.api.application.v4.model.EndpointStatus; import com.yahoo.vespa.hosted.controller.api.application.v4.model.ProtonMetrics; import com.yahoo.vespa.hosted.controller.api.application.v4.model.configserverbindings.RefeedAction; import com.yahoo.vespa.hosted.controller.api.application.v4.model.configserverbindings.RestartAction; @@ -97,10 +96,11 @@ import com.yahoo.vespa.hosted.controller.maintenance.ResourceMeterMaintainer; import com.yahoo.vespa.hosted.controller.notification.Notification; import com.yahoo.vespa.hosted.controller.notification.NotificationSource; import com.yahoo.vespa.hosted.controller.persistence.SupportAccessSerializer; -import com.yahoo.vespa.hosted.controller.rotation.RotationId; -import com.yahoo.vespa.hosted.controller.rotation.RotationState; -import com.yahoo.vespa.hosted.controller.rotation.RotationStatus; +import com.yahoo.vespa.hosted.controller.routing.rotation.RotationId; +import com.yahoo.vespa.hosted.controller.routing.rotation.RotationState; +import com.yahoo.vespa.hosted.controller.routing.rotation.RotationStatus; import com.yahoo.vespa.hosted.controller.routing.RoutingStatus; +import com.yahoo.vespa.hosted.controller.routing.context.DeploymentRoutingContext; import com.yahoo.vespa.hosted.controller.security.AccessControlRequests; import com.yahoo.vespa.hosted.controller.security.Credentials; import com.yahoo.vespa.hosted.controller.support.access.SupportAccess; @@ -1536,49 +1536,32 @@ public class ApplicationApiHandler extends AuditLoggingRequestHandler { if (deployment == null) { throw new NotExistsException(instance + " has no deployment in " + zone); } - - // The order here matters because setGlobalRotationStatus involves an external request that may fail. - // TODO(mpolden): Set only one of these when only one kind of global endpoints are supported per zone. 
- var deploymentId = new DeploymentId(instance.id(), zone); - setGlobalRotationStatus(deploymentId, inService, request); - setGlobalEndpointStatus(deploymentId, inService, request); - + DeploymentId deploymentId = new DeploymentId(instance.id(), zone); + RoutingStatus.Agent agent = isOperator(request) ? RoutingStatus.Agent.operator : RoutingStatus.Agent.tenant; + RoutingStatus.Value status = inService ? RoutingStatus.Value.in : RoutingStatus.Value.out; + controller.routing().of(deploymentId).setRoutingStatus(status, agent); return new MessageResponse(Text.format("Successfully set %s in %s %s service", instance.id().toShortString(), zone, inService ? "in" : "out of")); } - /** Set the global endpoint status for given deployment. This only applies to global endpoints backed by a cloud service */ - private void setGlobalEndpointStatus(DeploymentId deployment, boolean inService, HttpRequest request) { - var agent = isOperator(request) ? RoutingStatus.Agent.operator : RoutingStatus.Agent.tenant; - var status = inService ? RoutingStatus.Value.in : RoutingStatus.Value.out; - controller.routing().policies().setRoutingStatus(deployment, status, agent); - } - - /** Set the global rotation status for given deployment. This only applies to global endpoints backed by a rotation */ - private void setGlobalRotationStatus(DeploymentId deployment, boolean inService, HttpRequest request) { - var requestData = toSlime(request.getData()).get(); - var reason = mandatory("reason", requestData).asString(); - var agent = isOperator(request) ? RoutingStatus.Agent.operator : RoutingStatus.Agent.tenant; - long timestamp = controller.clock().instant().getEpochSecond(); - var status = inService ? 
EndpointStatus.Status.in : EndpointStatus.Status.out; - var endpointStatus = new EndpointStatus(status, reason, agent.name(), timestamp); - controller.routing().setGlobalRotationStatus(deployment, endpointStatus); - } - private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); Slime slime = new Slime(); Cursor array = slime.setObject().setArray("globalrotationoverride"); - controller.routing().globalRotationStatus(deploymentId) - .forEach((endpoint, status) -> { - array.addString(endpoint.upstreamIdOf(deploymentId)); - Cursor statusObject = array.addObject(); - statusObject.setString("status", status.getStatus().name()); - statusObject.setString("reason", status.getReason() == null ? "" : status.getReason()); - statusObject.setString("agent", status.getAgent() == null ? "" : status.getAgent()); - statusObject.setLong("timestamp", status.getEpoch()); - }); + Optional<Endpoint> primaryEndpoint = controller.routing().readDeclaredEndpointsOf(deploymentId.applicationId()) + .requiresRotation() + .primary(); + if (primaryEndpoint.isPresent()) { + DeploymentRoutingContext context = controller.routing().of(deploymentId); + RoutingStatus status = context.routingStatus(); + array.addString(primaryEndpoint.get().upstreamName(deploymentId)); + Cursor statusObject = array.addObject(); + statusObject.setString("status", status.value().name()); + statusObject.setString("reason", ""); + statusObject.setString("agent", status.agent().name()); + statusObject.setLong("timestamp", status.changedAt().getEpochSecond()); + } return new SlimeJsonResponse(slime); } @@ -1793,15 +1776,37 @@ public class ApplicationApiHandler extends AuditLoggingRequestHandler { /** Trigger deployment to the last known application package for the given application. 
*/ private HttpResponse deployApplication(String tenantName, String applicationName, String instanceName, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); + Inspector buildField = toSlime(request.getData()).get().field("build"); + long build = buildField.valid() ? buildField.asLong() : -1; + StringBuilder response = new StringBuilder(); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> { - Change change = Change.of(application.get().latestVersion().get()); + ApplicationVersion version = build == -1 ? application.get().latestVersion().get() + : getApplicationVersion(application.get(), build); + Change change = Change.of(version); controller.applications().deploymentTrigger().forceChange(id, change); response.append("Triggered ").append(change).append(" for ").append(id); }); return new MessageResponse(response.toString()); } + private ApplicationVersion getApplicationVersion(Application application, Long build) { + // Check whether this is the latest version, and possibly return that. + // Otherwise, look through historic runs for a proper ApplicationVersion. 
+ return application.latestVersion() + .filter(version -> version.buildNumber().stream().anyMatch(build::equals)) + .or(() -> controller.jobController().deploymentStatus(application).jobs() + .asList().stream() + .flatMap(job -> job.runs().values().stream()) + .map(run -> run.versions().targetApplication()) + .filter(version -> version.buildNumber().stream().anyMatch(build::equals)) + .findAny()) + .filter(version -> controller.applications().applicationStore().hasBuild(application.id().tenant(), + application.id().application(), + build)) + .orElseThrow(() -> new IllegalArgumentException("Build number '" + build + "' was not found")); + } + /** Cancel ongoing change for given application, e.g., everything with {"cancel":"all"} */ private HttpResponse cancelDeploy(String tenantName, String applicationName, String instanceName, String choice) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); @@ -2013,9 +2018,9 @@ public class ApplicationApiHandler extends AuditLoggingRequestHandler { return ErrorResponse.forbidden("Only operators can forget a tenant"); controller.tenants().delete(TenantName.from(tenantName), - () -> accessControlRequests.credentials(TenantName.from(tenantName), + Optional.of(accessControlRequests.credentials(TenantName.from(tenantName), toSlime(request.getData()).get(), - request.getJDiscRequest()), + request.getJDiscRequest())), forget); return new MessageResponse("Deleted tenant " + tenantName); diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/os/OsApiHandler.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/os/OsApiHandler.java index 31800fb509b..88fd3a58d23 100644 --- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/os/OsApiHandler.java +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/os/OsApiHandler.java @@ -174,6 +174,7 @@ public class OsApiHandler extends AuditLoggingRequestHandler { 
Optional<OsVersionTarget> target = targets.stream().filter(t -> t.osVersion().equals(osVersion)).findFirst(); currentVersionObject.setBool("targetVersion", target.isPresent()); target.ifPresent(t -> currentVersionObject.setString("upgradeBudget", t.upgradeBudget().toString())); + target.ifPresent(t -> currentVersionObject.setLong("scheduledAt", t.scheduledAt().toEpochMilli())); currentVersionObject.setString("cloud", osVersion.cloud().value()); Cursor nodesArray = currentVersionObject.setArray("nodes"); nodeVersions.forEach(nodeVersion -> { diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/routing/RoutingApiHandler.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/routing/RoutingApiHandler.java index 45abf7f2946..226a7ca9561 100644 --- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/routing/RoutingApiHandler.java +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/routing/RoutingApiHandler.java @@ -19,26 +19,26 @@ import com.yahoo.slime.Slime; import com.yahoo.vespa.hosted.controller.Application; import com.yahoo.vespa.hosted.controller.Controller; import com.yahoo.vespa.hosted.controller.Instance; -import com.yahoo.vespa.hosted.controller.api.application.v4.model.EndpointStatus; import com.yahoo.vespa.hosted.controller.api.identifiers.DeploymentId; import com.yahoo.vespa.hosted.controller.api.role.Role; import com.yahoo.vespa.hosted.controller.api.role.RoleDefinition; import com.yahoo.vespa.hosted.controller.api.role.SecurityContext; import com.yahoo.vespa.hosted.controller.application.Endpoint; +import com.yahoo.vespa.hosted.controller.application.EndpointList; import com.yahoo.vespa.hosted.controller.application.TenantAndApplicationId; import com.yahoo.vespa.hosted.controller.auditlog.AuditLoggingRequestHandler; import com.yahoo.vespa.hosted.controller.routing.RoutingStatus; +import 
com.yahoo.vespa.hosted.controller.routing.context.DeploymentRoutingContext; +import com.yahoo.vespa.hosted.controller.routing.context.RoutingContext; import com.yahoo.yolean.Exceptions; import java.net.URI; -import java.time.Instant; import java.util.Comparator; import java.util.List; import java.util.Objects; import java.util.Optional; import java.util.logging.Level; import java.util.stream.Collectors; -import java.util.stream.Stream; /** * This implements the /routing/v1 API, which provides operators and tenants routing control at both zone- (operator @@ -112,11 +112,8 @@ public class RoutingApiHandler extends AuditLoggingRequestHandler { var deploymentsStatus = deployments.stream() .collect(Collectors.toMap( deploymentId -> deploymentId, - deploymentId -> Stream.concat( - directGlobalRoutingStatus(deploymentId).stream(), - sharedGlobalRoutingStatus(deploymentId).stream() - ).collect(Collectors.toList()) - )); + deploymentId -> controller.routing().of(deploymentId).routingStatus()) + ); var slime = new Slime(); var root = slime.setObject(); @@ -125,11 +122,11 @@ public class RoutingApiHandler extends AuditLoggingRequestHandler { var endpointRoot = endpointsRoot.addObject(); endpointToSlime(endpointRoot, endpoint); var zonesRoot = endpointRoot.setArray("zones"); - endpoint.deployments().stream().sorted(Comparator.comparing(d -> d.zoneId().value())).forEach(deployment -> { - deploymentsStatus.getOrDefault(deployment, List.of()).forEach(status -> { - deploymentStatusToSlime(zonesRoot.addObject(), deployment, status, endpoint.routingMethod()); - }); - }); + endpoint.deployments().stream().sorted(Comparator.comparing(d -> d.zoneId().value())) + .forEach(deployment -> { + RoutingStatus status = deploymentsStatus.get(deployment); + deploymentStatusToSlime(zonesRoot.addObject(), deployment, status, endpoint.routingMethod()); + }); }); return new SlimeJsonResponse(slime); @@ -211,13 +208,10 @@ public class RoutingApiHandler extends AuditLoggingRequestHandler { } private 
HttpResponse setZoneStatus(Path path, boolean in) { - var zone = zoneFrom(path); - if (exclusiveRoutingIn(zone)) { - var status = in ? RoutingStatus.Value.in : RoutingStatus.Value.out; - controller.routing().policies().setRoutingStatus(zone, status); - } else { - controller.serviceRegistry().configServer().setGlobalRotationStatus(zone, in); - } + ZoneId zone = zoneFrom(path); + RoutingContext context = controller.routing().of(zone); + RoutingStatus.Value newStatus = in ? RoutingStatus.Value.in : RoutingStatus.Value.out; + context.setRoutingStatus(newStatus, RoutingStatus.Agent.operator); return new MessageResponse("Set global routing status for deployments in " + zone + " to " + (in ? "IN" : "OUT")); } @@ -231,16 +225,8 @@ public class RoutingApiHandler extends AuditLoggingRequestHandler { } private void toSlime(ZoneId zone, Cursor zoneObject) { - if (exclusiveRoutingIn(zone)) { - var zonePolicy = controller.routing().policies().get(zone); - zoneStatusToSlime(zoneObject, zonePolicy.zone(), zonePolicy.routingStatus(), RoutingMethod.exclusive); - } else { - // Rotation status per zone only exposes in/out status, no agent or time of change. - var in = controller.serviceRegistry().configServer().getGlobalRotationStatus(zone); - var globalRouting = new RoutingStatus(in ? RoutingStatus.Value.in : RoutingStatus.Value.out, - RoutingStatus.Agent.operator, Instant.EPOCH); - zoneStatusToSlime(zoneObject, zone, globalRouting, RoutingMethod.shared); - } + RoutingContext context = controller.routing().of(zone); + zoneStatusToSlime(zoneObject, zone, context.routingStatus(), context.routingMethod()); } private HttpResponse setDeploymentStatus(Path path, boolean in, HttpRequest request) { @@ -249,18 +235,7 @@ public class RoutingApiHandler extends AuditLoggingRequestHandler { var status = in ? RoutingStatus.Value.in : RoutingStatus.Value.out; var agent = isOperator(request) ? 
RoutingStatus.Agent.operator : RoutingStatus.Agent.tenant; requireDeployment(deployment, instance); - - if (sharedRoutingIn(deployment.zoneId())) { - // Set rotation status - var endpointStatus = new EndpointStatus(in ? EndpointStatus.Status.in : EndpointStatus.Status.out, - "", - agent.name(), - controller.clock().instant().getEpochSecond()); - controller.routing().setGlobalRotationStatus(deployment, endpointStatus); - } else { - // Set policy status - controller.routing().policies().setRoutingStatus(deployment, status, agent); - } + controller.routing().of(deployment).setRoutingStatus(status, agent); return new MessageResponse("Set global routing status for " + deployment + " to " + (in ? "IN" : "OUT")); } @@ -279,66 +254,24 @@ public class RoutingApiHandler extends AuditLoggingRequestHandler { var instances = instanceId == null ? application.instances().values() : List.of(application.instances().get(instanceId.instance())); + EndpointList declaredEndpoints = controller.routing().declaredEndpointsOf(application); for (var instance : instances) { var zones = zoneId == null ? 
instance.deployments().keySet().stream().sorted(Comparator.comparing(ZoneId::value)) .collect(Collectors.toList()) : List.of(zoneId); for (var zone : zones) { - var deploymentId = requireDeployment(new DeploymentId(instance.id(), zone), instance); - // Include status from rotation - sharedGlobalRoutingStatus(deploymentId).ifPresent(status -> { - deploymentStatusToSlime(deploymentsArray.addObject(), deploymentId, status, RoutingMethod.shared); - }); - - // Include status from routing policies - directGlobalRoutingStatus(deploymentId).forEach(status -> { - deploymentStatusToSlime(deploymentsArray.addObject(), deploymentId, status, RoutingMethod.exclusive); - }); - } - } - } - - } - - private Optional<RoutingStatus> sharedGlobalRoutingStatus(DeploymentId deploymentId) { - if (sharedRoutingIn(deploymentId.zoneId())) { - var rotationStatus = controller.routing().globalRotationStatus(deploymentId); - // Status is equal across all global endpoints, as the status is per deployment, not per endpoint. - var endpointStatus = rotationStatus.values().stream().findFirst(); - if (endpointStatus.isPresent()) { - var changedAt = Instant.ofEpochSecond(endpointStatus.get().getEpoch()); - RoutingStatus.Agent agent; - try { - agent = RoutingStatus.Agent.valueOf(endpointStatus.get().getAgent()); - } catch (IllegalArgumentException e) { - agent = RoutingStatus.Agent.unknown; + DeploymentId deploymentId = requireDeployment(new DeploymentId(instance.id(), zone), instance); + DeploymentRoutingContext context = controller.routing().of(deploymentId); + if (declaredEndpoints.targets(deploymentId).isEmpty()) continue; // No declared endpoints point to this deployment + deploymentStatusToSlime(deploymentsArray.addObject(), + deploymentId, + context.routingStatus(), + context.routingMethod()); } - var status = endpointStatus.get().getStatus() == EndpointStatus.Status.in - ? 
RoutingStatus.Value.in - : RoutingStatus.Value.out; - return Optional.of(new RoutingStatus(status, agent, changedAt)); } } - return Optional.empty(); - } - - private List<RoutingStatus> directGlobalRoutingStatus(DeploymentId deploymentId) { - return controller.routing().policies().get(deploymentId).values().stream() - .filter(p -> ! p.instanceEndpoints().isEmpty()) // This policy does not apply to a global endpoint - .filter(p -> exclusiveRoutingIn(p.id().zone())) - .map(p -> p.status().routingStatus()) - .collect(Collectors.toList()); - } - - /** Returns whether given zone uses exclusive routing */ - private boolean exclusiveRoutingIn(ZoneId zone) { - return controller.zoneRegistry().routingMethods(zone).contains(RoutingMethod.exclusive); - } - /** Returns whether given zone uses shared routing */ - private boolean sharedRoutingIn(ZoneId zone) { - return controller.zoneRegistry().routingMethods(zone).stream().anyMatch(RoutingMethod::isShared); } private static void zoneStatusToSlime(Cursor object, ZoneId zone, RoutingStatus routingStatus, RoutingMethod method) { diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/user/UserApiHandler.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/user/UserApiHandler.java index 0d12b283543..7e9ae036cc7 100644 --- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/user/UserApiHandler.java +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/user/UserApiHandler.java @@ -25,6 +25,7 @@ import com.yahoo.vespa.flags.IntFlag; import com.yahoo.vespa.flags.PermanentFlags; import com.yahoo.vespa.hosted.controller.Controller; import com.yahoo.vespa.hosted.controller.LockedTenant; +import com.yahoo.vespa.hosted.controller.api.integration.billing.Plan; import com.yahoo.vespa.hosted.controller.api.integration.billing.PlanId; import com.yahoo.vespa.hosted.controller.api.integration.user.Roles; import 
com.yahoo.vespa.hosted.controller.api.integration.user.User; @@ -176,6 +177,7 @@ public class UserApiHandler extends LoggingRequestHandler { .sorted() .forEach(tenant -> { Cursor tenantObject = tenants.setObject(tenant.value()); + tenantObject.setBool("supported", hasSupportedPlan(tenant)); Cursor tenantRolesObject = tenantObject.setArray("roles"); tenantRolesByTenantName.getOrDefault(tenant, List.of()) @@ -405,4 +407,11 @@ public class UserApiHandler extends LoggingRequestHandler { .map(clazz::cast) .orElseThrow(() -> new IllegalArgumentException("Attribute '" + attributeName + "' was not set on request")); } + + private boolean hasSupportedPlan(TenantName tenantName) { + var planId = controller.serviceRegistry().billingController().getPlan(tenantName); + return controller.serviceRegistry().planRegistry().plan(planId) + .map(Plan::isSupported) + .orElse(false); + } } diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/RoutingPolicies.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/RoutingPolicies.java index 634d76c8449..b98ef717dd3 100644 --- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/RoutingPolicies.java +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/RoutingPolicies.java @@ -3,7 +3,6 @@ package com.yahoo.vespa.hosted.controller.routing; import com.yahoo.config.application.api.DeploymentSpec; import com.yahoo.config.provision.ApplicationId; -import com.yahoo.config.provision.Environment; import com.yahoo.config.provision.HostName; import com.yahoo.config.provision.zone.RoutingMethod; import com.yahoo.config.provision.zone.ZoneId; @@ -27,17 +26,14 @@ import com.yahoo.vespa.hosted.controller.dns.NameServiceQueue.Priority; import com.yahoo.vespa.hosted.controller.dns.NameServiceRequest; import com.yahoo.vespa.hosted.controller.persistence.CuratorDb; -import java.util.ArrayList; import java.util.Collection; import java.util.Collections; 
-import java.util.HashMap; import java.util.HashSet; import java.util.LinkedHashMap; import java.util.LinkedHashSet; import java.util.List; import java.util.Map; import java.util.Objects; -import java.util.Optional; import java.util.Set; import java.util.stream.Collectors; @@ -62,43 +58,63 @@ public class RoutingPolicies { } } - /** Read all known routing policies for given instance */ - public Map<RoutingPolicyId, RoutingPolicy> get(ApplicationId application) { - return db.readRoutingPolicies(application); + /** Read all routing policies for given deployment */ + public RoutingPolicyList read(DeploymentId deployment) { + return read(deployment.applicationId()).deployment(deployment); } - /** Read all known routing policies for given deployment */ - public Map<RoutingPolicyId, RoutingPolicy> get(DeploymentId deployment) { - return db.readRoutingPolicies(deployment.applicationId()).entrySet() + /** Read all routing policies for given instance */ + public RoutingPolicyList read(ApplicationId instance) { + return RoutingPolicyList.copyOf(db.readRoutingPolicies(instance)); + } + + /** Read all routing policies for given application */ + private RoutingPolicyList read(TenantAndApplicationId application) { + return db.readRoutingPolicies((instance) -> TenantAndApplicationId.from(instance).equals(application)) + .values() + .stream() + .flatMap(Collection::stream) + .collect(Collectors.collectingAndThen(Collectors.toList(), RoutingPolicyList::copyOf)); + } + + /** Read all routing policies */ + private RoutingPolicyList readAll() { + return db.readRoutingPolicies() + .values() .stream() - .filter(kv -> kv.getKey().zone().equals(deployment.zoneId())) - .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + .flatMap(Collection::stream) + .collect(Collectors.collectingAndThen(Collectors.toList(), RoutingPolicyList::copyOf)); } /** Read routing policy for given zone */ - public ZoneRoutingPolicy get(ZoneId zone) { + public ZoneRoutingPolicy read(ZoneId zone) { 
return db.readZoneRoutingPolicy(zone); } /** * Refresh routing policies for instance in given zone. This is idempotent and changes will only be performed if - * load balancers for given instance have changed. + * routing configuration affecting given deployment has changed. */ - public void refresh(ApplicationId instance, DeploymentSpec deploymentSpec, ZoneId zone) { - LoadBalancerAllocation allocation = new LoadBalancerAllocation(instance, zone, controller.serviceRegistry().configServer() - .getLoadBalancers(instance, zone), - deploymentSpec); + public void refresh(DeploymentId deployment, DeploymentSpec deploymentSpec) { + ApplicationId instance = deployment.applicationId(); + List<LoadBalancer> loadBalancers = controller.serviceRegistry().configServer() + .getLoadBalancers(instance, deployment.zoneId()); + LoadBalancerAllocation allocation = new LoadBalancerAllocation(loadBalancers, deployment, deploymentSpec); Set<ZoneId> inactiveZones = inactiveZones(instance, deploymentSpec); try (var lock = db.lockRoutingPolicies()) { - removeGlobalDnsUnreferencedBy(allocation, lock); - removeApplicationDnsUnreferencedBy(allocation, lock); + RoutingPolicyList applicationPolicies = read(TenantAndApplicationId.from(instance)); + RoutingPolicyList instancePolicies = applicationPolicies.instance(instance); + RoutingPolicyList deploymentPolicies = applicationPolicies.deployment(allocation.deployment); + + removeGlobalDnsUnreferencedBy(allocation, deploymentPolicies, lock); + removeApplicationDnsUnreferencedBy(allocation, deploymentPolicies, lock); - storePoliciesOf(allocation, lock); - removePoliciesUnreferencedBy(allocation, lock); + instancePolicies = storePoliciesOf(allocation, instancePolicies, lock); + instancePolicies = removePoliciesUnreferencedBy(allocation, instancePolicies, lock); - Collection<RoutingPolicy> policies = get(allocation.deployment.applicationId()).values(); - updateGlobalDnsOf(policies, inactiveZones, lock); - updateApplicationDnsOf(policies, 
inactiveZones, lock); + applicationPolicies = applicationPolicies.replace(instance, instancePolicies); + updateGlobalDnsOf(instancePolicies, inactiveZones, lock); + updateApplicationDnsOf(applicationPolicies, inactiveZones, lock); } } @@ -107,33 +123,37 @@ public class RoutingPolicies { try (var lock = db.lockRoutingPolicies()) { db.writeZoneRoutingPolicy(new ZoneRoutingPolicy(zone, RoutingStatus.create(value, RoutingStatus.Agent.operator, controller.clock().instant()))); - Map<ApplicationId, Map<RoutingPolicyId, RoutingPolicy>> allPolicies = db.readRoutingPolicies(); - for (var applicationPolicies : allPolicies.values()) { - updateGlobalDnsOf(applicationPolicies.values(), Set.of(), lock); + Map<ApplicationId, RoutingPolicyList> allPolicies = readAll().groupingBy(policy -> policy.id().owner()); + for (var instancePolicies : allPolicies.values()) { + updateGlobalDnsOf(instancePolicies, Set.of(), lock); } } } /** Set the status of all global endpoints for given deployment */ public void setRoutingStatus(DeploymentId deployment, RoutingStatus.Value value, RoutingStatus.Agent agent) { + ApplicationId instance = deployment.applicationId(); try (var lock = db.lockRoutingPolicies()) { - var policies = get(deployment.applicationId()); - var newPolicies = new LinkedHashMap<>(policies); - for (var policy : policies.values()) { - if (!policy.appliesTo(deployment)) continue; + RoutingPolicyList applicationPolicies = read(TenantAndApplicationId.from(instance)); + RoutingPolicyList deploymentPolicies = applicationPolicies.deployment(deployment); + Map<RoutingPolicyId, RoutingPolicy> updatedPolicies = new LinkedHashMap<>(applicationPolicies.asMap()); + for (var policy : deploymentPolicies) { var newPolicy = policy.with(policy.status().with(RoutingStatus.create(value, agent, controller.clock().instant()))); - newPolicies.put(policy.id(), newPolicy); + updatedPolicies.put(policy.id(), newPolicy); } - db.writeRoutingPolicies(deployment.applicationId(), newPolicies); - 
updateGlobalDnsOf(newPolicies.values(), Set.of(), lock); - updateApplicationDnsOf(newPolicies.values(), Set.of(), lock); + + RoutingPolicyList effectivePolicies = RoutingPolicyList.copyOf(updatedPolicies.values()); + Map<ApplicationId, RoutingPolicyList> policiesByInstance = effectivePolicies.groupingBy(policy -> policy.id().owner()); + policiesByInstance.forEach((owner, instancePolicies) -> db.writeRoutingPolicies(owner, instancePolicies.asList())); + policiesByInstance.forEach((ignored, instancePolicies) -> updateGlobalDnsOf(instancePolicies, Set.of(), lock)); + updateApplicationDnsOf(effectivePolicies, Set.of(), lock); } } /** Update global DNS records for given policies */ - private void updateGlobalDnsOf(Collection<RoutingPolicy> routingPolicies, Set<ZoneId> inactiveZones, @SuppressWarnings("unused") Lock lock) { - Map<RoutingId, List<RoutingPolicy>> routingTable = instanceRoutingTable(routingPolicies); + private void updateGlobalDnsOf(RoutingPolicyList instancePolicies, Set<ZoneId> inactiveZones, @SuppressWarnings("unused") Lock lock) { + Map<RoutingId, List<RoutingPolicy>> routingTable = instancePolicies.asInstanceRoutingTable(); for (Map.Entry<RoutingId, List<RoutingPolicy>> routeEntry : routingTable.entrySet()) { RoutingId routingId = routeEntry.getKey(); controller.routing().readDeclaredEndpointsOf(routingId.instance()) @@ -205,17 +225,17 @@ public class RoutingPolicies { } - private void updateApplicationDnsOf(Collection<RoutingPolicy> routingPolicies, Set<ZoneId> inactiveZones, @SuppressWarnings("unused") Lock lock) { + private void updateApplicationDnsOf(RoutingPolicyList routingPolicies, Set<ZoneId> inactiveZones, @SuppressWarnings("unused") Lock lock) { // In the context of single deployment (which this is) there is only one routing policy per routing ID. I.e. // there is no scenario where more than one deployment within an instance can be a member the same // application-level endpoint. 
However, to allow this in the future the routing table remains // Map<RoutingId, List<RoutingPolicy>> instead of Map<RoutingId, RoutingPolicy>. - Map<RoutingId, List<RoutingPolicy>> routingTable = applicationRoutingTable(routingPolicies); + Map<RoutingId, List<RoutingPolicy>> routingTable = routingPolicies.asApplicationRoutingTable(); if (routingTable.isEmpty()) return; Application application = controller.applications().requireApplication(routingTable.keySet().iterator().next().application()); - Map<DeploymentId, Map<EndpointId, Integer>> targetWeights = targetWeights(application); Map<Endpoint, Set<AliasTarget>> targetsByEndpoint = new LinkedHashMap<>(); + Map<Endpoint, Set<AliasTarget>> inactiveTargetsByEndpoint = new LinkedHashMap<>(); for (Map.Entry<RoutingId, List<RoutingPolicy>> routeEntry : routingTable.entrySet()) { RoutingId routingId = routeEntry.getKey(); EndpointList endpoints = controller.routing().declaredEndpointsOf(application) @@ -230,32 +250,65 @@ public class RoutingPolicies { for (var policy : routeEntry.getValue()) { for (var target : endpoint.targets()) { if (!policy.appliesTo(target.deployment())) continue; - int weight = target.weight(); - if (isConfiguredOut(policy, inactiveZones) && removableFromApplicationEndpoint(policy, application, targetWeights)) { - weight = 0; - } + if (policy.dnsZone().isEmpty()) continue; // Does not support ALIAS records + ZoneRoutingPolicy zonePolicy = db.readZoneRoutingPolicy(policy.id().zone()); WeightedAliasTarget weightedAliasTarget = new WeightedAliasTarget(policy.canonicalName(), policy.dnsZone().get(), - target.deployment().zoneId(), weight); - targetsByEndpoint.computeIfAbsent(endpoint, (k) -> new LinkedHashSet<>()) - .add(weightedAliasTarget); + target.deployment().zoneId(), target.weight()); + Set<AliasTarget> activeTargets = targetsByEndpoint.computeIfAbsent(endpoint, (k) -> new LinkedHashSet<>()); + Set<AliasTarget> inactiveTargets = inactiveTargetsByEndpoint.computeIfAbsent(endpoint, (k) -> new 
LinkedHashSet<>()); + if (isConfiguredOut(zonePolicy, policy, inactiveZones)) { + inactiveTargets.add(weightedAliasTarget); + } else { + activeTargets.add(weightedAliasTarget); + } } } } + + // If all targets are configured OUT, all targets are kept IN. We do this because otherwise removing 100% of + // the ALIAS records would cause the application endpoint to stop resolving entirely (NXDOMAIN). + for (var kv : targetsByEndpoint.entrySet()) { + Endpoint endpoint = kv.getKey(); + Set<AliasTarget> activeTargets = kv.getValue(); + if (!activeTargets.isEmpty()) { + continue; + } + Set<AliasTarget> inactiveTargets = inactiveTargetsByEndpoint.get(endpoint); + activeTargets.addAll(inactiveTargets); + inactiveTargets.clear(); + } targetsByEndpoint.forEach((applicationEndpoint, targets) -> { ZoneId targetZone = applicationEndpoint.targets().stream() - .map(Endpoint.Target::deployment) - .map(DeploymentId::zoneId) - .findFirst() - .get(); + .map(Endpoint.Target::deployment) + .map(DeploymentId::zoneId) + .findFirst() + .get(); nameServiceForwarderIn(targetZone).createAlias(RecordName.from(applicationEndpoint.dnsName()), targets, Priority.normal); }); + inactiveTargetsByEndpoint.forEach((applicationEndpoint, targets) -> { + ZoneId targetZone = applicationEndpoint.targets().stream() + .map(Endpoint.Target::deployment) + .map(DeploymentId::zoneId) + .findFirst() + .get(); + targets.forEach(target -> { + nameServiceForwarderIn(targetZone).removeRecords(Record.Type.ALIAS, + RecordName.from(applicationEndpoint.dnsName()), + RecordData.fqdn(target.name().value()), + Priority.normal); + }); + }); } - /** Store routing policies for given load balancers */ - private void storePoliciesOf(LoadBalancerAllocation allocation, @SuppressWarnings("unused") Lock lock) { - var policies = new LinkedHashMap<>(get(allocation.deployment.applicationId())); + /** + * Store routing policies for given load balancers + * + * @return the updated policies + */ + private RoutingPolicyList 
storePoliciesOf(LoadBalancerAllocation allocation, RoutingPolicyList instancePolicies, @SuppressWarnings("unused") Lock lock) { + Map<RoutingPolicyId, RoutingPolicy> policies = new LinkedHashMap<>(instancePolicies.asMap()); for (LoadBalancer loadBalancer : allocation.loadBalancers) { if (loadBalancer.hostname().isEmpty()) continue; var policyId = new RoutingPolicyId(loadBalancer.application(), loadBalancer.cluster(), allocation.deployment.zoneId()); @@ -271,7 +324,9 @@ public class RoutingPolicies { updateZoneDnsOf(newPolicy); policies.put(newPolicy.id(), newPolicy); } - db.writeRoutingPolicies(allocation.deployment.applicationId(), policies); + RoutingPolicyList updated = RoutingPolicyList.copyOf(policies.values()); + db.writeRoutingPolicies(allocation.deployment.applicationId(), updated.asList()); + return updated; } /** Update zone DNS record for given policy */ @@ -283,14 +338,17 @@ public class RoutingPolicies { } } - /** Remove policies and zone DNS records unreferenced by given load balancers */ - private void removePoliciesUnreferencedBy(LoadBalancerAllocation allocation, @SuppressWarnings("unused") Lock lock) { - var policies = get(allocation.deployment.applicationId()); - var newPolicies = new LinkedHashMap<>(policies); - var activeIds = allocation.asPolicyIds(); - for (var policy : policies.values()) { - // Leave active load balancers and irrelevant zones alone - if (activeIds.contains(policy.id()) || !policy.appliesTo(allocation.deployment)) continue; + /** + * Remove policies and zone DNS records unreferenced by given load balancers + * + * @return the updated policies + */ + private RoutingPolicyList removePoliciesUnreferencedBy(LoadBalancerAllocation allocation, RoutingPolicyList instancePolicies, @SuppressWarnings("unused") Lock lock) { + Map<RoutingPolicyId, RoutingPolicy> newPolicies = new LinkedHashMap<>(instancePolicies.asMap()); + Set<RoutingPolicyId> activeIds = allocation.asPolicyIds(); + RoutingPolicyList removable = 
instancePolicies.deployment(allocation.deployment) + .not().matching(policy -> activeIds.contains(policy.id())); + for (var policy : removable) { for (var endpoint : policy.zoneEndpointsIn(controller.system(), RoutingMethod.exclusive, controller.zoneRegistry())) { var dnsName = endpoint.dnsName(); nameServiceForwarderIn(allocation.deployment.zoneId()).removeRecords(Record.Type.CNAME, @@ -299,13 +357,14 @@ public class RoutingPolicies { } newPolicies.remove(policy.id()); } - db.writeRoutingPolicies(allocation.deployment.applicationId(), newPolicies); + RoutingPolicyList updated = RoutingPolicyList.copyOf(newPolicies.values()); + db.writeRoutingPolicies(allocation.deployment.applicationId(), updated.asList()); + return updated; } /** Remove unreferenced instance endpoints from DNS */ - private void removeGlobalDnsUnreferencedBy(LoadBalancerAllocation allocation, @SuppressWarnings("unused") Lock lock) { - Collection<RoutingPolicy> zonePolicies = get(allocation.deployment).values(); - Set<RoutingId> removalCandidates = new HashSet<>(instanceRoutingTable(zonePolicies).keySet()); + private void removeGlobalDnsUnreferencedBy(LoadBalancerAllocation allocation, RoutingPolicyList deploymentPolicies, @SuppressWarnings("unused") Lock lock) { + Set<RoutingId> removalCandidates = new HashSet<>(deploymentPolicies.asInstanceRoutingTable().keySet()); Set<RoutingId> activeRoutingIds = instanceRoutingIds(allocation); removalCandidates.removeAll(activeRoutingIds); for (var id : removalCandidates) { @@ -321,9 +380,8 @@ public class RoutingPolicies { } /** Remove unreferenced application endpoints in given allocation from DNS */ - private void removeApplicationDnsUnreferencedBy(LoadBalancerAllocation allocation, @SuppressWarnings("unused") Lock lock) { - Collection<RoutingPolicy> zonePolicies = get(allocation.deployment).values(); - Map<RoutingId, List<RoutingPolicy>> routingTable = applicationRoutingTable(zonePolicies); + private void 
removeApplicationDnsUnreferencedBy(LoadBalancerAllocation allocation, RoutingPolicyList deploymentPolicies, @SuppressWarnings("unused") Lock lock) { + Map<RoutingId, List<RoutingPolicy>> routingTable = deploymentPolicies.asApplicationRoutingTable(); Set<RoutingId> removalCandidates = new HashSet<>(routingTable.keySet()); Set<RoutingId> activeRoutingIds = applicationRoutingIds(allocation); removalCandidates.removeAll(activeRoutingIds); @@ -344,42 +402,6 @@ public class RoutingPolicies { } } - /** Returns whether we disable given policy from its application endpoints, taking weights and status of other instances into account */ - private boolean removableFromApplicationEndpoint(RoutingPolicy policy, Application application, Map<DeploymentId, Map<EndpointId, Integer>> targetWeights) { - List<RoutingPolicy> relatedPolicies = application.productionInstances().keySet().stream() - .filter(instanceName -> !policy.id().owner().instance().equals(instanceName)) - .map(instanceName -> application.id().instance(instanceName)) - .flatMap(instance -> get(instance).values().stream()) - .filter(relatedPolicy -> relatedPolicy.id().zone().equals(policy.id().zone()) && - relatedPolicy.id().cluster().equals(policy.id().cluster())) - .collect(Collectors.toUnmodifiableList()); - for (var endpointId : policy.applicationEndpoints()) { - boolean anyIn = relatedPolicies.stream() - .anyMatch(rp -> rp.applicationEndpoints().contains(endpointId) && - rp.status().routingStatus().value() == RoutingStatus.Value.in && - targetWeights.get(rp.id().deployment()) - .get(endpointId) > 0); - if (!anyIn) { - return false; - } - } - return true; - } - - /** Returns target weights of application endpoints in given application, grouped by deployment */ - private Map<DeploymentId, Map<EndpointId, Integer>> targetWeights(Application application) { - Map<DeploymentId, Map<EndpointId, Integer>> weights = new HashMap<>(); - for (var endpoint : application.deploymentSpec().endpoints()) { - for (var target : 
endpoint.targets()) { - weights.computeIfAbsent(new DeploymentId(application.id().instance(target.instance()), - ZoneId.from(Environment.prod, target.region())), - (k) -> new HashMap<>()) - .put(EndpointId.of(endpoint.endpointId()), target.weight()); - } - } - return weights; - } - private Set<RoutingId> instanceRoutingIds(LoadBalancerAllocation allocation) { return routingIdsFrom(allocation, false); } @@ -402,45 +424,13 @@ public class RoutingPolicies { return Collections.unmodifiableSet(routingIds); } - /** Compute a routing table for instance-level endpoints from given policies */ - private static Map<RoutingId, List<RoutingPolicy>> instanceRoutingTable(Collection<RoutingPolicy> routingPolicies) { - return routingTable(routingPolicies, false); - } - - /** Compute a routing table for application-level endpoints from given policies */ - private static Map<RoutingId, List<RoutingPolicy>> applicationRoutingTable(Collection<RoutingPolicy> routingPolicies) { - return routingTable(routingPolicies, true); - } - - private static Map<RoutingId, List<RoutingPolicy>> routingTable(Collection<RoutingPolicy> routingPolicies, boolean applicationLevel) { - Map<RoutingId, List<RoutingPolicy>> routingTable = new LinkedHashMap<>(); - for (var policy : routingPolicies) { - Set<EndpointId> endpoints = applicationLevel ? 
policy.applicationEndpoints() : policy.instanceEndpoints(); - for (var endpoint : endpoints) { - RoutingId id = RoutingId.of(policy.id().owner(), endpoint); - routingTable.computeIfAbsent(id, k -> new ArrayList<>()) - .add(policy); - } - } - return Collections.unmodifiableMap(routingTable); - } - - /** Returns whether the endpoints of given policy are globally configured {@link RoutingStatus.Value#out} */ - private static boolean isConfiguredOut(ZoneRoutingPolicy zonePolicy, RoutingPolicy policy, Set<ZoneId> inactiveZones) { - return isConfiguredOut(policy, Optional.of(zonePolicy), inactiveZones); - } - /** Returns whether the endpoints of given policy are configured {@link RoutingStatus.Value#out} */ - private static boolean isConfiguredOut(RoutingPolicy policy, Set<ZoneId> inactiveZones) { - return isConfiguredOut(policy, Optional.empty(), inactiveZones); - } - - private static boolean isConfiguredOut(RoutingPolicy policy, Optional<ZoneRoutingPolicy> zonePolicy, Set<ZoneId> inactiveZones) { + private static boolean isConfiguredOut(ZoneRoutingPolicy zonePolicy, RoutingPolicy policy, Set<ZoneId> inactiveZones) { // A deployment can be configured out from endpoints at any of the following levels: - // - zone level (ZoneRoutingPolicy, only applies to global endpoints) + // - zone level (ZoneRoutingPolicy) // - deployment level (RoutingPolicy) // - application package level (deployment.xml) - return (zonePolicy.isPresent() && zonePolicy.get().routingStatus().value() == RoutingStatus.Value.out) || + return zonePolicy.routingStatus().value() == RoutingStatus.Value.out || policy.status().routingStatus().value() == RoutingStatus.Value.out || inactiveZones.contains(policy.id().zone()); } @@ -498,9 +488,9 @@ public class RoutingPolicies { private final List<LoadBalancer> loadBalancers; private final DeploymentSpec deploymentSpec; - private LoadBalancerAllocation(ApplicationId application, ZoneId zone, List<LoadBalancer> loadBalancers, + private 
LoadBalancerAllocation(List<LoadBalancer> loadBalancers, DeploymentId deployment, DeploymentSpec deploymentSpec) { - this.deployment = new DeploymentId(application, zone); + this.deployment = deployment; this.loadBalancers = List.copyOf(loadBalancers); this.deploymentSpec = deploymentSpec; } diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/RoutingPolicyId.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/RoutingPolicyId.java index e9cbdbd9b75..d64241b1239 100644 --- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/RoutingPolicyId.java +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/RoutingPolicyId.java @@ -60,4 +60,9 @@ public class RoutingPolicyId { return Objects.hash(owner, cluster, zone); } + @Override + public String toString() { + return "routing policy for " + cluster + ", in " + zone + ", owned by " + owner; + } + } diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/RoutingPolicyList.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/RoutingPolicyList.java new file mode 100644 index 00000000000..a5efc016c68 --- /dev/null +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/RoutingPolicyList.java @@ -0,0 +1,99 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+package com.yahoo.vespa.hosted.controller.routing; + +import com.yahoo.collections.AbstractFilteringList; +import com.yahoo.config.provision.ApplicationId; +import com.yahoo.vespa.hosted.controller.api.identifiers.DeploymentId; +import com.yahoo.vespa.hosted.controller.application.EndpointId; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import java.util.function.Function; +import java.util.stream.Collectors; + +/** + * A filterable list of {@link RoutingPolicy}'s. + * + * This is immutable. + * + * @author mpolden + */ +public class RoutingPolicyList extends AbstractFilteringList<RoutingPolicy, RoutingPolicyList> { + + private final Map<RoutingPolicyId, RoutingPolicy> policiesById; + + protected RoutingPolicyList(Collection<RoutingPolicy> items, boolean negate) { + super(items, negate, RoutingPolicyList::new); + this.policiesById = items.stream().collect(Collectors.collectingAndThen( + Collectors.toMap(RoutingPolicy::id, + Function.identity(), + (p1, p2) -> { + throw new IllegalArgumentException("Duplicate key " + p1.id()); + }, + LinkedHashMap::new), + Collections::unmodifiableMap) + ); + } + + /** Returns the subset of policies owned by given instance */ + public RoutingPolicyList instance(ApplicationId instance) { + return matching(policy -> policy.id().owner().equals(instance)); + } + + /** Returns the subset of policies applying to given deployment */ + public RoutingPolicyList deployment(DeploymentId deployment) { + return matching(policy -> policy.appliesTo(deployment)); + } + + /** Returns the policy with given ID, if any */ + public Optional<RoutingPolicy> of(RoutingPolicyId id) { + return Optional.ofNullable(policiesById.get(id)); + } + + /** Returns this grouped by policy ID */ + public Map<RoutingPolicyId, RoutingPolicy> asMap() { + return policiesById; + } + + /** Returns a 
copy of this with all policies for instance replaced with given policies */ + public RoutingPolicyList replace(ApplicationId instance, RoutingPolicyList policies) { + List<RoutingPolicy> copy = new ArrayList<>(asList()); + copy.removeIf(policy -> policy.id().owner().equals(instance)); + policies.forEach(copy::add); + return copyOf(copy); + } + + /** Create a routing table for instance-level endpoints backed by routing policies in this */ + Map<RoutingId, List<RoutingPolicy>> asInstanceRoutingTable() { + return asRoutingTable(false); + } + + /** Create a routing table for application-level endpoints backed by routing policies in this */ + Map<RoutingId, List<RoutingPolicy>> asApplicationRoutingTable() { + return asRoutingTable(true); + } + + private Map<RoutingId, List<RoutingPolicy>> asRoutingTable(boolean applicationLevel) { + Map<RoutingId, List<RoutingPolicy>> routingTable = new LinkedHashMap<>(); + for (var policy : this) { + Set<EndpointId> endpoints = applicationLevel ? policy.applicationEndpoints() : policy.instanceEndpoints(); + for (var endpoint : endpoints) { + RoutingId id = RoutingId.of(policy.id().owner(), endpoint); + routingTable.computeIfAbsent(id, k -> new ArrayList<>()) + .add(policy); + } + } + return Collections.unmodifiableMap(routingTable); + } + + public static RoutingPolicyList copyOf(Collection<RoutingPolicy> policies) { + return new RoutingPolicyList(policies, false); + } + +} diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/context/DeploymentRoutingContext.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/context/DeploymentRoutingContext.java new file mode 100644 index 00000000000..e5eb1382ccf --- /dev/null +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/context/DeploymentRoutingContext.java @@ -0,0 +1,165 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+package com.yahoo.vespa.hosted.controller.routing.context; + +import com.yahoo.config.application.api.DeploymentSpec; +import com.yahoo.config.provision.ClusterSpec; +import com.yahoo.config.provision.zone.RoutingMethod; +import com.yahoo.vespa.hosted.controller.LockedApplication; +import com.yahoo.vespa.hosted.controller.RoutingController; +import com.yahoo.vespa.hosted.controller.api.application.v4.model.EndpointStatus; +import com.yahoo.vespa.hosted.controller.api.identifiers.DeploymentId; +import com.yahoo.vespa.hosted.controller.api.integration.configserver.ConfigServer; +import com.yahoo.vespa.hosted.controller.api.integration.configserver.ContainerEndpoint; +import com.yahoo.vespa.hosted.controller.application.Endpoint; +import com.yahoo.vespa.hosted.controller.routing.RoutingPolicy; +import com.yahoo.vespa.hosted.controller.routing.RoutingPolicyId; +import com.yahoo.vespa.hosted.controller.routing.RoutingStatus; + +import java.time.Clock; +import java.util.List; +import java.util.Objects; +import java.util.Optional; +import java.util.Set; +import java.util.stream.Collectors; + +/** + * A deployment routing context, which extends {@link RoutingContext} to support routing configuration of a deployment. 
+ * + * @author mpolden + */ +public abstract class DeploymentRoutingContext implements RoutingContext { + + final DeploymentId deployment; + final RoutingController controller; + final RoutingMethod method; + + public DeploymentRoutingContext(DeploymentId deployment, RoutingMethod method, RoutingController controller) { + this.deployment = Objects.requireNonNull(deployment); + this.controller = Objects.requireNonNull(controller); + this.method = Objects.requireNonNull(method); + } + + /** + * Prepare routing configuration for the deployment in this context + * + * @return the container endpoints relevant for this deployment, as declared in deployment spec + */ + public final Set<ContainerEndpoint> prepare(LockedApplication application) { + return controller.containerEndpointsOf(application, deployment.applicationId().instance(), deployment.zoneId()); + } + + /** Configure routing for the deployment in this context, using given deployment spec */ + public final void configure(DeploymentSpec deploymentSpec) { + controller.policies().refresh(deployment, deploymentSpec); + } + + /** Routing method of this context */ + public final RoutingMethod routingMethod() { + return method; + } + + /** Read the routing policy for given cluster in this deployment */ + public final Optional<RoutingPolicy> routingPolicy(ClusterSpec.Id cluster) { + RoutingPolicyId id = new RoutingPolicyId(deployment.applicationId(), cluster, deployment.zoneId()); + return controller.policies().read(deployment).of(id); + } + + /** + * Extension of a {@link DeploymentRoutingContext} for deployments using either {@link RoutingMethod#shared} or + * {@link RoutingMethod#sharedLayer4} routing. 
+ */ + public static class SharedDeploymentRoutingContext extends DeploymentRoutingContext { + + private final Clock clock; + private final ConfigServer configServer; + + public SharedDeploymentRoutingContext(DeploymentId deployment, RoutingController controller, ConfigServer configServer, Clock clock) { + super(deployment, RoutingMethod.shared, controller); + this.clock = Objects.requireNonNull(clock); + this.configServer = Objects.requireNonNull(configServer); + } + + @Override + public void setRoutingStatus(RoutingStatus.Value value, RoutingStatus.Agent agent) { + EndpointStatus newStatus = new EndpointStatus(value == RoutingStatus.Value.in + ? EndpointStatus.Status.in + : EndpointStatus.Status.out, + agent.name(), + clock.instant()); + try { + configServer.setGlobalRotationStatus(deployment, upstreamNames(), newStatus); + } catch (Exception e) { + throw new RuntimeException("Failed to change rotation status of " + deployment, e); + } + } + + @Override + public RoutingStatus routingStatus() { + // In a given deployment, all upstreams (clusters) share the same status, so we can query using any + // upstream name + String upstreamName = upstreamNames().get(0); + EndpointStatus status = configServer.getGlobalRotationStatus(deployment, upstreamName); + RoutingStatus.Agent agent; + try { + agent = RoutingStatus.Agent.valueOf(status.agent().toLowerCase()); + } catch (IllegalArgumentException e) { + agent = RoutingStatus.Agent.unknown; + } + return new RoutingStatus(status.status() == EndpointStatus.Status.in + ? 
RoutingStatus.Value.in + : RoutingStatus.Value.out, + agent, + status.changedAt()); + } + + private List<String> upstreamNames() { + List<String> upstreamNames = controller.readEndpointsOf(deployment) + .scope(Endpoint.Scope.zone) + .shared() + .asList().stream() + .map(endpoint -> endpoint.upstreamName(deployment)) + .distinct() + .collect(Collectors.toList()); + if (upstreamNames.isEmpty()) { + throw new IllegalArgumentException("No upstream names found for " + deployment); + } + return upstreamNames; + } + + private Optional<Endpoint> primaryEndpoint() { + return controller.readDeclaredEndpointsOf(deployment.applicationId()) + .requiresRotation() + .primary(); + } + + } + + /** + * Implementation of a {@link DeploymentRoutingContext} for deployments using {@link RoutingMethod#exclusive} + * routing. + */ + public static class ExclusiveDeploymentRoutingContext extends DeploymentRoutingContext { + + public ExclusiveDeploymentRoutingContext(DeploymentId deployment, RoutingController controller) { + super(deployment, RoutingMethod.exclusive, controller); + } + + @Override + public void setRoutingStatus(RoutingStatus.Value value, RoutingStatus.Agent agent) { + controller.policies().setRoutingStatus(deployment, value, agent); + } + + @Override + public RoutingStatus routingStatus() { + // Status for a deployment applies to all clusters within the deployment, so we use the status from the + // first matching policy here + return controller.policies().read(deployment) + .first() + .map(RoutingPolicy::status) + .map(RoutingPolicy.Status::routingStatus) + .orElse(RoutingStatus.DEFAULT); + } + + } + +} diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/context/ExclusiveZoneRoutingContext.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/context/ExclusiveZoneRoutingContext.java new file mode 100644 index 00000000000..75009e0b37a --- /dev/null +++ 
b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/context/ExclusiveZoneRoutingContext.java @@ -0,0 +1,41 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +package com.yahoo.vespa.hosted.controller.routing.context; + +import com.yahoo.config.provision.zone.RoutingMethod; +import com.yahoo.config.provision.zone.ZoneId; +import com.yahoo.vespa.hosted.controller.routing.RoutingPolicies; +import com.yahoo.vespa.hosted.controller.routing.RoutingStatus; + +import java.util.Objects; + +/** + * An implementation of {@link RoutingContext} for a zone using {@link RoutingMethod#exclusive} routing. + * + * @author mpolden + */ +public class ExclusiveZoneRoutingContext implements RoutingContext { + + private final RoutingPolicies policies; + private final ZoneId zone; + + public ExclusiveZoneRoutingContext(ZoneId zone, RoutingPolicies policies) { + this.policies = Objects.requireNonNull(policies); + this.zone = Objects.requireNonNull(zone); + } + + @Override + public void setRoutingStatus(RoutingStatus.Value value, RoutingStatus.Agent agent) { + policies.setRoutingStatus(zone, value); + } + + @Override + public RoutingStatus routingStatus() { + return policies.read(zone).routingStatus(); + } + + @Override + public RoutingMethod routingMethod() { + return RoutingMethod.exclusive; + } + +} diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/context/RoutingContext.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/context/RoutingContext.java new file mode 100644 index 00000000000..6f43416b9b5 --- /dev/null +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/context/RoutingContext.java @@ -0,0 +1,23 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+package com.yahoo.vespa.hosted.controller.routing.context; + +import com.yahoo.config.provision.zone.RoutingMethod; +import com.yahoo.vespa.hosted.controller.routing.RoutingStatus; + +/** + * Top-level interface for a routing context, which provides control of routing status for a deployment or zone. + * + * @author mpolden + */ +public interface RoutingContext { + + /** Change the routing status for the zone or deployment represented by this context */ + void setRoutingStatus(RoutingStatus.Value value, RoutingStatus.Agent agent); + + /** Get the current routing status for the zone or deployment represented by this context */ + RoutingStatus routingStatus(); + + /** Routing method used in this context */ + RoutingMethod routingMethod(); + +} diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/context/SharedZoneRoutingContext.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/context/SharedZoneRoutingContext.java new file mode 100644 index 00000000000..2923c8dff5c --- /dev/null +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/context/SharedZoneRoutingContext.java @@ -0,0 +1,48 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +package com.yahoo.vespa.hosted.controller.routing.context; + +import com.yahoo.config.provision.zone.RoutingMethod; +import com.yahoo.config.provision.zone.ZoneId; +import com.yahoo.vespa.hosted.controller.api.integration.configserver.ConfigServer; +import com.yahoo.vespa.hosted.controller.routing.RoutingStatus; + +import java.time.Instant; +import java.util.Objects; + +/** + * An implementation of {@link RoutingContext} for a zone, using either {@link RoutingMethod#shared} or + * {@link RoutingMethod#sharedLayer4} routing. 
+ * + * @author mpolden + */ +public class SharedZoneRoutingContext implements RoutingContext { + + private final ConfigServer configServer; + private final ZoneId zone; + + public SharedZoneRoutingContext(ZoneId zone, ConfigServer configServer) { + this.configServer = Objects.requireNonNull(configServer); + this.zone = Objects.requireNonNull(zone); + } + + @Override + public void setRoutingStatus(RoutingStatus.Value value, RoutingStatus.Agent agent) { + boolean in = value == RoutingStatus.Value.in; + configServer.setGlobalRotationStatus(zone, in); + } + + @Override + public RoutingStatus routingStatus() { + boolean in = configServer.getGlobalRotationStatus(zone); + RoutingStatus.Value newValue = in ? RoutingStatus.Value.in : RoutingStatus.Value.out; + return new RoutingStatus(newValue, + RoutingStatus.Agent.operator, + Instant.EPOCH); // API does not support time of change + } + + @Override + public RoutingMethod routingMethod() { + return RoutingMethod.shared; + } + +} diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/rotation/Rotation.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/rotation/Rotation.java index ca5d2d5915f..0cf7101cac0 100644 --- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/rotation/Rotation.java +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/rotation/Rotation.java @@ -1,5 +1,5 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
-package com.yahoo.vespa.hosted.controller.rotation; +package com.yahoo.vespa.hosted.controller.routing.rotation; import com.yahoo.text.Text; import java.util.Objects; diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/rotation/RotationId.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/rotation/RotationId.java index 2b75777fbbd..4d97962a40a 100644 --- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/rotation/RotationId.java +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/rotation/RotationId.java @@ -1,5 +1,5 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -package com.yahoo.vespa.hosted.controller.rotation; +package com.yahoo.vespa.hosted.controller.routing.rotation; import java.util.Objects; diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/rotation/RotationLock.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/rotation/RotationLock.java index fe9280b1193..36a43f80e9a 100644 --- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/rotation/RotationLock.java +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/rotation/RotationLock.java @@ -1,5 +1,5 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
-package com.yahoo.vespa.hosted.controller.rotation; +package com.yahoo.vespa.hosted.controller.routing.rotation; import com.yahoo.vespa.curator.Lock; diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/rotation/RotationRepository.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/rotation/RotationRepository.java index 5b24f39717b..39a0b6a8858 100644 --- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/rotation/RotationRepository.java +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/rotation/RotationRepository.java @@ -1,5 +1,5 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -package com.yahoo.vespa.hosted.controller.rotation; +package com.yahoo.vespa.hosted.controller.routing.rotation; import com.yahoo.config.application.api.DeploymentInstanceSpec; import com.yahoo.config.application.api.DeploymentSpec; @@ -21,7 +21,6 @@ import java.util.Comparator; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; -import java.util.Optional; import java.util.Set; import java.util.function.Function; import java.util.logging.Logger; @@ -56,9 +55,11 @@ public class RotationRepository { return new RotationLock(curator.lockRotations()); } - /** Get rotation by given rotationId */ - public Optional<Rotation> getRotation(RotationId rotationId) { - return Optional.of(allRotations.get(rotationId)); + /** Get rotation with given id */ + public Rotation requireRotation(RotationId id) { + Rotation rotation = allRotations.get(id); + if (rotation == null) throw new IllegalArgumentException("No such rotation: '" + id.asString() + "'"); + return rotation; } /** diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/rotation/RotationState.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/rotation/RotationState.java index 032f01433b3..19e816a0b51 100644 
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/rotation/RotationState.java +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/rotation/RotationState.java @@ -1,5 +1,5 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -package com.yahoo.vespa.hosted.controller.rotation; +package com.yahoo.vespa.hosted.controller.routing.rotation; /** * The possible states of a global rotation. diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/rotation/RotationStatus.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/rotation/RotationStatus.java index 1ddbd640e53..6d95ad9a230 100644 --- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/rotation/RotationStatus.java +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/rotation/RotationStatus.java @@ -1,5 +1,5 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
-package com.yahoo.vespa.hosted.controller.rotation; +package com.yahoo.vespa.hosted.controller.routing.rotation; import com.yahoo.config.provision.zone.ZoneId; import com.yahoo.vespa.hosted.controller.application.Deployment; diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTest.java index be180f27af6..1215ddbc2ad 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTest.java +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTest.java @@ -13,13 +13,11 @@ import com.yahoo.config.provision.CloudName; import com.yahoo.config.provision.ClusterSpec; import com.yahoo.config.provision.Environment; import com.yahoo.config.provision.HostName; -import com.yahoo.config.provision.InstanceName; import com.yahoo.config.provision.RegionName; import com.yahoo.config.provision.TenantName; import com.yahoo.config.provision.zone.RoutingMethod; import com.yahoo.config.provision.zone.ZoneId; import com.yahoo.path.Path; -import com.yahoo.vespa.hosted.controller.api.application.v4.model.EndpointStatus; import com.yahoo.vespa.hosted.controller.api.identifiers.DeploymentId; import com.yahoo.vespa.hosted.controller.api.integration.certificates.EndpointCertificateMetadata; import com.yahoo.vespa.hosted.controller.api.integration.configserver.ContainerEndpoint; @@ -38,8 +36,10 @@ import com.yahoo.vespa.hosted.controller.deployment.DeploymentContext; import com.yahoo.vespa.hosted.controller.deployment.DeploymentTester; import com.yahoo.vespa.hosted.controller.integration.ZoneApiMock; import com.yahoo.vespa.hosted.controller.persistence.MockCuratorDb; -import com.yahoo.vespa.hosted.controller.rotation.RotationId; -import com.yahoo.vespa.hosted.controller.rotation.RotationLock; +import com.yahoo.vespa.hosted.controller.routing.RoutingStatus; +import 
com.yahoo.vespa.hosted.controller.routing.context.DeploymentRoutingContext; +import com.yahoo.vespa.hosted.controller.routing.rotation.RotationId; +import com.yahoo.vespa.hosted.controller.routing.rotation.RotationLock; import com.yahoo.vespa.hosted.rotation.config.RotationsConfig; import org.junit.Test; @@ -49,6 +49,7 @@ import java.util.Collection; import java.util.List; import java.util.Map; import java.util.Optional; +import java.util.OptionalInt; import java.util.Set; import java.util.function.Function; import java.util.function.Supplier; @@ -214,44 +215,59 @@ public class ControllerTest { // Check initial rotation status var deployment1 = context.deploymentIdIn(zone1); - var status1 = tester.controller().routing().globalRotationStatus(deployment1); - assertEquals(1, status1.size()); - assertTrue("All upstreams are in", status1.values().stream().allMatch(es -> es.getStatus() == EndpointStatus.Status.in)); + DeploymentRoutingContext routingContext = tester.controller().routing().of(deployment1); + RoutingStatus status1 = routingContext.routingStatus(); + assertEquals(RoutingStatus.Value.in, status1.value()); // Set the deployment out of service in the global rotation - var newStatus = new EndpointStatus(EndpointStatus.Status.out, "unit-test", ControllerTest.class.getSimpleName(), tester.clock().instant().getEpochSecond()); - tester.controller().routing().setGlobalRotationStatus(deployment1, newStatus); - status1 = tester.controller().routing().globalRotationStatus(deployment1); - assertEquals(1, status1.size()); - assertTrue("All upstreams are out", status1.values().stream().allMatch(es -> es.getStatus() == EndpointStatus.Status.out)); - assertTrue("Reason is set", status1.values().stream().allMatch(es -> es.getReason().equals("unit-test"))); + routingContext.setRoutingStatus(RoutingStatus.Value.out, RoutingStatus.Agent.operator); + RoutingStatus status2 = routingContext.routingStatus(); + assertEquals(RoutingStatus.Value.out, status2.value()); // Other 
deployment remains in - var status2 = tester.controller().routing().globalRotationStatus(context.deploymentIdIn(zone2)); - assertEquals(1, status2.size()); - assertTrue("All upstreams are in", status2.values().stream().allMatch(es -> es.getStatus() == EndpointStatus.Status.in)); + RoutingStatus status3 = tester.controller().routing().of(context.deploymentIdIn(zone2)).routingStatus(); + assertEquals(RoutingStatus.Value.in, status3.value()); } @Test public void testDnsUpdatesForGlobalEndpoint() { var betaContext = tester.newDeploymentContext("tenant1", "app1", "beta"); var defaultContext = tester.newDeploymentContext("tenant1", "app1", "default"); + + ZoneId usWest = ZoneId.from("prod.us-west-1"); + ZoneId usCentral = ZoneId.from("prod.us-central-1"); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() + .athenzIdentity(AthenzDomain.from("domain"), AthenzService.from("service")) .instances("beta,default") .endpoint("default", "foo") - .region("us-west-1") - .region("us-central-1") // Two deployments should result in each DNS alias being registered once + .region(usWest.region()) + .region(usCentral.region()) // Two deployments should result in each DNS alias being registered once .build(); + tester.controllerTester().zoneRegistry().setRoutingMethod(List.of(ZoneApiMock.from(usWest), ZoneApiMock.from(usCentral)), + RoutingMethod.shared, + RoutingMethod.sharedLayer4); betaContext.submit(applicationPackage).deploy(); { // Expected rotation names are passed to beta instance deployments Collection<Deployment> betaDeployments = betaContext.instance().deployments().values(); assertFalse(betaDeployments.isEmpty()); + Set<ContainerEndpoint> containerEndpoints = Set.of(new ContainerEndpoint("foo", + "global", + List.of("beta--app1--tenant1.global.vespa.oath.cloud", + "rotation-id-01"), + OptionalInt.empty(), + RoutingMethod.shared), + new ContainerEndpoint("foo", + "global", + List.of("beta.app1.tenant1.global.vespa.oath.cloud", + "rotation-id-01"), + 
OptionalInt.empty(), + RoutingMethod.sharedLayer4)); + for (Deployment deployment : betaDeployments) { - assertEquals("Rotation names are passed to config server in " + deployment.zone(), - Set.of("rotation-id-01", - "beta--app1--tenant1.global.vespa.oath.cloud"), - tester.configServer().containerEndpointNames(betaContext.deploymentIdIn(deployment.zone()))); + assertEquals(containerEndpoints, + tester.configServer().containerEndpoints() + .get(betaContext.deploymentIdIn(deployment.zone()))); } betaContext.flushDnsUpdates(); } @@ -259,11 +275,21 @@ public class ControllerTest { { // Expected rotation names are passed to default instance deployments Collection<Deployment> defaultDeployments = defaultContext.instance().deployments().values(); assertFalse(defaultDeployments.isEmpty()); + Set<ContainerEndpoint> containerEndpoints = Set.of(new ContainerEndpoint("foo", + "global", + List.of("app1--tenant1.global.vespa.oath.cloud", + "rotation-id-02"), + OptionalInt.empty(), + RoutingMethod.shared), + new ContainerEndpoint("foo", + "global", + List.of("app1.tenant1.global.vespa.oath.cloud", + "rotation-id-02"), + OptionalInt.empty(), + RoutingMethod.sharedLayer4)); for (Deployment deployment : defaultDeployments) { - assertEquals("Rotation names are passed to config server in " + deployment.zone(), - Set.of("rotation-id-02", - "app1--tenant1.global.vespa.oath.cloud"), - tester.configServer().containerEndpointNames(defaultContext.deploymentIdIn(deployment.zone()))); + assertEquals(containerEndpoints, + tester.configServer().containerEndpoints().get(defaultContext.deploymentIdIn(deployment.zone()))); } defaultContext.flushDnsUpdates(); } @@ -277,13 +303,17 @@ public class ControllerTest { assertEquals(data, record.get().data().asString()); }); - Map<ApplicationId, List<String>> globalDnsNamesByInstance = Map.of(betaContext.instanceId(), List.of("beta--app1--tenant1.global.vespa.oath.cloud"), - defaultContext.instanceId(), List.of("app1--tenant1.global.vespa.oath.cloud")); + 
Map<ApplicationId, Set<String>> globalDnsNamesByInstance = Map.of(betaContext.instanceId(), Set.of("beta--app1--tenant1.global.vespa.oath.cloud", + "beta.app1.tenant1.global.vespa.oath.cloud"), + defaultContext.instanceId(), Set.of("app1--tenant1.global.vespa.oath.cloud", + "app1.tenant1.global.vespa.oath.cloud")); globalDnsNamesByInstance.forEach((instance, dnsNames) -> { - List<String> actualDnsNames = tester.controller().routing().readDeclaredEndpointsOf(instance) - .scope(Endpoint.Scope.global) - .mapToList(Endpoint::dnsName); + Set<String> actualDnsNames = tester.controller().routing().readDeclaredEndpointsOf(instance) + .scope(Endpoint.Scope.global) + .asList().stream() + .map(Endpoint::dnsName) + .collect(Collectors.toSet()); assertEquals("Global DNS names for " + instance, dnsNames, actualDnsNames); }); } @@ -620,33 +650,46 @@ public class ControllerTest { @Test public void testDnsUpdatesForApplicationEndpoint() { - var context = tester.newDeploymentContext("tenant1", "app1", "beta"); + ApplicationId beta = ApplicationId.from("tenant1", "app1", "beta"); + ApplicationId main = ApplicationId.from("tenant1", "app1", "main"); + var context = tester.newDeploymentContext(beta); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .instances("beta,main") .region("us-west-1") .region("us-east-3") .applicationEndpoint("a", "default", "us-west-1", - Map.of(InstanceName.from("beta"), 2, - InstanceName.from("main"), 8)) + Map.of(beta.instance(), 2, + main.instance(), 8)) .applicationEndpoint("b", "default", "us-west-1", - Map.of(InstanceName.from("beta"), 1, - InstanceName.from("main"), 1)) + Map.of(beta.instance(), 1, + main.instance(), 1)) .applicationEndpoint("c", "default", "us-east-3", - Map.of(InstanceName.from("beta"), 4, - InstanceName.from("main"), 6)) + Map.of(beta.instance(), 4, + main.instance(), 6)) .build(); context.submit(applicationPackage).deploy(); - // Endpoint names are passed to each deployment - DeploymentId usWest = 
context.deploymentIdIn(ZoneId.from("prod", "us-west-1")); - DeploymentId usEast = context.deploymentIdIn(ZoneId.from("prod", "us-east-3")); - Map<DeploymentId, List<String>> deploymentEndpoints = Map.of(usWest, List.of("a.app1.tenant1.us-west-1-r.vespa.oath.cloud", "b.app1.tenant1.us-west-1-r.vespa.oath.cloud"), - usEast, List.of("c.app1.tenant1.us-east-3-r.vespa.oath.cloud")); - deploymentEndpoints.forEach((zone, endpointNames) -> { - assertEquals("Endpoint names are passed to config server in " + zone, - Set.of(new ContainerEndpoint("default", "application", - endpointNames)), - tester.configServer().containerEndpoints().get(zone)); + ZoneId usWest = ZoneId.from("prod", "us-west-1"); + ZoneId usEast = ZoneId.from("prod", "us-east-3"); + // Expected container endpoints are passed to each deployment + Map<DeploymentId, Map<String, Integer>> deploymentEndpoints = Map.of( + new DeploymentId(beta, usWest), Map.of("a.app1.tenant1.us-west-1-r.vespa.oath.cloud", 2, + "b.app1.tenant1.us-west-1-r.vespa.oath.cloud", 1), + new DeploymentId(main, usWest), Map.of("a.app1.tenant1.us-west-1-r.vespa.oath.cloud", 8, + "b.app1.tenant1.us-west-1-r.vespa.oath.cloud", 1), + new DeploymentId(beta, usEast), Map.of("c.app1.tenant1.us-east-3-r.vespa.oath.cloud", 4), + new DeploymentId(main, usEast), Map.of("c.app1.tenant1.us-east-3-r.vespa.oath.cloud", 6) + ); + deploymentEndpoints.forEach((deployment, endpoints) -> { + Set<ContainerEndpoint> expected = endpoints.entrySet().stream() + .map(kv -> new ContainerEndpoint("default", "application", + List.of(kv.getKey()), + OptionalInt.of(kv.getValue()), + RoutingMethod.sharedLayer4)) + .collect(Collectors.toSet()); + assertEquals("Endpoint names for " + deployment + " are passed to config server", + expected, + tester.configServer().containerEndpoints().get(deployment)); }); context.flushDnsUpdates(); diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/EndpointTest.java 
b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/EndpointTest.java index 46d27911de4..e50c32d0e5d 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/EndpointTest.java +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/EndpointTest.java @@ -387,8 +387,8 @@ public class EndpointTest { "c2.i2.a2.t2.us-north-1.prod", Endpoint.of(instance2).target(EndpointId.of("ignored2"), ClusterSpec.Id.from("c2"), List.of(zone2)).on(Port.tls(4443)).in(SystemName.main) ); - tests1.forEach((expected, endpoint) -> assertEquals(expected, endpoint.upstreamIdOf(zone))); - tests2.forEach((expected, endpoint) -> assertEquals(expected, endpoint.upstreamIdOf(zone2))); + tests1.forEach((expected, endpoint) -> assertEquals(expected, endpoint.upstreamName(zone))); + tests2.forEach((expected, endpoint) -> assertEquals(expected, endpoint.upstreamName(zone2))); } } diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentContext.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentContext.java index d98789591ab..699721b128c 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentContext.java +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentContext.java @@ -240,13 +240,13 @@ public class DeploymentContext { public DeploymentContext addInactiveRoutingPolicy(ZoneId zone) { var clusterId = "default-inactive"; var id = new RoutingPolicyId(instanceId, ClusterSpec.Id.from(clusterId), zone); - var policies = new LinkedHashMap<>(tester.controller().curator().readRoutingPolicies(instanceId)); + var policies = new LinkedHashMap<>(tester.controller().routing().policies().read(instanceId).asMap()); policies.put(id, new RoutingPolicy(id, HostName.from("lb-host"), Optional.empty(), Set.of(EndpointId.of("default")), Set.of(), new 
RoutingPolicy.Status(false, RoutingStatus.DEFAULT))); - tester.controller().curator().writeRoutingPolicies(instanceId, policies); + tester.controller().curator().writeRoutingPolicies(instanceId, List.copyOf(policies.values())); return this; } diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTriggerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTriggerTest.java index 1f5fa243838..102dfde16ec 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTriggerTest.java +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTriggerTest.java @@ -465,6 +465,21 @@ public class DeploymentTriggerTest { } @Test + public void settingANoOpChangeIsANoOp() { + var app = tester.newDeploymentContext().submit().deploy(); + ApplicationVersion appVersion0 = app.lastSubmission().get(); + app.submit().deploy(); + ApplicationVersion appVersion1 = app.lastSubmission().get(); + + // Triggering a roll-out of an already deployed application is a no-op. 
+ assertEquals(Change.empty(), app.instance().change()); + tester.deploymentTrigger().forceChange(app.instanceId(), Change.of(appVersion0)); + assertEquals(Change.empty(), app.instance().change()); + tester.deploymentTrigger().forceChange(app.instanceId(), Change.of(appVersion1)); + assertEquals(Change.empty(), app.instance().change()); + } + + @Test public void stepIsCompletePreciselyWhenItShouldBe() { var app1 = tester.newDeploymentContext("tenant1", "app1", "default"); var app2 = tester.newDeploymentContext("tenant1", "app2", "default"); diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunnerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunnerTest.java index 5cf554f2c01..ae92fd46f26 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunnerTest.java +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunnerTest.java @@ -210,7 +210,7 @@ public class InternalStepRunnerTest { assertEquals(unfinished, tester.jobs().last(app.instanceId(), JobType.systemTest).get().stepStatuses().get(Step.installReal)); assertEquals(failed, tester.jobs().last(app.instanceId(), JobType.stagingTest).get().stepStatuses().get(Step.installInitialReal)); - tester.clock().advance(InternalStepRunner.Timeouts.of(system()).nodesDown().minus(Duration.ofSeconds(3))); + tester.clock().advance(InternalStepRunner.Timeouts.of(system()).statelessNodesDown().minus(Duration.ofSeconds(3))); tester.runner().run(); assertEquals(unfinished, tester.jobs().last(app.instanceId(), JobType.systemTest).get().stepStatuses().get(Step.installReal)); diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ConfigServerMock.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ConfigServerMock.java index f2fc624630c..aa53d09be04 100644 --- 
a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ConfigServerMock.java +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ConfigServerMock.java @@ -80,7 +80,7 @@ public class ConfigServerMock extends AbstractComponent implements ConfigServer private final Map<DeploymentId, Application> applications = new LinkedHashMap<>(); private final Set<ZoneId> inactiveZones = new HashSet<>(); - private final Map<String, EndpointStatus> endpoints = new HashMap<>(); + private final Map<DeploymentId, EndpointStatus> endpoints = new HashMap<>(); private final NodeRepositoryMock nodeRepository = new NodeRepositoryMock(); private final Map<DeploymentId, ServiceConvergence> serviceStatus = new HashMap<>(); private final Set<ApplicationId> disallowConvergenceCheckApplications = new HashSet<>(); @@ -536,8 +536,8 @@ public class ConfigServerMock extends AbstractComponent implements ConfigServer } @Override - public void setGlobalRotationStatus(DeploymentId deployment, String upstreamName, EndpointStatus status) { - endpoints.put(upstreamName, status); + public void setGlobalRotationStatus(DeploymentId deployment, List<String> upstreamNames, EndpointStatus status) { + endpoints.put(deployment, status); } @Override @@ -550,9 +550,9 @@ public class ConfigServerMock extends AbstractComponent implements ConfigServer } @Override - public EndpointStatus getGlobalRotationStatus(DeploymentId deployment, String endpoint) { - EndpointStatus result = new EndpointStatus(EndpointStatus.Status.in, "", "", 1497618757L); - return endpoints.getOrDefault(endpoint, result); + public EndpointStatus getGlobalRotationStatus(DeploymentId deployment, String upstreamName) { + EndpointStatus status = new EndpointStatus(EndpointStatus.Status.in, "", Instant.ofEpochSecond(1497618757L)); + return endpoints.getOrDefault(deployment, status); } @Override diff --git 
a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ServiceRegistryMock.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ServiceRegistryMock.java index 43ef9daa178..b81b3ae5d66 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ServiceRegistryMock.java +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ServiceRegistryMock.java @@ -44,6 +44,8 @@ import com.yahoo.vespa.hosted.controller.api.integration.stubs.MockMailer; import com.yahoo.vespa.hosted.controller.api.integration.stubs.MockMeteringClient; import com.yahoo.vespa.hosted.controller.api.integration.stubs.MockRunDataStore; import com.yahoo.vespa.hosted.controller.api.integration.stubs.MockTesterCloud; +import com.yahoo.vespa.hosted.controller.api.integration.user.RoleMaintainer; +import com.yahoo.vespa.hosted.controller.api.integration.user.RoleMaintainerMock; import com.yahoo.vespa.hosted.controller.api.integration.vcmr.MockChangeRequestClient; /** @@ -86,6 +88,7 @@ public class ServiceRegistryMock extends AbstractComponent implements ServiceReg private final PlanRegistry planRegistry = new PlanRegistryMock(); private final ResourceDatabaseClient resourceDb = new ResourceDatabaseClientMock(planRegistry); private final BillingDatabaseClient billingDb = new BillingDatabaseClientMock(clock, planRegistry); + private final RoleMaintainerMock roleMaintainer = new RoleMaintainerMock(); public ServiceRegistryMock(SystemName system) { this.zoneRegistryMock = new ZoneRegistryMock(system); @@ -267,6 +270,11 @@ public class ServiceRegistryMock extends AbstractComponent implements ServiceReg return billingDb; } + @Override + public RoleMaintainer roleMaintainer() { + return roleMaintainer; + } + public ConfigServerMock configServerMock() { return configServerMock; } @@ -283,4 +291,7 @@ public class ServiceRegistryMock extends AbstractComponent implements ServiceReg return 
endpointCertificateMock; } + public RoleMaintainerMock roleMaintainerMock() { + return roleMaintainer; + } } diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/UserManagementMaintainerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/UserManagementMaintainerTest.java index 52cb3ce121f..e35c2058eb4 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/UserManagementMaintainerTest.java +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/UserManagementMaintainerTest.java @@ -1,13 +1,8 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.vespa.hosted.controller.maintenance; -import com.yahoo.config.provision.ApplicationName; -import com.yahoo.config.provision.TenantName; +import com.yahoo.config.provision.SystemName; import com.yahoo.vespa.hosted.controller.ControllerTester; -import com.yahoo.vespa.hosted.controller.api.integration.stubs.MockUserManagement; -import com.yahoo.vespa.hosted.controller.api.integration.user.Roles; -import com.yahoo.vespa.hosted.controller.api.integration.user.UserManagement; -import com.yahoo.vespa.hosted.controller.api.role.Role; import org.junit.Test; import java.time.Duration; @@ -19,29 +14,45 @@ import static org.junit.Assert.*; */ public class UserManagementMaintainerTest { - private final ControllerTester tester = new ControllerTester(); - private final UserManagement userManagement = new MockUserManagement(); - private final UserManagementMaintainer userManagementMaintainer = new UserManagementMaintainer(tester.controller(), Duration.ofMinutes(1), userManagement); - - private final TenantName tenant = TenantName.from("tenant1"); - private final ApplicationName app = ApplicationName.from("app1"); - private final TenantName deletedTenant = TenantName.from("deleted-tenant"); + private final String TENANT_1 = "tenant1"; 
+ private final String TENANT_2 = "tenant2"; + private final String APP_NAME = "some-app"; @Test - public void finds_superfluous_roles() { - tester.createTenant(tenant.value()); - tester.createApplication(tenant.value(), app.value()); + public void deletes_tenant_when_not_public() { + var tester = createTester(SystemName.main); + var maintainer = new UserManagementMaintainer(tester.controller(), Duration.ofMinutes(5), tester.serviceRegistry().roleMaintainer()); + maintainer.maintain(); + + var tenants = tester.controller().tenants().asList(); + var apps = tester.controller().applications().asList(); + assertEquals(1, tenants.size()); + assertEquals(1, apps.size()); + assertEquals(TENANT_2, tenants.get(0).name().value()); + } - Roles.tenantRoles(tenant).forEach(userManagement::createRole); - Roles.applicationRoles(tenant, app).forEach(userManagement::createRole); - Roles.tenantRoles(deletedTenant).forEach(userManagement::createRole); - userManagement.createRole(Role.hostedSupporter()); + @Test + public void no_tenant_deletion_in_public() { + var tester = createTester(SystemName.Public); + var maintainer = new UserManagementMaintainer(tester.controller(), Duration.ofMinutes(5), tester.serviceRegistry().roleMaintainer()); + maintainer.maintain(); + + var tenants = tester.controller().tenants().asList(); + var apps = tester.controller().applications().asList(); + assertEquals(2, tenants.size()); + assertEquals(2, apps.size()); + } - var expectedRoles = Roles.tenantRoles(deletedTenant); - var actualRoles = userManagementMaintainer.findLeftoverRoles(); + private ControllerTester createTester(SystemName systemName) { + var tester = new ControllerTester(systemName); + tester.createTenant(TENANT_1); + tester.createTenant(TENANT_2); + tester.createApplication(TENANT_1, APP_NAME); + tester.createApplication(TENANT_2, APP_NAME); - assertEquals(expectedRoles.size(), actualRoles.size()); - assertTrue(expectedRoles.containsAll(actualRoles) && 
actualRoles.containsAll(expectedRoles)); + var tenantToDelete = tester.controller().tenants().get(TENANT_1).get(); + tester.serviceRegistry().roleMaintainerMock().mockTenantToDelete(tenantToDelete); + return tester; } -} +}
\ No newline at end of file diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializerTest.java index f1421b5affd..b33f8f6f7e7 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializerTest.java +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializerTest.java @@ -23,9 +23,9 @@ import com.yahoo.vespa.hosted.controller.application.DeploymentMetrics; import com.yahoo.vespa.hosted.controller.application.QuotaUsage; import com.yahoo.vespa.hosted.controller.application.TenantAndApplicationId; import com.yahoo.vespa.hosted.controller.metric.ApplicationMetrics; -import com.yahoo.vespa.hosted.controller.rotation.RotationId; -import com.yahoo.vespa.hosted.controller.rotation.RotationState; -import com.yahoo.vespa.hosted.controller.rotation.RotationStatus; +import com.yahoo.vespa.hosted.controller.routing.rotation.RotationId; +import com.yahoo.vespa.hosted.controller.routing.rotation.RotationState; +import com.yahoo.vespa.hosted.controller.routing.rotation.RotationStatus; import org.junit.Test; import java.nio.file.Files; diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/RoutingPolicySerializerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/RoutingPolicySerializerTest.java index 2e36b8969ba..422188420bd 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/RoutingPolicySerializerTest.java +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/RoutingPolicySerializerTest.java @@ -1,19 +1,19 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.vespa.hosted.controller.persistence; -import com.google.common.collect.ImmutableMap; import com.yahoo.config.provision.ApplicationId; import com.yahoo.config.provision.ClusterSpec; import com.yahoo.config.provision.HostName; import com.yahoo.config.provision.zone.ZoneId; import com.yahoo.vespa.hosted.controller.application.EndpointId; -import com.yahoo.vespa.hosted.controller.routing.RoutingStatus; import com.yahoo.vespa.hosted.controller.routing.RoutingPolicy; import com.yahoo.vespa.hosted.controller.routing.RoutingPolicyId; +import com.yahoo.vespa.hosted.controller.routing.RoutingStatus; import org.junit.Test; import java.time.Instant; import java.util.Iterator; +import java.util.List; import java.util.Optional; import java.util.Set; @@ -37,24 +37,24 @@ public class RoutingPolicySerializerTest { var id2 = new RoutingPolicyId(owner, ClusterSpec.Id.from("my-cluster2"), ZoneId.from("prod", "us-north-2")); - var policies = ImmutableMap.of(id1, new RoutingPolicy(id1, - HostName.from("long-and-ugly-name"), - Optional.of("zone1"), - instanceEndpoints, - applicationEndpoints, - new RoutingPolicy.Status(true, RoutingStatus.DEFAULT)), - id2, new RoutingPolicy(id2, - HostName.from("long-and-ugly-name-2"), - Optional.empty(), - instanceEndpoints, - Set.of(), - new RoutingPolicy.Status(false, - new RoutingStatus(RoutingStatus.Value.out, - RoutingStatus.Agent.tenant, - Instant.ofEpochSecond(123))))); + var policies = List.of(new RoutingPolicy(id1, + HostName.from("long-and-ugly-name"), + Optional.of("zone1"), + instanceEndpoints, + applicationEndpoints, + new RoutingPolicy.Status(true, RoutingStatus.DEFAULT)), + new RoutingPolicy(id2, + HostName.from("long-and-ugly-name-2"), + Optional.empty(), + instanceEndpoints, + Set.of(), + new RoutingPolicy.Status(false, + new RoutingStatus(RoutingStatus.Value.out, + RoutingStatus.Agent.tenant, + Instant.ofEpochSecond(123))))); var serialized = serializer.fromSlime(owner, serializer.toSlime(policies)); 
assertEquals(policies.size(), serialized.size()); - for (Iterator<RoutingPolicy> it1 = policies.values().iterator(), it2 = serialized.values().iterator(); it1.hasNext();) { + for (Iterator<RoutingPolicy> it1 = policies.iterator(), it2 = serialized.iterator(); it1.hasNext();) { var expected = it1.next(); var actual = it2.next(); assertEquals(expected.id(), actual.id()); diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/ApplicationRequestToDiscFilterRequestWrapper.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/ApplicationRequestToDiscFilterRequestWrapper.java index b25cb913c83..5f580b6f6b3 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/ApplicationRequestToDiscFilterRequestWrapper.java +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/ApplicationRequestToDiscFilterRequestWrapper.java @@ -2,13 +2,9 @@ package com.yahoo.vespa.hosted.controller.restapi; import com.yahoo.application.container.handler.Request; -import com.yahoo.jdisc.HeaderFields; -import com.yahoo.jdisc.http.Cookie; import com.yahoo.jdisc.http.HttpRequest; import com.yahoo.jdisc.http.filter.DiscFilterRequest; -import com.yahoo.jdisc.http.servlet.ServletOrJdiscHttpRequest; -import java.net.SocketAddress; import java.net.URI; import java.security.Principal; import java.security.cert.X509Certificate; @@ -16,7 +12,11 @@ import java.util.Collections; import java.util.Enumeration; import java.util.List; import java.util.Map; -import java.util.concurrent.TimeUnit; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.mock; /** * Wraps an {@link Request} into a {@link DiscFilterRequest}. Only a few methods are supported. 
@@ -35,72 +35,20 @@ public class ApplicationRequestToDiscFilterRequestWrapper extends DiscFilterRequ } public ApplicationRequestToDiscFilterRequestWrapper(Request request, List<X509Certificate> clientCertificateChain) { - super(new ServletOrJdiscHttpRequest() { - @Override - public void copyHeaders(HeaderFields target) { - request.getHeaders().forEach(target::add); - } - - @Override - public Map<String, List<String>> parameters() { - return Collections.emptyMap(); - } - - @Override - public URI getUri() { - return URI.create(request.getUri()).normalize(); // Consistent with what JDisc does. - } - - @Override - public HttpRequest.Version getVersion() { - throw new UnsupportedOperationException(); - } - - @Override - public String getRemoteHostAddress() { - throw new UnsupportedOperationException(); - } - - @Override - public String getRemoteHostName() { - throw new UnsupportedOperationException(); - } - - @Override - public int getRemotePort() { - throw new UnsupportedOperationException(); - } - - @Override - public void setRemoteAddress(SocketAddress remoteAddress) { - throw new UnsupportedOperationException(); - } - - @Override - public Map<String, Object> context() { - throw new UnsupportedOperationException(); - } - - @Override - public List<Cookie> decodeCookieHeader() { - throw new UnsupportedOperationException(); - } - - @Override - public void encodeCookieHeader(List<Cookie> cookies) { - throw new UnsupportedOperationException(); - } - - @Override - public long getConnectedAt(TimeUnit unit) { - throw new UnsupportedOperationException(); - } - }); + super(createDummyHttpRequest(request)); this.request = request; this.userPrincipal = request.getUserPrincipal().orElse(null); this.clientCertificateChain = clientCertificateChain; } + private static HttpRequest createDummyHttpRequest(Request req) { + HttpRequest dummy = mock(HttpRequest.class, invocation -> { throw new UnsupportedOperationException(); }); + 
doReturn(URI.create(req.getUri()).normalize()).when(dummy).getUri(); + doNothing().when(dummy).copyHeaders(any()); + doReturn(Map.of()).when(dummy).parameters(); + return dummy; + } + public Request getUpdatedRequest() { Request updatedRequest = new Request(this.request.getUri(), this.request.getBody(), this.request.getMethod(), this.userPrincipal); this.request.getHeaders().forEach(updatedRequest.getHeaders()::put); diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiTest.java index ae6232ae419..b6aa2313ab3 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiTest.java +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiTest.java @@ -52,7 +52,6 @@ import com.yahoo.vespa.hosted.controller.api.integration.resource.MeteringData; import com.yahoo.vespa.hosted.controller.api.integration.resource.ResourceAllocation; import com.yahoo.vespa.hosted.controller.api.integration.resource.ResourceSnapshot; import com.yahoo.vespa.hosted.controller.api.integration.stubs.MockMeteringClient; -import com.yahoo.vespa.hosted.controller.application.Change; import com.yahoo.vespa.hosted.controller.application.Deployment; import com.yahoo.vespa.hosted.controller.application.DeploymentMetrics; import com.yahoo.vespa.hosted.controller.application.TenantAndApplicationId; @@ -69,6 +68,7 @@ import com.yahoo.vespa.hosted.controller.notification.NotificationSource; import com.yahoo.vespa.hosted.controller.restapi.ContainerTester; import com.yahoo.vespa.hosted.controller.restapi.ControllerContainerTest; import com.yahoo.vespa.hosted.controller.routing.RoutingStatus; +import com.yahoo.vespa.hosted.controller.routing.context.DeploymentRoutingContext; import 
com.yahoo.vespa.hosted.controller.security.AthenzCredentials; import com.yahoo.vespa.hosted.controller.security.AthenzTenantSpec; import com.yahoo.vespa.hosted.controller.support.access.SupportAccessGrant; @@ -357,9 +357,6 @@ public class ApplicationApiTest extends ControllerContainerTest { ATHENZ_TENANT_DOMAIN_2, id2.application()); - // Trigger upgrade and then application change - deploymentTester.applications().deploymentTrigger().triggerChange(id2, Change.of(Version.fromString("7.0"))); - // POST an application package and a test jar, submitting a new application for production deployment. tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/submit", POST) .screwdriverIdentity(SCREWDRIVER_ID) @@ -388,11 +385,6 @@ public class ApplicationApiTest extends ControllerContainerTest { // GET application having both change and outstanding change tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", GET) - .userIdentity(USER_ID), - new File("application2.json")); - - // GET application having both change and outstanding change - tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", GET) .screwdriverIdentity(SCREWDRIVER_ID), new File("application2.json")); @@ -524,6 +516,17 @@ public class ApplicationApiTest extends ControllerContainerTest { .userIdentity(USER_ID), new File("proton-metrics.json")); + // POST a roll-out of the latest application + tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/application", POST) + .userIdentity(USER_ID), + "{\"message\":\"Triggered application change to 1.0.1-commit1 for tenant1.application1.instance1\"}"); + + // POST a roll-out of a given revision + tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/application", POST) + .data("{ \"build\": 1 }") + .userIdentity(USER_ID), + "{\"message\":\"Triggered 
application change to 1.0.1-commit1 for tenant1.application1.instance1\"}"); + // DELETE (cancel) ongoing change tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", DELETE) .userIdentity(HOSTED_VESPA_OPERATOR), @@ -891,6 +894,8 @@ public class ApplicationApiTest extends ControllerContainerTest { // Forget a deleted tenant tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE).properties(Map.of("forget", "true")) + .data("{\"athensDomain\":\"domain1\"}") + .oktaAccessToken(OKTA_AT).oktaIdentityToken(OKTA_IT) .userIdentity(HOSTED_VESPA_OPERATOR), "{\"message\":\"Deleted tenant tenant1\"}"); tester.assertResponse(request("/application/v4/tenant/tenant1", GET).properties(Map.of("includeDeleted", "true")) @@ -1868,13 +1873,12 @@ public class ApplicationApiTest extends ControllerContainerTest { } private void assertGlobalRouting(DeploymentId deployment, RoutingStatus.Value value, RoutingStatus.Agent agent) { - var changedAt = tester.controller().clock().instant(); - var westPolicies = tester.controller().routing().policies().get(deployment); - assertEquals(1, westPolicies.size()); - var westPolicy = westPolicies.values().iterator().next(); - assertEquals(value, westPolicy.status().routingStatus().value()); - assertEquals(agent, westPolicy.status().routingStatus().agent()); - assertEquals(changedAt.truncatedTo(ChronoUnit.MILLIS), westPolicy.status().routingStatus().changedAt()); + Instant changedAt = tester.controller().clock().instant(); + DeploymentRoutingContext context = tester.controller().routing().of(deployment); + RoutingStatus status = context.routingStatus(); + assertEquals(value, status.value()); + assertEquals(agent, status.agent()); + assertEquals(changedAt, status.changedAt()); } private static class RequestBuilder implements Supplier<Request> { diff --git 
a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/application2-with-patches.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/application2-with-patches.json index 28732acb1df..df3f9699677 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/application2-with-patches.json +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/application2-with-patches.json @@ -23,9 +23,6 @@ { "instance": "instance1", "deploying": { - "version": "7" - }, - "outstandingChange": { "revision": { "buildNumber": 1, "hash": "1.0.1-commit1", diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/application2.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/application2.json index d009af005e4..9ef46247629 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/application2.json +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/application2.json @@ -22,9 +22,6 @@ { "instance": "instance1", "deploying": { - "version": "7" - }, - "outstandingChange": { "revision": { "buildNumber": 1, "hash": "1.0.1-commit1", diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/global-rotation-get.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/global-rotation-get.json index 934e0cf43b9..de2266fd197 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/global-rotation-get.json +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/global-rotation-get.json @@ -4,7 +4,7 @@ { "status": "in", 
"reason": "", - "agent": "", + "agent": "unknown", "timestamp": 1497618757 } ] diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/os/OsApiTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/os/OsApiTest.java index c9a09116263..6b6ced68b0a 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/os/OsApiTest.java +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/os/OsApiTest.java @@ -26,6 +26,7 @@ import org.junit.Test; import java.io.File; import java.time.Duration; +import java.time.Instant; import java.util.List; import static org.junit.Assert.assertFalse; @@ -49,6 +50,7 @@ public class OsApiTest extends ControllerContainerTest { @Before public void before() { tester = new ContainerTester(container, responses); + tester.serviceRegistry().clock().setInstant(Instant.ofEpochMilli(1234)); addUserToHostedOperatorRole(operator); zoneRegistryMock().setSystemName(SystemName.cd) .setZones(zone1, zone2, zone3) diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/os/responses/versions-all-upgraded.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/os/responses/versions-all-upgraded.json index e1b70c780c5..a5af4f45370 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/os/responses/versions-all-upgraded.json +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/os/responses/versions-all-upgraded.json @@ -4,6 +4,7 @@ "version": "7.5.2", "targetVersion": true, "upgradeBudget": "PT0S", + "scheduledAt": 1234, "cloud": "cloud1", "nodes": [ { @@ -102,6 +103,7 @@ "version": "8.2.1", "targetVersion": true, "upgradeBudget": "PT24H", + "scheduledAt": 1234, "cloud": "cloud2", "nodes": [ { diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/os/responses/versions-partially-upgraded.json 
b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/os/responses/versions-partially-upgraded.json index a9186a9d733..4e026a2c881 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/os/responses/versions-partially-upgraded.json +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/os/responses/versions-partially-upgraded.json @@ -56,6 +56,7 @@ "version": "7.5.2", "targetVersion": true, "upgradeBudget": "PT0S", + "scheduledAt": 1234, "cloud": "cloud1", "nodes": [ { @@ -161,6 +162,7 @@ "version": "8.2.1", "targetVersion": true, "upgradeBudget": "PT24H", + "scheduledAt": 1234, "cloud": "cloud2", "nodes": [] } diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/UserApiTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/UserApiTest.java index 1d41beb8a99..537f6c48bdf 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/UserApiTest.java +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/UserApiTest.java @@ -8,6 +8,7 @@ import com.yahoo.vespa.flags.Flags; import com.yahoo.vespa.flags.InMemoryFlagSource; import com.yahoo.vespa.flags.PermanentFlags; import com.yahoo.vespa.hosted.controller.ControllerTester; +import com.yahoo.vespa.hosted.controller.api.integration.billing.PlanId; import com.yahoo.vespa.hosted.controller.api.integration.user.User; import com.yahoo.vespa.hosted.controller.api.role.Role; import com.yahoo.vespa.hosted.controller.restapi.ContainerTester; @@ -270,4 +271,27 @@ public class UserApiTest extends ControllerContainerCloudTest { new File("user-without-trial-capacity-cloud.json")); } } + + @Test + public void supportTenant() { + try (Flags.Replacer ignored = Flags.clearFlagsForTesting(PermanentFlags.MAX_TRIAL_TENANTS.id(), PermanentFlags.ENABLE_PUBLIC_SIGNUP_FLOW.id())) { + ContainerTester tester = new 
ContainerTester(container, responseFiles); + ((InMemoryFlagSource) tester.controller().flagSource()) + .withIntFlag(PermanentFlags.MAX_TRIAL_TENANTS.id(), 10) + .withBooleanFlag(PermanentFlags.ENABLE_PUBLIC_SIGNUP_FLOW.id(), true); + ControllerTester controller = new ControllerTester(tester); + User user = new User("dev@domail", "Joe Developer", "dev", null); + + var tenant1 = controller.createTenant("tenant1", Tenant.Type.cloud); + var tenant2 = controller.createTenant("tenant2", Tenant.Type.cloud); + controller.serviceRegistry().billingController().setPlan(tenant2, PlanId.from("paid"), false); + + tester.assertResponse( + request("/user/v1/user") + .roles(Role.reader(tenant1), Role.reader(tenant2)) + .user(user), + new File("user-with-supported-tenant.json")); + } + + } } diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-with-applications-athenz.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-with-applications-athenz.json index 006c3b98a4d..0211f595ce7 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-with-applications-athenz.json +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-with-applications-athenz.json @@ -10,6 +10,7 @@ }, "tenants": { "sandbox": { + "supported": (ignore), "roles": [ "administrator", "developer", @@ -17,6 +18,7 @@ ] }, "tenant1": { + "supported": (ignore), "roles": [ "administrator", "developer", @@ -24,6 +26,7 @@ ] }, "tenant2": { + "supported": (ignore), "roles": [ "administrator", "developer", diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-with-applications-cloud.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-with-applications-cloud.json index 4ae55e97baa..76904bf9bb4 100644 --- 
a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-with-applications-cloud.json +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-with-applications-cloud.json @@ -10,17 +10,20 @@ }, "tenants": { "sandbox": { + "supported": false, "roles": [ "developer", "reader" ] }, "tenant1": { + "supported": false, "roles": [ "administrator" ] }, "tenant2": { + "supported": false, "roles": [ "developer" ] diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-with-supported-tenant.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-with-supported-tenant.json new file mode 100644 index 00000000000..a40354a9e71 --- /dev/null +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-with-supported-tenant.json @@ -0,0 +1,35 @@ +{ + "isPublic": true, + "isCd": false, + "hasTrialCapacity": true, + "user": { + "name": "Joe Developer", + "email": "dev@domail", + "nickname": "dev", + "verified": false + }, + "tenants": { + "tenant1": { + "supported": false, + "roles": [ + "reader" + ] + }, + "tenant2": { + "supported": true, + "roles": [ + "reader" + ] + } + }, + "flags": [ + { + "id": "enable-public-signup-flow", + "rules": [ + { + "value": false + } + ] + } + ] +}
\ No newline at end of file diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/routing/RoutingPoliciesTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/routing/RoutingPoliciesTest.java index c40cb20a0bc..1919de33e8b 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/routing/RoutingPoliciesTest.java +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/routing/RoutingPoliciesTest.java @@ -41,7 +41,6 @@ import java.time.Duration; import java.time.Instant; import java.time.temporal.ChronoUnit; import java.util.ArrayList; -import java.util.Collection; import java.util.HashMap; import java.util.HashSet; import java.util.LinkedHashMap; @@ -124,22 +123,33 @@ public class RoutingPoliciesTest { context2.submit(applicationPackage3).deferLoadBalancerProvisioningIn(Environment.prod).deploy(); tester.assertTargets(context2.instanceId(), EndpointId.of("r0"), 0, zone1, zone2); + // A deployment of app2 is removed + var applicationPackage4 = applicationPackageBuilder() + .region(zone1.region()) + .endpoint("r0", "c0") + .allow(ValidationId.globalEndpointChange) + .allow(ValidationId.deploymentRemoval) + .build(); + context2.submit(applicationPackage4).deferLoadBalancerProvisioningIn(Environment.prod).deploy(); + tester.assertTargets(context2.instanceId(), EndpointId.of("r0"), 0, zone1); + assertEquals(1, tester.policiesOf(context2.instanceId()).size()); + // All endpoints for app1 are removed - ApplicationPackage applicationPackage4 = applicationPackageBuilder() + ApplicationPackage applicationPackage5 = applicationPackageBuilder() .region(zone1.region()) .region(zone2.region()) .region(zone3.region()) .allow(ValidationId.globalEndpointChange) .build(); - context1.submit(applicationPackage4).deferLoadBalancerProvisioningIn(Environment.prod).deploy(); + context1.submit(applicationPackage5).deferLoadBalancerProvisioningIn(Environment.prod).deploy(); 
tester.assertTargets(context1.instanceId(), EndpointId.of("r0"), 0); tester.assertTargets(context1.instanceId(), EndpointId.of("r1"), 0); tester.assertTargets(context1.instanceId(), EndpointId.of("r2"), 0); var policies = tester.policiesOf(context1.instanceId()); assertEquals(clustersPerZone * numberOfDeployments, policies.size()); assertTrue("Rotation membership is removed from all policies", - policies.stream().allMatch(policy -> policy.instanceEndpoints().isEmpty())); - assertEquals("Rotations for " + context2.application() + " are not removed", 2, tester.aliasDataOf(endpoint4).size()); + policies.asList().stream().allMatch(policy -> policy.instanceEndpoints().isEmpty())); + assertEquals("Rotations for " + context2.application() + " are not removed", 1, tester.aliasDataOf(endpoint4).size()); } @Test @@ -306,8 +316,8 @@ public class RoutingPoliciesTest { "c1.app1.tenant1.us-central-1.vespa.oath.cloud" ); assertEquals(expectedRecords, tester.recordNames()); - assertTrue("Removes stale routing policies " + context2.application(), tester.routingPolicies().get(context2.instanceId()).isEmpty()); - assertEquals("Keeps routing policies for " + context1.application(), 4, tester.routingPolicies().get(context1.instanceId()).size()); + assertTrue("Removes stale routing policies " + context2.application(), tester.routingPolicies().read(context2.instanceId()).isEmpty()); + assertEquals("Keeps routing policies for " + context1.application(), 4, tester.routingPolicies().read(context1.instanceId()).size()); } @Test @@ -490,13 +500,13 @@ public class RoutingPoliciesTest { tester.assertTargets(context.instanceId(), EndpointId.of("r1"), 0, zone2); // Status details is stored in policy - var policy1 = tester.routingPolicies().get(context.deploymentIdIn(zone1)).values().iterator().next(); + var policy1 = tester.routingPolicies().read(context.deploymentIdIn(zone1)).first().get(); assertEquals(RoutingStatus.Value.out, policy1.status().routingStatus().value()); 
assertEquals(RoutingStatus.Agent.tenant, policy1.status().routingStatus().agent()); assertEquals(changedAt.truncatedTo(ChronoUnit.MILLIS), policy1.status().routingStatus().changedAt()); // Other zone remains in - var policy2 = tester.routingPolicies().get(context.deploymentIdIn(zone2)).values().iterator().next(); + var policy2 = tester.routingPolicies().read(context.deploymentIdIn(zone2)).first().get(); assertEquals(RoutingStatus.Value.in, policy2.status().routingStatus().value()); assertEquals(RoutingStatus.Agent.system, policy2.status().routingStatus().agent()); assertEquals(Instant.EPOCH, policy2.status().routingStatus().changedAt()); @@ -515,7 +525,7 @@ public class RoutingPoliciesTest { tester.assertTargets(context.instanceId(), EndpointId.of("r0"), 0, zone1, zone2); tester.assertTargets(context.instanceId(), EndpointId.of("r1"), 0, zone1, zone2); - policy1 = tester.routingPolicies().get(context.deploymentIdIn(zone1)).values().iterator().next(); + policy1 = tester.routingPolicies().read(context.deploymentIdIn(zone1)).first().get(); assertEquals(RoutingStatus.Value.in, policy1.status().routingStatus().value()); assertEquals(RoutingStatus.Agent.tenant, policy1.status().routingStatus().agent()); assertEquals(changedAt.truncatedTo(ChronoUnit.MILLIS), policy1.status().routingStatus().changedAt()); @@ -568,9 +578,9 @@ public class RoutingPoliciesTest { tester.assertTargets(context1.instanceId(), EndpointId.defaultId(), 0, zone1); tester.assertTargets(context2.instanceId(), EndpointId.defaultId(), 0, zone1); for (var context : contexts) { - var policies = tester.routingPolicies().get(context.instanceId()); + var policies = tester.routingPolicies().read(context.instanceId()); assertTrue("Global routing status for policy remains " + RoutingStatus.Value.in, - policies.values().stream() + policies.asList().stream() .map(RoutingPolicy::status) .map(RoutingPolicy.Status::routingStatus) .map(RoutingStatus::value) @@ -668,7 +678,7 @@ public class RoutingPoliciesTest { // 
Setting zone (containing active deployment) out puts all deployments in tester.routingPolicies().setRoutingStatus(zone1, RoutingStatus.Value.out); context.flushDnsUpdates(); - assertEquals(RoutingStatus.Value.out, tester.routingPolicies().get(zone1).routingStatus().value()); + assertEquals(RoutingStatus.Value.out, tester.routingPolicies().read(zone1).routingStatus().value()); tester.assertTargets(context.instanceId(), EndpointId.of("r0"), 0, ImmutableMap.of(zone1, 0L, zone2, 0L)); // Setting zone back in removes the currently inactive deployment @@ -680,7 +690,7 @@ public class RoutingPoliciesTest { tester.routingPolicies().setRoutingStatus(context.deploymentIdIn(zone2), RoutingStatus.Value.in, RoutingStatus.Agent.tenant); context.flushDnsUpdates(); - for (var policy : tester.routingPolicies().get(context.instanceId()).values()) { + for (var policy : tester.routingPolicies().read(context.instanceId())) { assertSame(RoutingStatus.Value.in, policy.status().routingStatus().value()); } tester.assertTargets(context.instanceId(), EndpointId.of("r0"), 0, zone1, zone2); @@ -693,7 +703,7 @@ public class RoutingPoliciesTest { RecordName name = RecordName.from("cfg.prod.us-west-1.test.vip"); tester.provisionLoadBalancers(1, app, zone1); - tester.routingPolicies().refresh(app, DeploymentSpec.empty, zone1); + tester.routingPolicies().refresh(new DeploymentId(app, zone1), DeploymentSpec.empty); new NameServiceDispatcher(tester.tester.controller(), Duration.ofSeconds(Integer.MAX_VALUE)).run(); List<Record> records = tester.controllerTester().nameService().findRecords(Record.Type.CNAME, name); @@ -793,18 +803,12 @@ public class RoutingPoliciesTest { var applicationPackage = applicationPackageBuilder() .instances("beta,main") .region(zone1.region()) - .region(zone2.region()) .applicationEndpoint("a0", "c0", "us-west-1", Map.of(betaInstance.instance(), 2, mainInstance.instance(), 8)) - .applicationEndpoint("a1", "c1", "us-central-1", - Map.of(betaInstance.instance(), 4, - 
mainInstance.instance(), 0)) .build(); - for (var zone : List.of(zone1, zone2)) { - tester.provisionLoadBalancers(2, betaInstance, zone); - tester.provisionLoadBalancers(2, mainInstance, zone); - } + tester.provisionLoadBalancers(1, betaInstance, zone1); + tester.provisionLoadBalancers(1, mainInstance, zone1); // Deploy both instances betaContext.submit(applicationPackage).deploy(); @@ -812,35 +816,36 @@ public class RoutingPoliciesTest { // Application endpoint points to both instances with correct weights DeploymentId betaZone1 = betaContext.deploymentIdIn(zone1); DeploymentId mainZone1 = mainContext.deploymentIdIn(zone1); - DeploymentId betaZone2 = betaContext.deploymentIdIn(zone2); - DeploymentId mainZone2 = mainContext.deploymentIdIn(zone2); tester.assertTargets(application, EndpointId.of("a0"), ClusterSpec.Id.from("c0"), 0, Map.of(betaZone1, 2, mainZone1, 8)); - tester.assertTargets(application, EndpointId.of("a1"), ClusterSpec.Id.from("c1"), 1, - Map.of(betaZone2, 4, - mainZone2, 0)); - // Changing routing status updates weight + // Changing routing status removes deployment from DNS tester.routingPolicies().setRoutingStatus(mainZone1, RoutingStatus.Value.out, RoutingStatus.Agent.tenant); betaContext.flushDnsUpdates(); tester.assertTargets(application, EndpointId.of("a0"), ClusterSpec.Id.from("c0"), 0, - Map.of(betaZone1, 2, - mainZone1, 0)); - tester.routingPolicies().setRoutingStatus(mainZone1, RoutingStatus.Value.in, RoutingStatus.Agent.tenant); + Map.of(betaZone1, 2)); + + // Changing routing status for remaining deployment adds back all deployments, because removing all deployments + // puts all IN + tester.routingPolicies().setRoutingStatus(betaZone1, RoutingStatus.Value.out, RoutingStatus.Agent.tenant); betaContext.flushDnsUpdates(); tester.assertTargets(application, EndpointId.of("a0"), ClusterSpec.Id.from("c0"), 0, Map.of(betaZone1, 2, mainZone1, 8)); - // Changing routing status preserves weights if change in routing status would result in a zero 
weight sum - // Otherwise this would result in both targets have weight 0 and thus traffic would be distributed evenly across - // all targets which does not match intention of taking out a deployment - tester.routingPolicies().setRoutingStatus(betaZone2, RoutingStatus.Value.out, RoutingStatus.Agent.tenant); + // Activating main deployment allows us to deactivate the beta deployment + tester.routingPolicies().setRoutingStatus(mainZone1, RoutingStatus.Value.in, RoutingStatus.Agent.tenant); betaContext.flushDnsUpdates(); - tester.assertTargets(application, EndpointId.of("a1"), ClusterSpec.Id.from("c1"), 1, - Map.of(betaZone2, 4, - mainZone2, 0)); + tester.assertTargets(application, EndpointId.of("a0"), ClusterSpec.Id.from("c0"), 0, + Map.of(mainZone1, 8)); + + // Activate all deployments again + tester.routingPolicies().setRoutingStatus(betaZone1, RoutingStatus.Value.in, RoutingStatus.Agent.tenant); + betaContext.flushDnsUpdates(); + tester.assertTargets(application, EndpointId.of("a0"), ClusterSpec.Id.from("c0"), 0, + Map.of(betaZone1, 2, + mainZone1, 8)); } /** Returns an application package builder that satisfies requirements for a directly routed endpoint */ @@ -936,8 +941,8 @@ public class RoutingPoliciesTest { provisionLoadBalancers(clustersPerZone, application, false, zones); } - private Collection<RoutingPolicy> policiesOf(ApplicationId instance) { - return tester.controller().curator().readRoutingPolicies(instance).values(); + private RoutingPolicyList policiesOf(ApplicationId instance) { + return tester.controller().routing().policies().read(instance); } private Set<String> recordNames() { @@ -995,8 +1000,8 @@ public class RoutingPoliciesTest { for (var zone : zoneWeights.keySet()) { DeploymentId deployment = new DeploymentId(instance, zone); EndpointList regionEndpoints = tester.controller().routing().readEndpointsOf(deployment) - .cluster(cluster) - .scope(Endpoint.Scope.weighted); + .cluster(cluster) + .scope(Endpoint.Scope.weighted); Endpoint 
regionEndpoint = regionEndpoints.first().orElseThrow(() -> new IllegalArgumentException("No region endpoint found for " + cluster + " in " + deployment)); zonesByRegionEndpoint.computeIfAbsent(regionEndpoint.dnsName(), (k) -> new ArrayList<>()) .add(zone); diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/rotation/RotationRepositoryTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/routing/rotation/RotationRepositoryTest.java index e7c2eacbd02..9a56123e8e3 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/rotation/RotationRepositoryTest.java +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/routing/rotation/RotationRepositoryTest.java @@ -1,5 +1,5 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -package com.yahoo.vespa.hosted.controller.rotation; +package com.yahoo.vespa.hosted.controller.routing.rotation; import com.yahoo.config.provision.RegionName; import com.yahoo.config.provision.SystemName; @@ -205,10 +205,9 @@ public class RotationRepositoryTest { private void assertSingleRotation(Rotation expected, List<AssignedRotation> assignedRotations, RotationRepository repository) { assertEquals(1, assignedRotations.size()); - var rotationId = assignedRotations.get(0).rotationId(); - var rotation = repository.getRotation(rotationId); - assertTrue(rotationId + " exists", rotation.isPresent()); - assertEquals(expected, rotation.get()); + RotationId rotationId = assignedRotations.get(0).rotationId(); + Rotation rotation = repository.requireRotation(rotationId); + assertEquals(expected, rotation); } private static List<RotationId> rotationIds(List<AssignedRotation> assignedRotations) { diff --git a/controller-server/src/test/resources/test_runner_services.xml-cd b/controller-server/src/test/resources/test_runner_services.xml-cd index 634137e3fb6..4ab4b08351a 100644 --- 
a/controller-server/src/test/resources/test_runner_services.xml-cd +++ b/controller-server/src/test/resources/test_runner_services.xml-cd @@ -27,6 +27,7 @@ <component id="com.yahoo.vespa.testrunner.VespaCliTestRunner" bundle="vespa-osgi-testrunner"> <config name="com.yahoo.vespa.testrunner.vespa-cli-test-runner"> <artifactsPath>artifacts</artifactsPath> + <testsPath>tests</testsPath> <useAthenzCredentials>true</useAthenzCredentials> </config> </component> diff --git a/default_build_settings.cmake b/default_build_settings.cmake index b0dfed2bfd5..599aca098ec 100644 --- a/default_build_settings.cmake +++ b/default_build_settings.cmake @@ -32,16 +32,22 @@ function(setup_vespa_default_build_settings_centos_8) message("-- Setting up default build settings for centos 8") set(DEFAULT_EXTRA_INCLUDE_DIRECTORY "${VESPA_DEPS}/include" PARENT_SCOPE) if (VESPA_OS_DISTRO_NAME STREQUAL "CentOS Stream") - set(DEFAULT_VESPA_LLVM_VERSION "12" PARENT_SCOPE) + set(DEFAULT_VESPA_LLVM_VERSION "13" PARENT_SCOPE) else() set(DEFAULT_VESPA_LLVM_VERSION "12" PARENT_SCOPE) endif() endfunction() -function(setup_vespa_default_build_settings_rocky_8_4) - message("-- Setting up default build settings for rocky 8.4") +function(setup_vespa_default_build_settings_rocky_8_5) + message("-- Setting up default build settings for rocky 8.5") set(DEFAULT_EXTRA_INCLUDE_DIRECTORY "${VESPA_DEPS}/include" PARENT_SCOPE) - set(DEFAULT_VESPA_LLVM_VERSION "11" PARENT_SCOPE) + set(DEFAULT_VESPA_LLVM_VERSION "12" PARENT_SCOPE) +endfunction() + +function(setup_vespa_default_build_settings_almalinux_8_5) + message("-- Setting up default build settings for almalinux 8.5") + set(DEFAULT_EXTRA_INCLUDE_DIRECTORY "${VESPA_DEPS}/include" PARENT_SCOPE) + set(DEFAULT_VESPA_LLVM_VERSION "12" PARENT_SCOPE) endfunction() function(setup_vespa_default_build_settings_darwin) @@ -192,8 +198,10 @@ function(vespa_use_default_build_settings) setup_vespa_default_build_settings_centos_7() elseif(VESPA_OS_DISTRO_COMBINED STREQUAL "centos 
8") setup_vespa_default_build_settings_centos_8() - elseif(VESPA_OS_DISTRO_COMBINED STREQUAL "rocky 8.4") - setup_vespa_default_build_settings_rocky_8_4() + elseif(VESPA_OS_DISTRO_COMBINED STREQUAL "rocky 8.5") + setup_vespa_default_build_settings_rocky_8_5() + elseif(VESPA_OS_DISTRO_COMBINED STREQUAL "almalinux 8.5") + setup_vespa_default_build_settings_almalinux_8_5() elseif(VESPA_OS_DISTRO STREQUAL "darwin") setup_vespa_default_build_settings_darwin() elseif(VESPA_OS_DISTRO_COMBINED STREQUAL "fedora 32") @@ -9,7 +9,7 @@ fi VERSION="$1" mkdir -p ~/rpmbuild/{SOURCES,SPECS} -GZIP=-1 tar -zcf ~/rpmbuild/SOURCES/vespa-$VERSION.tar.gz --exclude target --exclude cmake-build-debug --transform "flags=r;s,^,vespa-$VERSION/," * +git archive --format=tar.gz --prefix=vespa-$VERSION/ -o ~/rpmbuild/SOURCES/vespa-$VERSION.tar.gz HEAD DIST_FILE="dist/vespa.spec" # When checking out relase tags, the vespa.spec is in the source root folder. This is a workaround to be able to build rpms from a release tag. diff --git a/dist/vespa.spec b/dist/vespa.spec index bfb4c14bdeb..f18c802d5fc 100644 --- a/dist/vespa.spec +++ b/dist/vespa.spec @@ -6,6 +6,11 @@ # Only strip debug info %global _find_debuginfo_opts -g +# Go binaries' build-ids are not recognized by RPMs yet, see +# https://github.com/rpm-software-management/rpm/issues/367 and +# https://github.com/tpokorra/lbs-mono-fedora/issues/3#issuecomment-219857688. 
+%undefine _missing_build_ids_terminate_build + # Force special prefix for Vespa %define _prefix /opt/vespa %define _vespa_deps_prefix /opt/vespa-deps @@ -57,10 +62,18 @@ BuildRequires: vespa-pybind11-devel BuildRequires: python3-devel %endif %if 0%{?el8} +%global _centos_stream %(grep -qs '^NAME="CentOS Stream"' /etc/os-release && echo 1 || echo 0) +%if 0%{?_centos_stream} +BuildRequires: gcc-toolset-11-gcc-c++ +BuildRequires: gcc-toolset-11-binutils +BuildRequires: gcc-toolset-11-libatomic-devel +%define _devtoolset_enable /opt/rh/gcc-toolset-11/enable +%else BuildRequires: gcc-toolset-10-gcc-c++ BuildRequires: gcc-toolset-10-binutils BuildRequires: gcc-toolset-10-libatomic-devel %define _devtoolset_enable /opt/rh/gcc-toolset-10/enable +%endif BuildRequires: maven BuildRequires: pybind11-devel BuildRequires: python3-pytest @@ -97,9 +110,8 @@ BuildRequires: cmake >= 3.11.4-3 BuildRequires: libarchive %endif %define _command_cmake cmake -%global _centos_stream %(grep -qs '^NAME="CentOS Stream"' /etc/os-release && echo 1 || echo 0) %if 0%{?_centos_stream} -BuildRequires: (llvm-devel >= 12.0.0 and llvm-devel < 13) +BuildRequires: (llvm-devel >= 13.0.0 and llvm-devel < 14) %else BuildRequires: (llvm-devel >= 12.0.0 and llvm-devel < 13) %endif @@ -187,7 +199,13 @@ BuildRequires: java-11-openjdk-devel %endif BuildRequires: rpm-build BuildRequires: make +%if 0%{?el7} && ! 
0%{?amzn2} +BuildRequires: rh-git227 +%define _rhgit227_enable /opt/rh/rh-git227/enable +%else BuildRequires: git +%endif +BuildRequires: golang BuildRequires: systemd BuildRequires: flex >= 2.5.0 BuildRequires: bison >= 3.0.0 @@ -244,7 +262,7 @@ Requires: vespa-gtest = 1.11.0 %if 0%{?el8} %if 0%{?centos} || 0%{?rocky} %if 0%{?_centos_stream} -%define _vespa_llvm_version 12 +%define _vespa_llvm_version 13 %else %define _vespa_llvm_version 12 %endif @@ -275,18 +293,13 @@ Requires: gtest %define _extra_link_directory %{_vespa_deps_prefix}/lib64 %define _extra_include_directory %{_vespa_deps_prefix}/include;/usr/include/openblas %endif -%ifnarch x86_64 -%define _skip_vespamalloc 1 -%endif Requires: %{name}-base = %{version}-%{release} Requires: %{name}-base-libs = %{version}-%{release} Requires: %{name}-libs = %{version}-%{release} Requires: %{name}-clients = %{version}-%{release} Requires: %{name}-config-model-fat = %{version}-%{release} Requires: %{name}-jars = %{version}-%{release} -%if ! 0%{?_skip_vespamalloc:1} Requires: %{name}-malloc = %{version}-%{release} -%endif Requires: %{name}-tools = %{version}-%{release} # Ugly workaround because vespamalloc/src/vespamalloc/malloc/mmap.cpp uses the private @@ -373,7 +386,7 @@ Requires: openssl-libs %if 0%{?el8} %if 0%{?centos} || 0%{?rocky} %if 0%{?_centos_stream} -Requires: (llvm-libs >= 12.0.0 and llvm-libs < 13) +Requires: (llvm-libs >= 13.0.0 and llvm-libs < 14) %else Requires: (llvm-libs >= 12.0.0 and llvm-libs < 13) %endif @@ -441,7 +454,6 @@ Summary: Vespa - The open big data serving engine - shared java jar files Vespa - The open big data serving engine - shared java jar files -%if ! 
0%{?_skip_vespamalloc:1} %package malloc Summary: Vespa - The open big data serving engine - malloc library @@ -449,7 +461,6 @@ Summary: Vespa - The open big data serving engine - malloc library %description malloc Vespa - The open big data serving engine - malloc library -%endif %package tools @@ -509,6 +520,9 @@ source %{_devtoolset_enable} || true %if 0%{?_rhmaven35_enable:1} source %{_rhmaven35_enable} || true %endif +%if 0%{?_rhgit227_enable:1} +source %{_rhgit227_enable} || true +%endif %if 0%{?_java_home:1} export JAVA_HOME=%{?_java_home} @@ -535,6 +549,7 @@ mvn --batch-mode -e -N io.takari:maven:wrapper -Dmaven=3.6.3 . make %{_smp_mflags} +VERSION=%{version} make -C client/go install-all %endif %install @@ -544,6 +559,14 @@ rm -rf %{buildroot} cp -r %{installdir} %{buildroot} %else make install DESTDIR=%{buildroot} +cp client/go/bin/vespa %{buildroot}%{_prefix}/bin/vespa +mkdir -p %{buildroot}/usr/share +cp -a client/go/share/* %{buildroot}/usr/share +%endif +# Otherwise installation may fail for find-debuginfo.sh/dwz: +# dwz: dwz.c:9899: read_dwarf: Assertion `data != ((void *)0) && data->d_buf != ((void *)0)' failed. +%if 0%{?el7} +strip %{buildroot}%{_prefix}/bin/vespa %endif %if %{_create_vespa_service} @@ -642,9 +665,7 @@ fi %dir %{_prefix}/etc %{_prefix}/etc/systemd %{_prefix}/etc/vespa -%if ! 0%{?_skip_vespamalloc:1} %exclude %{_prefix}/etc/vespamalloc.conf -%endif %{_prefix}/include %dir %{_prefix}/lib %dir %{_prefix}/lib/jars @@ -751,23 +772,25 @@ fi %exclude %{_prefix}/lib64/libvespadefaults.so %exclude %{_prefix}/lib64/libvespalib.so %exclude %{_prefix}/lib64/libvespalog.so -%if ! 
0%{?_skip_vespamalloc:1} %exclude %{_prefix}/lib64/vespa -%endif %files clients %if %{_defattr_is_vespa_vespa} %defattr(-,%{_vespa_user},%{_vespa_group},-) %endif %dir %{_prefix} +%dir %{_prefix}/bin %dir %{_prefix}/conf %dir %{_prefix}/conf/vespa-feed-client %dir %{_prefix}/lib %dir %{_prefix}/lib/jars +%{_prefix}/bin/vespa %{_prefix}/bin/vespa-feed-client %{_prefix}/conf/vespa-feed-client/logging.properties %{_prefix}/lib/jars/vespa-http-client-jar-with-dependencies.jar %{_prefix}/lib/jars/vespa-feed-client-cli-jar-with-dependencies.jar +%docdir /usr/share/man +/usr/share/man %files config-model-fat %if %{_defattr_is_vespa_vespa} @@ -848,7 +871,6 @@ fi %dir %{_prefix}/libexec/vespa %{_prefix}/libexec/vespa/standalone-container.sh -%if ! 0%{?_skip_vespamalloc:1} %files malloc %if %{_defattr_is_vespa_vespa} %defattr(-,%{_vespa_user},%{_vespa_group},-) @@ -858,7 +880,6 @@ fi %config(noreplace) %{_prefix}/etc/vespamalloc.conf %dir %{_prefix}/lib64 %{_prefix}/lib64/vespa -%endif %files tools %if %{_defattr_is_vespa_vespa} diff --git a/docprocs/src/test/java/com/yahoo/docprocs/indexing/ScriptManagerTestCase.java b/docprocs/src/test/java/com/yahoo/docprocs/indexing/ScriptManagerTestCase.java index ed996f56078..a35dd0da4f3 100644 --- a/docprocs/src/test/java/com/yahoo/docprocs/indexing/ScriptManagerTestCase.java +++ b/docprocs/src/test/java/com/yahoo/docprocs/indexing/ScriptManagerTestCase.java @@ -20,8 +20,7 @@ public class ScriptManagerTestCase { @Test public void requireThatScriptsAreAppliedToSubType() throws ParseException { - DocumentTypeManager typeMgr = new DocumentTypeManager(); - typeMgr.configure("file:src/test/cfg/documentmanager_inherit.cfg"); + var typeMgr = DocumentTypeManager.fromFile("src/test/cfg/documentmanager_inherit.cfg"); DocumentType docType = typeMgr.getDocumentType("newssummary"); assertNotNull(docType); @@ -36,8 +35,7 @@ public class ScriptManagerTestCase { @Test public void requireThatScriptsAreAppliedToSuperType() throws ParseException { - 
DocumentTypeManager typeMgr = new DocumentTypeManager(); - typeMgr.configure("file:src/test/cfg/documentmanager_inherit.cfg"); + var typeMgr = DocumentTypeManager.fromFile("src/test/cfg/documentmanager_inherit.cfg"); DocumentType docType = typeMgr.getDocumentType("newsarticle"); assertNotNull(docType); @@ -51,16 +49,14 @@ public class ScriptManagerTestCase { @Test public void requireThatEmptyConfigurationDoesNotThrow() { - DocumentTypeManager typeMgr = new DocumentTypeManager(); - typeMgr.configure("file:src/test/cfg/documentmanager_inherit.cfg"); + var typeMgr = DocumentTypeManager.fromFile("src/test/cfg/documentmanager_inherit.cfg"); ScriptManager scriptMgr = new ScriptManager(typeMgr, new IlscriptsConfig(new IlscriptsConfig.Builder()), null, Embedder.throwsOnUse); assertNull(scriptMgr.getScript(new DocumentType("unknown"))); } @Test public void requireThatUnknownDocumentTypeReturnsNull() { - DocumentTypeManager typeMgr = new DocumentTypeManager(); - typeMgr.configure("file:src/test/cfg/documentmanager_inherit.cfg"); + var typeMgr = DocumentTypeManager.fromFile("src/test/cfg/documentmanager_inherit.cfg"); ScriptManager scriptMgr = new ScriptManager(typeMgr, new IlscriptsConfig(new IlscriptsConfig.Builder()), null, Embedder.throwsOnUse); for (Iterator<DocumentType> it = typeMgr.documentTypeIterator(); it.hasNext(); ) { assertNull(scriptMgr.getScript(it.next())); diff --git a/document/abi-spec.json b/document/abi-spec.json index 39a93b2b2cb..d5ad686cd1f 100644 --- a/document/abi-spec.json +++ b/document/abi-spec.json @@ -501,6 +501,7 @@ "public void <init>(com.yahoo.document.config.DocumentmanagerConfig)", "public void assign(com.yahoo.document.DocumentTypeManager)", "public com.yahoo.document.DocumentTypeManager configure(java.lang.String)", + "public static com.yahoo.document.DocumentTypeManager fromFile(java.lang.String)", "public boolean hasDataType(java.lang.String)", "public boolean hasDataType(int)", "public com.yahoo.document.DataType 
getDataType(java.lang.String)", @@ -601,839 +602,6 @@ ], "fields": [] }, - "com.yahoo.document.DocumenttypesConfig$Builder": { - "superClass": "java.lang.Object", - "interfaces": [ - "com.yahoo.config.ConfigInstance$Builder" - ], - "attributes": [ - "public" - ], - "methods": [ - "public void <init>()", - "public void <init>(com.yahoo.document.DocumenttypesConfig)", - "public com.yahoo.document.DocumenttypesConfig$Builder enablecompression(boolean)", - "public com.yahoo.document.DocumenttypesConfig$Builder documenttype(com.yahoo.document.DocumenttypesConfig$Documenttype$Builder)", - "public com.yahoo.document.DocumenttypesConfig$Builder documenttype(java.util.List)", - "public final boolean dispatchGetConfig(com.yahoo.config.ConfigInstance$Producer)", - "public final java.lang.String getDefMd5()", - "public final java.lang.String getDefName()", - "public final java.lang.String getDefNamespace()", - "public final boolean getApplyOnRestart()", - "public final void setApplyOnRestart(boolean)", - "public com.yahoo.document.DocumenttypesConfig build()" - ], - "fields": [ - "public java.util.List documenttype" - ] - }, - "com.yahoo.document.DocumenttypesConfig$Documenttype$Annotationtype$Builder": { - "superClass": "java.lang.Object", - "interfaces": [ - "com.yahoo.config.ConfigBuilder" - ], - "attributes": [ - "public" - ], - "methods": [ - "public void <init>()", - "public void <init>(com.yahoo.document.DocumenttypesConfig$Documenttype$Annotationtype)", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Annotationtype$Builder id(int)", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Annotationtype$Builder name(java.lang.String)", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Annotationtype$Builder datatype(int)", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Annotationtype$Builder inherits(com.yahoo.document.DocumenttypesConfig$Documenttype$Annotationtype$Inherits$Builder)", - "public 
com.yahoo.document.DocumenttypesConfig$Documenttype$Annotationtype$Builder inherits(java.util.List)", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Annotationtype build()" - ], - "fields": [ - "public java.util.List inherits" - ] - }, - "com.yahoo.document.DocumenttypesConfig$Documenttype$Annotationtype$Inherits$Builder": { - "superClass": "java.lang.Object", - "interfaces": [ - "com.yahoo.config.ConfigBuilder" - ], - "attributes": [ - "public" - ], - "methods": [ - "public void <init>()", - "public void <init>(com.yahoo.document.DocumenttypesConfig$Documenttype$Annotationtype$Inherits)", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Annotationtype$Inherits$Builder id(int)", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Annotationtype$Inherits build()" - ], - "fields": [] - }, - "com.yahoo.document.DocumenttypesConfig$Documenttype$Annotationtype$Inherits": { - "superClass": "com.yahoo.config.InnerNode", - "interfaces": [], - "attributes": [ - "public", - "final" - ], - "methods": [ - "public void <init>(com.yahoo.document.DocumenttypesConfig$Documenttype$Annotationtype$Inherits$Builder)", - "public int id()" - ], - "fields": [] - }, - "com.yahoo.document.DocumenttypesConfig$Documenttype$Annotationtype": { - "superClass": "com.yahoo.config.InnerNode", - "interfaces": [], - "attributes": [ - "public", - "final" - ], - "methods": [ - "public void <init>(com.yahoo.document.DocumenttypesConfig$Documenttype$Annotationtype$Builder)", - "public int id()", - "public java.lang.String name()", - "public int datatype()", - "public java.util.List inherits()", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Annotationtype$Inherits inherits(int)" - ], - "fields": [] - }, - "com.yahoo.document.DocumenttypesConfig$Documenttype$Builder": { - "superClass": "java.lang.Object", - "interfaces": [ - "com.yahoo.config.ConfigBuilder" - ], - "attributes": [ - "public" - ], - "methods": [ - "public void <init>()", - "public void 
<init>(com.yahoo.document.DocumenttypesConfig$Documenttype)", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Builder id(int)", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Builder name(java.lang.String)", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Builder version(int)", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Builder headerstruct(int)", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Builder bodystruct(int)", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Builder inherits(com.yahoo.document.DocumenttypesConfig$Documenttype$Inherits$Builder)", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Builder inherits(java.util.List)", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Builder datatype(com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Builder)", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Builder datatype(java.util.List)", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Builder annotationtype(com.yahoo.document.DocumenttypesConfig$Documenttype$Annotationtype$Builder)", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Builder annotationtype(java.util.List)", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Builder fieldsets(java.lang.String, com.yahoo.document.DocumenttypesConfig$Documenttype$Fieldsets$Builder)", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Builder fieldsets(java.util.Map)", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Builder referencetype(com.yahoo.document.DocumenttypesConfig$Documenttype$Referencetype$Builder)", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Builder referencetype(java.util.List)", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Builder importedfield(com.yahoo.document.DocumenttypesConfig$Documenttype$Importedfield$Builder)", - "public 
com.yahoo.document.DocumenttypesConfig$Documenttype$Builder importedfield(java.util.List)", - "public com.yahoo.document.DocumenttypesConfig$Documenttype build()" - ], - "fields": [ - "public java.util.List inherits", - "public java.util.List datatype", - "public java.util.List annotationtype", - "public java.util.Map fieldsets", - "public java.util.List referencetype", - "public java.util.List importedfield" - ] - }, - "com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Annotationref$Annotation$Builder": { - "superClass": "java.lang.Object", - "interfaces": [ - "com.yahoo.config.ConfigBuilder" - ], - "attributes": [ - "public" - ], - "methods": [ - "public void <init>()", - "public void <init>(com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Annotationref$Annotation)", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Annotationref$Annotation$Builder id(int)", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Annotationref$Annotation build()" - ], - "fields": [] - }, - "com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Annotationref$Annotation": { - "superClass": "com.yahoo.config.InnerNode", - "interfaces": [], - "attributes": [ - "public", - "final" - ], - "methods": [ - "public void <init>(com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Annotationref$Annotation$Builder)", - "public int id()" - ], - "fields": [] - }, - "com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Annotationref$Builder": { - "superClass": "java.lang.Object", - "interfaces": [ - "com.yahoo.config.ConfigBuilder" - ], - "attributes": [ - "public" - ], - "methods": [ - "public void <init>()", - "public void <init>(com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Annotationref)", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Annotationref$Builder annotation(com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Annotationref$Annotation$Builder)", - "public 
com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Annotationref build()" - ], - "fields": [ - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Annotationref$Annotation$Builder annotation" - ] - }, - "com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Annotationref": { - "superClass": "com.yahoo.config.InnerNode", - "interfaces": [], - "attributes": [ - "public", - "final" - ], - "methods": [ - "public void <init>(com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Annotationref$Builder)", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Annotationref$Annotation annotation()" - ], - "fields": [] - }, - "com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Array$Builder": { - "superClass": "java.lang.Object", - "interfaces": [ - "com.yahoo.config.ConfigBuilder" - ], - "attributes": [ - "public" - ], - "methods": [ - "public void <init>()", - "public void <init>(com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Array)", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Array$Builder element(com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Array$Element$Builder)", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Array build()" - ], - "fields": [ - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Array$Element$Builder element" - ] - }, - "com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Array$Element$Builder": { - "superClass": "java.lang.Object", - "interfaces": [ - "com.yahoo.config.ConfigBuilder" - ], - "attributes": [ - "public" - ], - "methods": [ - "public void <init>()", - "public void <init>(com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Array$Element)", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Array$Element$Builder id(int)", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Array$Element build()" - ], - "fields": [] - }, - 
"com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Array$Element": { - "superClass": "com.yahoo.config.InnerNode", - "interfaces": [], - "attributes": [ - "public", - "final" - ], - "methods": [ - "public void <init>(com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Array$Element$Builder)", - "public int id()" - ], - "fields": [] - }, - "com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Array": { - "superClass": "com.yahoo.config.InnerNode", - "interfaces": [], - "attributes": [ - "public", - "final" - ], - "methods": [ - "public void <init>(com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Array$Builder)", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Array$Element element()" - ], - "fields": [] - }, - "com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Builder": { - "superClass": "java.lang.Object", - "interfaces": [ - "com.yahoo.config.ConfigBuilder" - ], - "attributes": [ - "public" - ], - "methods": [ - "public void <init>()", - "public void <init>(com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype)", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Builder id(int)", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Builder type(com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Type$Enum)", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Builder array(com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Array$Builder)", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Builder map(com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Map$Builder)", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Builder wset(com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Wset$Builder)", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Builder 
annotationref(com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Annotationref$Builder)", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Builder sstruct(com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Sstruct$Builder)", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype build()" - ], - "fields": [ - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Array$Builder array", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Map$Builder map", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Wset$Builder wset", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Annotationref$Builder annotationref", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Sstruct$Builder sstruct" - ] - }, - "com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Map$Builder": { - "superClass": "java.lang.Object", - "interfaces": [ - "com.yahoo.config.ConfigBuilder" - ], - "attributes": [ - "public" - ], - "methods": [ - "public void <init>()", - "public void <init>(com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Map)", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Map$Builder key(com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Map$Key$Builder)", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Map$Builder value(com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Map$Value$Builder)", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Map build()" - ], - "fields": [ - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Map$Key$Builder key", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Map$Value$Builder value" - ] - }, - "com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Map$Key$Builder": { - "superClass": "java.lang.Object", - "interfaces": [ - 
"com.yahoo.config.ConfigBuilder" - ], - "attributes": [ - "public" - ], - "methods": [ - "public void <init>()", - "public void <init>(com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Map$Key)", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Map$Key$Builder id(int)", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Map$Key build()" - ], - "fields": [] - }, - "com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Map$Key": { - "superClass": "com.yahoo.config.InnerNode", - "interfaces": [], - "attributes": [ - "public", - "final" - ], - "methods": [ - "public void <init>(com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Map$Key$Builder)", - "public int id()" - ], - "fields": [] - }, - "com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Map$Value$Builder": { - "superClass": "java.lang.Object", - "interfaces": [ - "com.yahoo.config.ConfigBuilder" - ], - "attributes": [ - "public" - ], - "methods": [ - "public void <init>()", - "public void <init>(com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Map$Value)", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Map$Value$Builder id(int)", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Map$Value build()" - ], - "fields": [] - }, - "com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Map$Value": { - "superClass": "com.yahoo.config.InnerNode", - "interfaces": [], - "attributes": [ - "public", - "final" - ], - "methods": [ - "public void <init>(com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Map$Value$Builder)", - "public int id()" - ], - "fields": [] - }, - "com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Map": { - "superClass": "com.yahoo.config.InnerNode", - "interfaces": [], - "attributes": [ - "public", - "final" - ], - "methods": [ - "public void <init>(com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Map$Builder)", - "public 
com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Map$Key key()", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Map$Value value()" - ], - "fields": [] - }, - "com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Sstruct$Builder": { - "superClass": "java.lang.Object", - "interfaces": [ - "com.yahoo.config.ConfigBuilder" - ], - "attributes": [ - "public" - ], - "methods": [ - "public void <init>()", - "public void <init>(com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Sstruct)", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Sstruct$Builder name(java.lang.String)", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Sstruct$Builder version(int)", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Sstruct$Builder compression(com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Sstruct$Compression$Builder)", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Sstruct$Builder field(com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Sstruct$Field$Builder)", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Sstruct$Builder field(java.util.List)", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Sstruct build()" - ], - "fields": [ - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Sstruct$Compression$Builder compression", - "public java.util.List field" - ] - }, - "com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Sstruct$Compression$Builder": { - "superClass": "java.lang.Object", - "interfaces": [ - "com.yahoo.config.ConfigBuilder" - ], - "attributes": [ - "public" - ], - "methods": [ - "public void <init>()", - "public void <init>(com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Sstruct$Compression)", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Sstruct$Compression$Builder 
type(com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Sstruct$Compression$Type$Enum)", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Sstruct$Compression$Builder level(int)", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Sstruct$Compression$Builder threshold(int)", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Sstruct$Compression$Builder minsize(int)", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Sstruct$Compression build()" - ], - "fields": [] - }, - "com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Sstruct$Compression$Type$Enum": { - "superClass": "java.lang.Enum", - "interfaces": [], - "attributes": [ - "public", - "final", - "enum" - ], - "methods": [ - "public static com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Sstruct$Compression$Type$Enum[] values()", - "public static com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Sstruct$Compression$Type$Enum valueOf(java.lang.String)" - ], - "fields": [ - "public static final enum com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Sstruct$Compression$Type$Enum NONE", - "public static final enum com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Sstruct$Compression$Type$Enum LZ4" - ] - }, - "com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Sstruct$Compression$Type": { - "superClass": "com.yahoo.config.EnumNode", - "interfaces": [], - "attributes": [ - "public", - "final" - ], - "methods": [ - "public void <init>()", - "public void <init>(com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Sstruct$Compression$Type$Enum)" - ], - "fields": [ - "public static final com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Sstruct$Compression$Type$Enum NONE", - "public static final com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Sstruct$Compression$Type$Enum LZ4" - ] - }, - 
"com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Sstruct$Compression": { - "superClass": "com.yahoo.config.InnerNode", - "interfaces": [], - "attributes": [ - "public", - "final" - ], - "methods": [ - "public void <init>(com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Sstruct$Compression$Builder)", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Sstruct$Compression$Type$Enum type()", - "public int level()", - "public int threshold()", - "public int minsize()" - ], - "fields": [] - }, - "com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Sstruct$Field$Builder": { - "superClass": "java.lang.Object", - "interfaces": [ - "com.yahoo.config.ConfigBuilder" - ], - "attributes": [ - "public" - ], - "methods": [ - "public void <init>()", - "public void <init>(com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Sstruct$Field)", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Sstruct$Field$Builder name(java.lang.String)", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Sstruct$Field$Builder id(int)", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Sstruct$Field$Builder datatype(int)", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Sstruct$Field$Builder detailedtype(java.lang.String)", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Sstruct$Field build()" - ], - "fields": [] - }, - "com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Sstruct$Field": { - "superClass": "com.yahoo.config.InnerNode", - "interfaces": [], - "attributes": [ - "public", - "final" - ], - "methods": [ - "public void <init>(com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Sstruct$Field$Builder)", - "public java.lang.String name()", - "public int id()", - "public int datatype()", - "public java.lang.String detailedtype()" - ], - "fields": [] - }, - 
"com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Sstruct": { - "superClass": "com.yahoo.config.InnerNode", - "interfaces": [], - "attributes": [ - "public", - "final" - ], - "methods": [ - "public void <init>(com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Sstruct$Builder)", - "public java.lang.String name()", - "public int version()", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Sstruct$Compression compression()", - "public java.util.List field()", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Sstruct$Field field(int)" - ], - "fields": [] - }, - "com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Type$Enum": { - "superClass": "java.lang.Enum", - "interfaces": [], - "attributes": [ - "public", - "final", - "enum" - ], - "methods": [ - "public static com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Type$Enum[] values()", - "public static com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Type$Enum valueOf(java.lang.String)" - ], - "fields": [ - "public static final enum com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Type$Enum STRUCT", - "public static final enum com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Type$Enum ARRAY", - "public static final enum com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Type$Enum WSET", - "public static final enum com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Type$Enum MAP", - "public static final enum com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Type$Enum ANNOTATIONREF", - "public static final enum com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Type$Enum PRIMITIVE", - "public static final enum com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Type$Enum TENSOR" - ] - }, - "com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Type": { - "superClass": "com.yahoo.config.EnumNode", - "interfaces": [], - "attributes": [ - 
"public", - "final" - ], - "methods": [ - "public void <init>()", - "public void <init>(com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Type$Enum)" - ], - "fields": [ - "public static final com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Type$Enum STRUCT", - "public static final com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Type$Enum ARRAY", - "public static final com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Type$Enum WSET", - "public static final com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Type$Enum MAP", - "public static final com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Type$Enum ANNOTATIONREF", - "public static final com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Type$Enum PRIMITIVE", - "public static final com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Type$Enum TENSOR" - ] - }, - "com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Wset$Builder": { - "superClass": "java.lang.Object", - "interfaces": [ - "com.yahoo.config.ConfigBuilder" - ], - "attributes": [ - "public" - ], - "methods": [ - "public void <init>()", - "public void <init>(com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Wset)", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Wset$Builder key(com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Wset$Key$Builder)", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Wset$Builder createifnonexistent(boolean)", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Wset$Builder removeifzero(boolean)", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Wset build()" - ], - "fields": [ - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Wset$Key$Builder key" - ] - }, - "com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Wset$Key$Builder": { - "superClass": "java.lang.Object", - "interfaces": [ - 
"com.yahoo.config.ConfigBuilder" - ], - "attributes": [ - "public" - ], - "methods": [ - "public void <init>()", - "public void <init>(com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Wset$Key)", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Wset$Key$Builder id(int)", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Wset$Key build()" - ], - "fields": [] - }, - "com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Wset$Key": { - "superClass": "com.yahoo.config.InnerNode", - "interfaces": [], - "attributes": [ - "public", - "final" - ], - "methods": [ - "public void <init>(com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Wset$Key$Builder)", - "public int id()" - ], - "fields": [] - }, - "com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Wset": { - "superClass": "com.yahoo.config.InnerNode", - "interfaces": [], - "attributes": [ - "public", - "final" - ], - "methods": [ - "public void <init>(com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Wset$Builder)", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Wset$Key key()", - "public boolean createifnonexistent()", - "public boolean removeifzero()" - ], - "fields": [] - }, - "com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype": { - "superClass": "com.yahoo.config.InnerNode", - "interfaces": [], - "attributes": [ - "public", - "final" - ], - "methods": [ - "public void <init>(com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Builder)", - "public int id()", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Type$Enum type()", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Array array()", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Map map()", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Wset wset()", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Annotationref 
annotationref()", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype$Sstruct sstruct()" - ], - "fields": [] - }, - "com.yahoo.document.DocumenttypesConfig$Documenttype$Fieldsets$Builder": { - "superClass": "java.lang.Object", - "interfaces": [ - "com.yahoo.config.ConfigBuilder" - ], - "attributes": [ - "public" - ], - "methods": [ - "public void <init>()", - "public void <init>(com.yahoo.document.DocumenttypesConfig$Documenttype$Fieldsets)", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Fieldsets$Builder fields(java.lang.String)", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Fieldsets$Builder fields(java.util.Collection)", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Fieldsets build()" - ], - "fields": [ - "public java.util.List fields" - ] - }, - "com.yahoo.document.DocumenttypesConfig$Documenttype$Fieldsets": { - "superClass": "com.yahoo.config.InnerNode", - "interfaces": [], - "attributes": [ - "public", - "final" - ], - "methods": [ - "public void <init>(com.yahoo.document.DocumenttypesConfig$Documenttype$Fieldsets$Builder)", - "public java.util.List fields()", - "public java.lang.String fields(int)" - ], - "fields": [] - }, - "com.yahoo.document.DocumenttypesConfig$Documenttype$Importedfield$Builder": { - "superClass": "java.lang.Object", - "interfaces": [ - "com.yahoo.config.ConfigBuilder" - ], - "attributes": [ - "public" - ], - "methods": [ - "public void <init>()", - "public void <init>(com.yahoo.document.DocumenttypesConfig$Documenttype$Importedfield)", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Importedfield$Builder name(java.lang.String)", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Importedfield build()" - ], - "fields": [] - }, - "com.yahoo.document.DocumenttypesConfig$Documenttype$Importedfield": { - "superClass": "com.yahoo.config.InnerNode", - "interfaces": [], - "attributes": [ - "public", - "final" - ], - "methods": [ - "public void 
<init>(com.yahoo.document.DocumenttypesConfig$Documenttype$Importedfield$Builder)", - "public java.lang.String name()" - ], - "fields": [] - }, - "com.yahoo.document.DocumenttypesConfig$Documenttype$Inherits$Builder": { - "superClass": "java.lang.Object", - "interfaces": [ - "com.yahoo.config.ConfigBuilder" - ], - "attributes": [ - "public" - ], - "methods": [ - "public void <init>()", - "public void <init>(com.yahoo.document.DocumenttypesConfig$Documenttype$Inherits)", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Inherits$Builder id(int)", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Inherits build()" - ], - "fields": [] - }, - "com.yahoo.document.DocumenttypesConfig$Documenttype$Inherits": { - "superClass": "com.yahoo.config.InnerNode", - "interfaces": [], - "attributes": [ - "public", - "final" - ], - "methods": [ - "public void <init>(com.yahoo.document.DocumenttypesConfig$Documenttype$Inherits$Builder)", - "public int id()" - ], - "fields": [] - }, - "com.yahoo.document.DocumenttypesConfig$Documenttype$Referencetype$Builder": { - "superClass": "java.lang.Object", - "interfaces": [ - "com.yahoo.config.ConfigBuilder" - ], - "attributes": [ - "public" - ], - "methods": [ - "public void <init>()", - "public void <init>(com.yahoo.document.DocumenttypesConfig$Documenttype$Referencetype)", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Referencetype$Builder id(int)", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Referencetype$Builder target_type_id(int)", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Referencetype build()" - ], - "fields": [] - }, - "com.yahoo.document.DocumenttypesConfig$Documenttype$Referencetype": { - "superClass": "com.yahoo.config.InnerNode", - "interfaces": [], - "attributes": [ - "public", - "final" - ], - "methods": [ - "public void <init>(com.yahoo.document.DocumenttypesConfig$Documenttype$Referencetype$Builder)", - "public int id()", - "public int 
target_type_id()" - ], - "fields": [] - }, - "com.yahoo.document.DocumenttypesConfig$Documenttype": { - "superClass": "com.yahoo.config.InnerNode", - "interfaces": [], - "attributes": [ - "public", - "final" - ], - "methods": [ - "public void <init>(com.yahoo.document.DocumenttypesConfig$Documenttype$Builder)", - "public int id()", - "public java.lang.String name()", - "public int version()", - "public int headerstruct()", - "public int bodystruct()", - "public java.util.List inherits()", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Inherits inherits(int)", - "public java.util.List datatype()", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Datatype datatype(int)", - "public java.util.List annotationtype()", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Annotationtype annotationtype(int)", - "public java.util.Map fieldsets()", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Fieldsets fieldsets(java.lang.String)", - "public java.util.List referencetype()", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Referencetype referencetype(int)", - "public java.util.List importedfield()", - "public com.yahoo.document.DocumenttypesConfig$Documenttype$Importedfield importedfield(int)" - ], - "fields": [] - }, - "com.yahoo.document.DocumenttypesConfig$Producer": { - "superClass": "java.lang.Object", - "interfaces": [ - "com.yahoo.config.ConfigInstance$Producer" - ], - "attributes": [ - "public", - "interface", - "abstract" - ], - "methods": [ - "public abstract void getConfig(com.yahoo.document.DocumenttypesConfig$Builder)" - ], - "fields": [] - }, - "com.yahoo.document.DocumenttypesConfig": { - "superClass": "com.yahoo.config.ConfigInstance", - "interfaces": [], - "attributes": [ - "public", - "final" - ], - "methods": [ - "public static java.lang.String getDefMd5()", - "public static java.lang.String getDefName()", - "public static java.lang.String getDefNamespace()", - "public static java.lang.String 
getDefVersion()", - "public void <init>(com.yahoo.document.DocumenttypesConfig$Builder)", - "public boolean enablecompression()", - "public java.util.List documenttype()", - "public com.yahoo.document.DocumenttypesConfig$Documenttype documenttype(int)" - ], - "fields": [ - "public static final java.lang.String CONFIG_DEF_MD5", - "public static final java.lang.String CONFIG_DEF_NAME", - "public static final java.lang.String CONFIG_DEF_NAMESPACE", - "public static final java.lang.String CONFIG_DEF_VERSION", - "public static final java.lang.String[] CONFIG_DEF_SCHEMA" - ] - }, "com.yahoo.document.ExtendedField$Extract": { "superClass": "java.lang.Object", "interfaces": [], diff --git a/document/src/main/java/com/yahoo/document/DocumentTypeManager.java b/document/src/main/java/com/yahoo/document/DocumentTypeManager.java index a6f2923d68f..ff6a7194e7d 100644 --- a/document/src/main/java/com/yahoo/document/DocumentTypeManager.java +++ b/document/src/main/java/com/yahoo/document/DocumentTypeManager.java @@ -65,11 +65,24 @@ public class DocumentTypeManager { annotationTypeRegistry = other.annotationTypeRegistry; } + /** + * For testing, use fromFile factory method instead + * @deprecated //TODO Will be package-private or removed on Vespa 8 + */ + @Deprecated public DocumentTypeManager configure(String configId) { subscriber = DocumentTypeManagerConfigurer.configure(this, configId); return this; } + /** Only for unit tests */ + public static DocumentTypeManager fromFile(String fileName) { + var manager = new DocumentTypeManager(); + var sub = DocumentTypeManagerConfigurer.configure(manager, "file:" + fileName); + sub.close(); + return manager; + } + private void registerDefaultDataTypes() { DocumentType superDocType = DataType.DOCUMENT; dataTypes.put(superDocType.getId(), superDocType); @@ -104,6 +117,10 @@ public class DocumentTypeManager { return false; } + /** + * @deprecated //TODO Will be package-private or removed on Vespa 8 + */ + @Deprecated public boolean 
hasDataType(int code) { if (code == DataType.tensorDataTypeCode) return true; // built-in dynamic: Always present return dataTypes.containsKey(code); @@ -140,6 +157,10 @@ public class DocumentTypeManager { return foundTypes.get(0); } + /** + * @deprecated //TODO Will be package-private or removed on Vespa 8 + */ + @Deprecated public DataType getDataType(int code) { return getDataType(code, ""); } /** @@ -148,7 +169,10 @@ public class DocumentTypeManager { * @param code the code of the data type to return, which must be either built in or present in this manager * @param detailedType detailed type information, or the empty string if none * @return the appropriate DataType instance + * + * @deprecated //TODO Will be package-private or removed on Vespa 8 */ + @Deprecated public DataType getDataType(int code, String detailedType) { if (code == DataType.tensorDataTypeCode) // built-in dynamic return new TensorDataType(TensorType.fromSpec(detailedType)); @@ -165,6 +189,11 @@ public class DocumentTypeManager { } } + /** + * @deprecated //TODO Will be package-private or removed on Vespa 8 + */ + @SuppressWarnings("deprecation") + @Deprecated DataType getDataTypeAndReturnTemporary(int code, String detailedType) { if (hasDataType(code)) { return getDataType(code, detailedType); @@ -186,8 +215,15 @@ public class DocumentTypeManager { * * @param type The datatype to register */ + @SuppressWarnings("deprecation") void registerSingleType(DataType type) { if (type instanceof TensorDataType) return; // built-in dynamic: Created on the fly + if (type instanceof TemporaryDataType) { + throw new IllegalArgumentException("TemporaryDataType no longer supported: " + type); + } + if (type instanceof TemporaryStructuredDataType) { + throw new IllegalArgumentException("TemporaryStructuredDataType no longer supported: " + type); + } if (dataTypes.containsKey(type.getId())) { DataType existingType = dataTypes.get(type.getId()); if (((type instanceof TemporaryDataType) || (type instanceof 
TemporaryStructuredDataType)) @@ -269,6 +305,7 @@ public class DocumentTypeManager { DocumentDeserializer data = DocumentDeserializerFactory.create6(this, buf); return new Document(data); } + public Document createDocument(DocumentDeserializer data) { return new Document(data); } @@ -297,7 +334,10 @@ public class DocumentTypeManager { /** * Clears the DocumentTypeManager. After this operation, * only the default document type and data types are available. + * + * @deprecated //TODO Will be package-private or removed on Vespa 8 */ + @Deprecated public void clear() { documentTypes.clear(); dataTypes.clear(); @@ -308,91 +348,10 @@ public class DocumentTypeManager { return annotationTypeRegistry; } - void replaceTemporaryTypes() { - for (DataType type : dataTypes.values()) { - List<DataType> seenStructs = new LinkedList<>(); - replaceTemporaryTypes(type, seenStructs); - } - } - - private void replaceTemporaryTypes(DataType type, List<DataType> seenStructs) { - if (type instanceof WeightedSetDataType) { - replaceTemporaryTypesInWeightedSet((WeightedSetDataType) type, seenStructs); - } else if (type instanceof MapDataType) { - replaceTemporaryTypesInMap((MapDataType) type, seenStructs); - } else if (type instanceof CollectionDataType) { - replaceTemporaryTypesInCollection((CollectionDataType) type, seenStructs); - } else if (type instanceof StructDataType) { - replaceTemporaryTypesInStruct((StructDataType) type, seenStructs); - } else if (type instanceof PrimitiveDataType) { - //OK because these types are always present - } else if (type instanceof AnnotationReferenceDataType) { - //OK because this type is always present - } else if (type instanceof DocumentType) { - //OK because this type is always present - } else if (type instanceof TensorDataType) { - //OK because this type is always present - } else if (type instanceof ReferenceDataType) { - replaceTemporaryTypeInReference((ReferenceDataType) type); - } else if (type instanceof TemporaryDataType) { - throw new 
IllegalStateException("TemporaryDataType registered in DocumentTypeManager, BUG!!"); - } else { - log.warning("Don't know how to replace temporary data types in " + type); - } - } - - @SuppressWarnings("deprecation") - private void replaceTemporaryTypesInStruct(StructDataType structDataType, List<DataType> seenStructs) { - seenStructs.add(structDataType); - for (Field field : structDataType.getFieldsThisTypeOnly()) { - DataType fieldType = field.getDataType(); - if (fieldType instanceof TemporaryDataType) { - field.setDataType(getDataType(fieldType.getCode(), ((TemporaryDataType)fieldType).getDetailedType())); - } else { - if (!seenStructs.contains(fieldType)) { - replaceTemporaryTypes(fieldType, seenStructs); - } - } - } - } - - private void replaceTemporaryTypeInReference(ReferenceDataType referenceDataType) { - if (referenceDataType.getTargetType() instanceof TemporaryStructuredDataType) { - referenceDataType.setTargetType((DocumentType) getDataType(referenceDataType.getTargetType().getId())); - } - // TODO should we recursively invoke replaceTemporaryTypes for the target type? 
It should only ever be a doc type - } - - private void replaceTemporaryTypesInCollection(CollectionDataType collectionDataType, List<DataType> seenStructs) { - if (collectionDataType.getNestedType() instanceof TemporaryDataType) { - collectionDataType.setNestedType(getDataType(collectionDataType.getNestedType().getCode(), "")); - } else { - replaceTemporaryTypes(collectionDataType.getNestedType(), seenStructs); - } - } - - private void replaceTemporaryTypesInMap(MapDataType mapDataType, List<DataType> seenStructs) { - if (mapDataType.getValueType() instanceof TemporaryDataType) { - mapDataType.setValueType(getDataType(mapDataType.getValueType().getCode(), "")); - } else { - replaceTemporaryTypes(mapDataType.getValueType(), seenStructs); - } - - if (mapDataType.getKeyType() instanceof TemporaryDataType) { - mapDataType.setKeyType(getDataType(mapDataType.getKeyType().getCode(), "")); - } else { - replaceTemporaryTypes(mapDataType.getKeyType(), seenStructs); - } - } - - private void replaceTemporaryTypesInWeightedSet(WeightedSetDataType weightedSetDataType, List<DataType> seenStructs) { - if (weightedSetDataType.getNestedType() instanceof TemporaryDataType) { - weightedSetDataType.setNestedType(getDataType(weightedSetDataType.getNestedType().getCode(), "")); - } else { - replaceTemporaryTypes(weightedSetDataType.getNestedType(), seenStructs); - } - } - + /** + * @deprecated //TODO Will be package-private or removed on Vespa 8 + */ + @Deprecated public void shutdown() { if (subscriber!=null) subscriber.close(); } diff --git a/document/src/main/java/com/yahoo/document/DocumentTypeManagerConfigurer.java b/document/src/main/java/com/yahoo/document/DocumentTypeManagerConfigurer.java index a0ac3cb6620..e43ff26272a 100644 --- a/document/src/main/java/com/yahoo/document/DocumentTypeManagerConfigurer.java +++ b/document/src/main/java/com/yahoo/document/DocumentTypeManagerConfigurer.java @@ -11,15 +11,19 @@ import java.util.ArrayList; import java.util.Collection; import 
java.util.HashMap; import java.util.Map; +import java.util.HashSet; +import java.util.Set; import java.util.logging.Logger; import java.util.stream.Collectors; +import java.util.function.Supplier; +import com.yahoo.tensor.TensorType; /** * Configures the Vespa document manager from a config id. * * @author Einar M R Rosenvinge */ -public class DocumentTypeManagerConfigurer implements ConfigSubscriber.SingleSubscriber<DocumentmanagerConfig>{ +public class DocumentTypeManagerConfigurer implements ConfigSubscriber.SingleSubscriber<DocumentmanagerConfig> { private final static Logger log = Logger.getLogger(DocumentTypeManagerConfigurer.class.getName()); @@ -59,139 +63,511 @@ public class DocumentTypeManagerConfigurer implements ConfigSubscriber.SingleSub return subscriber; } + /** One-shot configuration; should be called on a newly constructed manager */ static void configureNewManager(DocumentmanagerConfig config, DocumentTypeManager manager) { if (config == null) { return; } + new Apply(config, manager); + if (config.datatype().size() == 0 && config.annotationtype().size() == 0) { + new ApplyNewDoctypeConfig(config, manager); + } + } + + private static class Apply { + + public Apply(DocumentmanagerConfig config, DocumentTypeManager manager) { + this.manager = manager; + this.usev8geopositions = config.usev8geopositions(); + apply(config); + } + + private final Map<Integer, DataType> typesById = new HashMap<>(); + private final Map<String, DataType> typesByName = new HashMap<>(); + private final Map<Integer, DocumentmanagerConfig.Datatype> configMap = new HashMap<>(); + + private void inProgress(DataType type) { + var old = typesById.put(type.getId(), type); + if (old != null) { + throw new IllegalArgumentException("Multiple types with same id: "+old+" -> "+type); + } + old = typesByName.put(type.getName(), type); + if (old != null) { + log.warning("Multiple types with same name: "+old+" -> "+type); + } + } + + private void startStructsAndDocs(DocumentmanagerConfig 
config) { + for (var thisDataType : config.datatype()) { + for (var o : thisDataType.structtype()) { + int id = thisDataType.id(); + StructDataType type = new StructDataType(id, o.name()); + inProgress(type); + configMap.remove(id); + } + } + for (var thisDataType : config.datatype()) { + for (var doc : thisDataType.documenttype()) { + int id = thisDataType.id(); + StructDataType header = (StructDataType) typesById.get(doc.headerstruct()); + var importedFields = doc.importedfield().stream() + .map(f -> f.name()) + .collect(Collectors.toUnmodifiableSet()); + DocumentType type = new DocumentType(doc.name(), header, importedFields); + if (id != type.getId()) { + typesById.put(id, type); + // really old stuff, should rewrite tests using this: + int alt = (doc.name()+"."+doc.version()).hashCode(); + log.warning("Document type "+doc.name()+ + " wanted id "+id+" but got "+ + type.getId()+", alternative id was: "+alt); + } + inProgress(type); + configMap.remove(id); + } + } + } + + private DataType createArrayType(int id, DocumentmanagerConfig.Datatype.Arraytype array) { + DataType nestedType = getOrCreateType(array.datatype()); + ArrayDataType type = new ArrayDataType(nestedType, id); + inProgress(type); + return type; + } - setupAnnotationTypesWithoutPayloads(config, manager); - setupAnnotationRefTypes(config, manager); + private DataType createMapType(int id, DocumentmanagerConfig.Datatype.Maptype map) { + DataType keyType = getOrCreateType(map.keytype()); + DataType valType = getOrCreateType(map.valtype()); + MapDataType type = new MapDataType(keyType, valType, id); + inProgress(type); + return type; + } + + private DataType createWeightedSetType(int id, DocumentmanagerConfig.Datatype.Weightedsettype wset) { + DataType nestedType = getOrCreateType(wset.datatype()); + WeightedSetDataType type = + new WeightedSetDataType(nestedType, wset.createifnonexistant(), wset.removeifzero(), id); + inProgress(type); + return type; + } + + private DataType createReferenceType(int 
id, DocumentmanagerConfig.Datatype.Referencetype refType) { + int targetId = refType.target_type_id(); + DocumentType targetDocType = (DocumentType) typesById.get(targetId); + var type = new ReferenceDataType(targetDocType, id); + inProgress(type); + return type; + } + + @SuppressWarnings("deprecation") + private DataType getOrCreateType(int id) { + if (typesById.containsKey(id)) { + return typesById.get(id); + } + var config = configMap.remove(id); + if (config == null) { + return manager.getDataType(id); + } + assert(id == config.id()); + for (var o : config.arraytype()) { + return createArrayType(id, o); + } + for (var o : config.maptype()) { + return createMapType(id, o); + } + for (var o : config.weightedsettype()) { + return createWeightedSetType(id, o); + } + for (var o : config.referencetype()) { + return createReferenceType(id, o); + } + throw new IllegalArgumentException("Could not create type from config: "+config); + } - log.log(Level.FINE, "Configuring document manager with " + config.datatype().size() + " data types."); - ArrayList<DocumentmanagerConfig.Datatype> failed = new ArrayList<>(config.datatype()); - while (!failed.isEmpty()) { - ArrayList<DocumentmanagerConfig.Datatype> tmp = failed; - failed = new ArrayList<>(); - for (int i = 0; i < tmp.size(); i++) { - DocumentmanagerConfig.Datatype thisDataType = tmp.get(i); + private void createRemainingTypes(DocumentmanagerConfig config) { + for (var thisDataType : config.datatype()) { int id = thisDataType.id(); - try { - registerTypeIdMapping(manager, thisDataType, id); - } catch (IllegalArgumentException e) { - failed.add(thisDataType); + var type = getOrCreateType(id); + assert(type != null); + } + } + + @SuppressWarnings("deprecation") + private void fillStructs(DocumentmanagerConfig config) { + for (var thisDataType : config.datatype()) { + for (var struct : thisDataType.structtype()) { + int id = thisDataType.id(); + StructDataType type = (StructDataType) typesById.get(id); + for (var parent : 
struct.inherits()) { + var parentStruct = (StructDataType) typesByName.get(parent.name()); + type.inherit(parentStruct); + } + for (var field : struct.field()) { + if (field.datatype() == id) { + log.fine("Self-referencing struct "+struct.name()+" field: "+field); + } + DataType fieldType = typesById.get(field.datatype()); + if (fieldType == null) { + fieldType = manager.getDataType(field.datatype(), field.detailedtype()); + } + if (field.id().size() == 1) { + type.addField(new Field(field.name(), field.id().get(0).id(), fieldType)); + } else { + type.addField(new Field(field.name(), fieldType)); + } + } } } } - addStructInheritance(config, manager); - addAnnotationTypePayloads(config, manager); - addAnnotationTypeInheritance(config, manager); - manager.replaceTemporaryTypes(); - } + private void fillDocuments(DocumentmanagerConfig config) { + for (var thisDataType : config.datatype()) { + for (var doc : thisDataType.documenttype()) { + int id = thisDataType.id(); + DocumentType type = (DocumentType) typesById.get(id); + for (var parent : doc.inherits()) { + DocumentType parentType = (DocumentType) typesByName.get(parent.name()); + if (parentType == null) { + DataTypeName name = new DataTypeName(parent.name()); + parentType = manager.getDocumentType(name); + } + if (parentType == null) { + throw new IllegalArgumentException("Could not find parent document type '" + parent.name() + "'."); + } + type.inherit(parentType); + } + Map<String, Collection<String>> fieldSets = new HashMap<>(doc.fieldsets().size()); + for (Map.Entry<String, DocumentmanagerConfig.Datatype.Documenttype.Fieldsets> entry: doc.fieldsets().entrySet()) { + fieldSets.put(entry.getKey(), entry.getValue().fields()); + } + type.addFieldSets(fieldSets); + } + } + } - private static void registerTypeIdMapping(DocumentTypeManager manager, DocumentmanagerConfig.Datatype thisDataType, int id) { - for (var o : thisDataType.arraytype()) { - registerArrayType(manager, id, o); + private void 
splitConfig(DocumentmanagerConfig config) { + for (var dataTypeConfig : config.datatype()) { + int id = dataTypeConfig.id(); + var old = configMap.put(id, dataTypeConfig); + if (old != null) { + throw new IllegalArgumentException + ("Multiple configs for id "+id+" first: "+old+" second: "+dataTypeConfig); + } + } } - for (var o : thisDataType.maptype()) { - registerMapType(manager, id, o); + + private void apply(DocumentmanagerConfig config) { + splitConfig(config); + setupAnnotationTypesWithoutPayloads(config); + setupAnnotationRefTypes(config); + startStructsAndDocs(config); + createRemainingTypes(config); + fillStructs(config); + fillDocuments(config); + for (DataType type : typesById.values()) { + manager.register(type); + } + addAnnotationTypePayloads(config); + addAnnotationTypeInheritance(config); } - for (var o : thisDataType.weightedsettype()) { - registerWeightedSetType(manager, id, o); + + private void setupAnnotationRefTypes(DocumentmanagerConfig config) { + for (var thisDataType : config.datatype()) { + int id = thisDataType.id(); + for (var annRefType : thisDataType.annotationreftype()) { + AnnotationType annotationType = manager.getAnnotationTypeRegistry().getType(annRefType.annotation()); + if (annotationType == null) { + throw new IllegalArgumentException("Found reference to " + annRefType.annotation() + ", which does not exist!"); + } + AnnotationReferenceDataType type = new AnnotationReferenceDataType(annotationType, id); + inProgress(type); + configMap.remove(id); + } + } } - for (var o : thisDataType.structtype()) { - registerStructType(manager, id, o); + + private void setupAnnotationTypesWithoutPayloads(DocumentmanagerConfig config) { + for (DocumentmanagerConfig.Annotationtype annType : config.annotationtype()) { + AnnotationType annotationType = new AnnotationType(annType.name(), annType.id()); + manager.getAnnotationTypeRegistry().register(annotationType); + } } - for (var o : thisDataType.documenttype()) { - registerDocumentType(manager, 
o); + + @SuppressWarnings("deprecation") + private void addAnnotationTypePayloads(DocumentmanagerConfig config) { + for (DocumentmanagerConfig.Annotationtype annType : config.annotationtype()) { + AnnotationType annotationType = manager.getAnnotationTypeRegistry().getType(annType.id()); + DataType payload = manager.getDataType(annType.datatype(), ""); + if (! payload.equals(DataType.NONE)) { + annotationType.setDataType(payload); + } + } + } - for (var o : thisDataType.referencetype()) { - registerReferenceType(manager, id, o); + + private void addAnnotationTypeInheritance(DocumentmanagerConfig config) { + for (DocumentmanagerConfig.Annotationtype annType : config.annotationtype()) { + if (annType.inherits().size() > 0) { + AnnotationType inheritedType = manager.getAnnotationTypeRegistry().getType(annType.inherits(0).id()); + AnnotationType type = manager.getAnnotationTypeRegistry().getType(annType.id()); + type.inherit(inheritedType); + } + } } - } - private static void registerArrayType(DocumentTypeManager manager, int id, - DocumentmanagerConfig.Datatype.Arraytype array) { - DataType nestedType = manager.getDataType(array.datatype(), ""); - ArrayDataType type = new ArrayDataType(nestedType, id); - manager.register(type); + private final boolean usev8geopositions; + private final DocumentTypeManager manager; } - private static void registerMapType(DocumentTypeManager manager, int id, - DocumentmanagerConfig.Datatype.Maptype map) { - DataType keyType = manager.getDataType(map.keytype(), ""); - DataType valType = manager.getDataType(map.valtype(), ""); - MapDataType type = new MapDataType(keyType, valType, id); - manager.register(type); - } - private static void registerWeightedSetType(DocumentTypeManager manager, int id, - DocumentmanagerConfig.Datatype.Weightedsettype wset) { - DataType nestedType = manager.getDataType(wset.datatype(), ""); - WeightedSetDataType type = new WeightedSetDataType( - nestedType, wset.createifnonexistant(), wset.removeifzero(), id); - 
manager.register(type); - } + private static class ApplyNewDoctypeConfig { - private static void registerDocumentType(DocumentTypeManager manager, DocumentmanagerConfig.Datatype.Documenttype doc) { - StructDataType header = (StructDataType) manager.getDataType(doc.headerstruct(), ""); - var importedFields = doc.importedfield().stream() - .map(f -> f.name()) - .collect(Collectors.toUnmodifiableSet()); - DocumentType type = new DocumentType(doc.name(), header, importedFields); - for (var parent : doc.inherits()) { - DataTypeName name = new DataTypeName(parent.name()); - DocumentType parentType = manager.getDocumentType(name); - if (parentType == null) { - throw new IllegalArgumentException("Could not find document type '" + name + "'."); - } - type.inherit(parentType); + public ApplyNewDoctypeConfig(DocumentmanagerConfig config, DocumentTypeManager manager) { + this.manager = manager; + this.usev8geopositions = config.usev8geopositions(); + apply(config); } - Map<String, Collection<String>> fieldSets = new HashMap<>(doc.fieldsets().size()); - for (Map.Entry<String, DocumentmanagerConfig.Datatype.Documenttype.Fieldsets> entry: doc.fieldsets().entrySet()) { - fieldSets.put(entry.getKey(), entry.getValue().fields()); + + Map<Integer, DataType> typesByIdx = new HashMap<>(); + + DataType addNewType(int id, DataType type) { + if (type == null) { + throw new IllegalArgumentException("Type to add for idx "+id+" cannot be null"); + } + var old = typesByIdx.put(id, type); + if (old != null) { + throw new IllegalArgumentException("Type "+type+" for idx "+id+" conflict: "+old+" present"); + } + return type; } - type.addFieldSets(fieldSets); - manager.register(type); - } - private static void registerStructType(DocumentTypeManager manager, int id, - DocumentmanagerConfig.Datatype.Structtype struct) { - StructDataType type = new StructDataType(id, struct.name()); + Map<Integer, Supplier<DataType>> factoryByIdx = new HashMap<>(); - for (var field : struct.field()) { - DataType 
fieldType = (field.datatype() == id) - ? manager.getDataTypeAndReturnTemporary(field.datatype(), field.detailedtype()) - : manager.getDataType(field.datatype(), field.detailedtype()); + ArrayList<Integer> proxyRefs = new ArrayList<>(); - if (field.id().size() == 1) { - type.addField(new Field(field.name(), field.id().get(0).id(), fieldType)); - } else { - type.addField(new Field(field.name(), fieldType)); + private DataType getOrCreateType(int id) { + if (typesByIdx.containsKey(id)) { + return typesByIdx.get(id); + } + var factory = factoryByIdx.remove(id); + if (factory != null) { + DataType type = factory.get(); + return addNewType(id, type); } + throw new IllegalArgumentException("No type or factory found for idx: "+id); } - manager.register(type); - } - private static void registerReferenceType(DocumentTypeManager manager, int id, - DocumentmanagerConfig.Datatype.Referencetype refType) { - ReferenceDataType referenceType; - if (manager.hasDataType(refType.target_type_id())) { - DocumentType targetDocType = (DocumentType)manager.getDataType(refType.target_type_id()); - referenceType = new ReferenceDataType(targetDocType, id); - } else { - TemporaryStructuredDataType temporaryTargetType = TemporaryStructuredDataType.createById(refType.target_type_id()); - referenceType = new ReferenceDataType(temporaryTargetType, id); + void createComplexTypes() { + var toCreate = new ArrayList<>(factoryByIdx.keySet()); + for (var dataTypeId : toCreate) { + var type = getOrCreateType(dataTypeId); + assert(type != null); + } + } + + class PerDocTypeData { + + DocumentmanagerConfig.Doctype docTypeConfig; + + DocumentType docType = null; + + PerDocTypeData(DocumentmanagerConfig.Doctype config) { + this.docTypeConfig = config; + } + + void createSimpleTypes() { + for (var typeconf : docTypeConfig.primitivetype()) { + DataType type = manager.getDataType(typeconf.name()); + if (! 
(type instanceof PrimitiveDataType)) { + throw new IllegalArgumentException("Needed primitive type for idx "+typeconf.idx()+" but got: "+type); + } + addNewType(typeconf.idx(), type); + } + for (var typeconf : docTypeConfig.tensortype()) { + var type = new TensorDataType(TensorType.fromSpec(typeconf.detailedtype())); + addNewType(typeconf.idx(), type); + } + } + + void createFactories() { + for (var typeconf : docTypeConfig.arraytype()) { + factoryByIdx.put(typeconf.idx(), () -> new ArrayDataType(getOrCreateType(typeconf.elementtype()))); + } + for (var typeconf : docTypeConfig.maptype()) { + factoryByIdx.put(typeconf.idx(), () -> new MapDataType(getOrCreateType(typeconf.keytype()), + getOrCreateType(typeconf.valuetype()))); + } + for (var typeconf : docTypeConfig.wsettype()) { + factoryByIdx.put(typeconf.idx(), () -> new WeightedSetDataType(getOrCreateType(typeconf.elementtype()), + typeconf.createifnonexistent(), + typeconf.removeifzero())); + } + for (var typeconf : docTypeConfig.documentref()) { + factoryByIdx.put(typeconf.idx(), () -> ReferenceDataType.createWithInferredId(inProgressById.get(typeconf.targettype()).docType)); + } + for (var typeconf : docTypeConfig.annotationref()) { + factoryByIdx.put(typeconf.idx(), () -> new AnnotationReferenceDataType + (annTypeFromIdx(typeconf.annotationtype()))); + } + } + + void createEmptyStructs() { + String docName = docTypeConfig.name(); + for (var typeconf : docTypeConfig.structtype()) { + addNewType(typeconf.idx(), new StructDataType(typeconf.name())); + } + } + + void initializeDocType() { + Set<String> importedFields = new HashSet<>(); + for (var imported : docTypeConfig.importedfield()) { + importedFields.add(imported.name()); + } + int contentIdx = docTypeConfig.contentstruct(); + DataType contentStruct = typesByIdx.get(contentIdx); + if (! 
(contentStruct instanceof StructDataType)) { + throw new IllegalArgumentException("Content struct for document type "+docTypeConfig.name()+ + " should be a struct, but was: "+contentStruct); + } + if (docTypeConfig.name().equals(DataType.DOCUMENT.getName())) { + this.docType = DataType.DOCUMENT; + } else { + this.docType = new DocumentType(docTypeConfig.name(), (StructDataType)contentStruct, importedFields); + } + addNewType(docTypeConfig.idx(), docType); + } + + void createEmptyAnnotationTypes() { + for (var typeconf : docTypeConfig.annotationtype()) { + AnnotationType annType = manager.getAnnotationTypeRegistry().getType(typeconf.name()); + if (typeconf.internalid() != -1) { + if (annType == null) { + annType = new AnnotationType(typeconf.name(), typeconf.internalid()); + } else { + if (annType.getId() != typeconf.internalid()) { + throw new IllegalArgumentException("Wrong internalid for annotation type "+annType+ + " (wanted "+typeconf.internalid()+", got "+annType.getId()+")"); + } + } + } else if (annType == null) { + annType = new AnnotationType(typeconf.name()); + } + manager.getAnnotationTypeRegistry().register(annType); + // because AnnotationType is not a DataType, make a proxy + var proxy = new AnnotationReferenceDataType(annType); + proxyRefs.add(typeconf.idx()); + addNewType(typeconf.idx(), proxy); + } + } + + AnnotationType annTypeFromIdx(int idx) { + var proxy = (AnnotationReferenceDataType) typesByIdx.get(idx); + if (proxy == null) { + throw new IllegalArgumentException("Needed AnnotationType for idx "+idx+", found: "+typesByIdx.get(idx)); + } + return proxy.getAnnotationType(); + } + + void fillAnnotationTypes() { + for (var typeConf : docTypeConfig.annotationtype()) { + var annType = annTypeFromIdx(typeConf.idx()); + int pIdx = typeConf.datatype(); + if (pIdx != -1) { + DataType payload = getOrCreateType(pIdx); + annType.setDataType(payload); + } + for (var inherit : typeConf.inherits()) { + var inheritedType = annTypeFromIdx(inherit.idx()); + if 
(! annType.inherits(inheritedType)) { + annType.inherit(inheritedType); + } + } + } + } + void fillStructs() { + for (var structCfg : docTypeConfig.structtype()) { + int idx = structCfg.idx(); + StructDataType type = (StructDataType) typesByIdx.get(idx); + for (var parent : structCfg.inherits()) { + var parentStruct = (StructDataType) typesByIdx.get(parent.type()); + type.inherit(parentStruct); + } + for (var fieldCfg : structCfg.field()) { + if (fieldCfg.type() == idx) { + log.fine("Self-referencing struct "+structCfg.name()+" field: "+fieldCfg); + } + DataType fieldType = getOrCreateType(fieldCfg.type()); + type.addField(new Field(fieldCfg.name(), fieldCfg.internalid(), fieldType)); + } + } + } + void fillDocument() { + for (var inherit : docTypeConfig.inherits()) { + var data = inProgressById.get(inherit.idx()); + if (data == null) { + throw new IllegalArgumentException("Missing doctype for inherit idx: "+inherit.idx()); + } else { + docType.inherit(data.docType); + } + } + Map<String, Collection<String>> fieldSets = new HashMap<>(); + for (var entry : docTypeConfig.fieldsets().entrySet()) { + fieldSets.put(entry.getKey(), entry.getValue().fields()); + } + Set<String> importedFields = new HashSet<>(); + for (var imported : docTypeConfig.importedfield()) { + importedFields.add(imported.name()); + } + docType.addFieldSets(fieldSets); + } + } + + private final Map<String, PerDocTypeData> inProgressByName = new HashMap<>(); + private final Map<Integer, PerDocTypeData> inProgressById = new HashMap<>(); + + private void apply(DocumentmanagerConfig config) { + for (var docType : config.doctype()) { + int idx = docType.idx(); + String name = docType.name(); + var data = new PerDocTypeData(docType); + var old = inProgressById.put(idx, data); + if (old != null) { + throw new IllegalArgumentException("Multiple document types with id: "+idx); + } + old = inProgressByName.put(name, data); + if (old != null) { + throw new IllegalArgumentException("Multiple document types with 
name: "+name); + } + } + for (var docType : config.doctype()) { + var docTypeData = inProgressById.get(docType.idx()); + docTypeData.createEmptyStructs(); + docTypeData.initializeDocType(); + docTypeData.createEmptyAnnotationTypes(); + docTypeData.createFactories(); + docTypeData.createSimpleTypes(); + } + createComplexTypes(); + for (var docType : config.doctype()) { + var docTypeData = inProgressById.get(docType.idx()); + docTypeData.fillStructs(); + docTypeData.fillDocument(); + docTypeData.fillAnnotationTypes(); + } + for (int idx : proxyRefs) { + typesByIdx.remove(idx); + } + for (DataType type : typesByIdx.values()) { + manager.register(type); + } } - // Note: can't combine the above new-statements, as they call different constructors. - manager.register(referenceType); + + private final boolean usev8geopositions; + private final DocumentTypeManager manager; } public static DocumentTypeManager configureNewManager(DocumentmanagerConfig config) { DocumentTypeManager manager = new DocumentTypeManager(); - if (config == null) { - return manager; - } configureNewManager(config, manager); return manager; } @@ -211,61 +587,4 @@ public class DocumentTypeManagerConfigurer implements ConfigSubscriber.SingleSub managerToConfigure.assign(manager); } - private static void setupAnnotationRefTypes(DocumentmanagerConfig config, DocumentTypeManager manager) { - for (int i = 0; i < config.datatype().size(); i++) { - DocumentmanagerConfig.Datatype thisDataType = config.datatype(i); - int id = thisDataType.id(); - for (var annRefType : thisDataType.annotationreftype()) { - AnnotationType annotationType = manager.getAnnotationTypeRegistry().getType(annRefType.annotation()); - if (annotationType == null) { - throw new IllegalArgumentException("Found reference to " + annRefType.annotation() + ", which does not exist!"); - } - AnnotationReferenceDataType type = new AnnotationReferenceDataType(annotationType, id); - manager.register(type); - } - } - } - - private static void 
setupAnnotationTypesWithoutPayloads(DocumentmanagerConfig config, DocumentTypeManager manager) { - for (DocumentmanagerConfig.Annotationtype annType : config.annotationtype()) { - AnnotationType annotationType = new AnnotationType(annType.name(), annType.id()); - manager.getAnnotationTypeRegistry().register(annotationType); - } - } - - private static void addAnnotationTypePayloads(DocumentmanagerConfig config, DocumentTypeManager manager) { - for (DocumentmanagerConfig.Annotationtype annType : config.annotationtype()) { - AnnotationType annotationType = manager.getAnnotationTypeRegistry().getType(annType.id()); - DataType payload = manager.getDataType(annType.datatype(), ""); - if (!payload.equals(DataType.NONE)) { - annotationType.setDataType(payload); - } - } - - } - - private static void addAnnotationTypeInheritance(DocumentmanagerConfig config, DocumentTypeManager manager) { - for (DocumentmanagerConfig.Annotationtype annType : config.annotationtype()) { - if (annType.inherits().size() > 0) { - AnnotationType inheritedType = manager.getAnnotationTypeRegistry().getType(annType.inherits(0).id()); - AnnotationType type = manager.getAnnotationTypeRegistry().getType(annType.id()); - type.inherit(inheritedType); - } - } - } - - private static void addStructInheritance(DocumentmanagerConfig config, DocumentTypeManager manager) { - for (int i = 0; i < config.datatype().size(); i++) { - DocumentmanagerConfig.Datatype thisDataType = config.datatype(i); - int id = thisDataType.id(); - for (var struct : thisDataType.structtype()) { - StructDataType thisStruct = (StructDataType) manager.getDataType(id, ""); - - for (var parent : struct.inherits()) { - StructDataType parentStruct = (StructDataType) manager.getDataType(parent.name()); - thisStruct.inherit(parentStruct); - } - } - } - } } diff --git a/document/src/main/java/com/yahoo/document/ReferenceDataType.java b/document/src/main/java/com/yahoo/document/ReferenceDataType.java index 78a30c0fcf2..c3b5f6590b6 100644 --- 
a/document/src/main/java/com/yahoo/document/ReferenceDataType.java +++ b/document/src/main/java/com/yahoo/document/ReferenceDataType.java @@ -27,6 +27,7 @@ public class ReferenceDataType extends DataType { * of the target document type might not yet be known. The temporary data type should be * replaced later using setTargetType(). */ + @SuppressWarnings("deprecation") public ReferenceDataType(TemporaryStructuredDataType temporaryTargetType, int id) { this((StructuredDataType) temporaryTargetType, id); } @@ -54,6 +55,7 @@ public class ReferenceDataType extends DataType { /** * Creates a new type where the numeric ID is based on the hash of targetType */ + @SuppressWarnings("deprecation") public static ReferenceDataType createWithInferredId(TemporaryStructuredDataType targetType) { return new ReferenceDataType(targetType); } @@ -67,6 +69,7 @@ public class ReferenceDataType extends DataType { * @throws IllegalStateException if the previously stored target type is already a concrete * instance (not TemporaryStructuredDataType). */ + @SuppressWarnings("deprecation") public void setTargetType(StructuredDataType targetType) { if (! 
(this.targetType instanceof TemporaryStructuredDataType)) { throw new IllegalStateException(String.format( diff --git a/document/src/main/java/com/yahoo/document/TemporaryDataType.java b/document/src/main/java/com/yahoo/document/TemporaryDataType.java index c32e271737c..71f36ecde90 100644 --- a/document/src/main/java/com/yahoo/document/TemporaryDataType.java +++ b/document/src/main/java/com/yahoo/document/TemporaryDataType.java @@ -5,7 +5,9 @@ import com.yahoo.document.datatypes.FieldValue; /** * @author Einar M R Rosenvinge + * @deprecated will be removed soon */ +@Deprecated class TemporaryDataType extends DataType { private final String detailedType; diff --git a/document/src/main/java/com/yahoo/document/TemporaryStructuredDataType.java b/document/src/main/java/com/yahoo/document/TemporaryStructuredDataType.java index 9d648367c4e..865310e7009 100644 --- a/document/src/main/java/com/yahoo/document/TemporaryStructuredDataType.java +++ b/document/src/main/java/com/yahoo/document/TemporaryStructuredDataType.java @@ -5,8 +5,10 @@ package com.yahoo.document; * Internal class, DO NOT USE!! * Only public because it must be used from com.yahoo.searchdefinition.parser. * + * @deprecated will be removed soon * @author Einar M R Rosenvinge */ +@Deprecated // TODO: Remove on Vespa 8 public class TemporaryStructuredDataType extends StructDataType { TemporaryStructuredDataType(String name) { diff --git a/document/src/main/java/com/yahoo/document/annotation/Annotation.java b/document/src/main/java/com/yahoo/document/annotation/Annotation.java index a5f70c2b9e3..2ee2d0baaa7 100644 --- a/document/src/main/java/com/yahoo/document/annotation/Annotation.java +++ b/document/src/main/java/com/yahoo/document/annotation/Annotation.java @@ -223,6 +223,7 @@ public class Annotation implements Comparable<Annotation> { public String toString() { String retval = "annotation of type " + type; retval += ((value == null) ? " (no value)" : " (with value)"); + retval += ((spanNode == null) ? 
" (no span)" : (" with span "+spanNode)); return retval; } diff --git a/document/src/main/java/com/yahoo/document/idstring/IdString.java b/document/src/main/java/com/yahoo/document/idstring/IdString.java index 62fd7a2df99..55beff9eef9 100644 --- a/document/src/main/java/com/yahoo/document/idstring/IdString.java +++ b/document/src/main/java/com/yahoo/document/idstring/IdString.java @@ -1,7 +1,7 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.document.idstring; -import com.google.common.annotations.Beta; +import com.yahoo.api.annotations.Beta; import com.yahoo.text.Text; import com.yahoo.text.Utf8String; diff --git a/document/src/main/java/com/yahoo/document/json/readers/CompositeReader.java b/document/src/main/java/com/yahoo/document/json/readers/CompositeReader.java index 8d2b19b2818..91274144710 100644 --- a/document/src/main/java/com/yahoo/document/json/readers/CompositeReader.java +++ b/document/src/main/java/com/yahoo/document/json/readers/CompositeReader.java @@ -3,6 +3,7 @@ package com.yahoo.document.json.readers; import com.fasterxml.jackson.core.JsonToken; import com.yahoo.document.DataType; +import com.yahoo.document.PositionDataType; import com.yahoo.document.datatypes.CollectionFieldValue; import com.yahoo.document.datatypes.FieldValue; import com.yahoo.document.datatypes.MapFieldValue; @@ -35,6 +36,8 @@ public class CompositeReader { } } else if (fieldValue instanceof MapFieldValue) { MapReader.fillMap(buffer, (MapFieldValue) fieldValue); + } else if (PositionDataType.INSTANCE.equals(fieldValue.getDataType())) { + GeoPositionReader.fillGeoPosition(buffer, fieldValue); } else if (fieldValue instanceof StructuredFieldValue) { StructReader.fillStruct(buffer, (StructuredFieldValue) fieldValue); } else if (fieldValue instanceof TensorFieldValue) { diff --git a/document/src/main/java/com/yahoo/document/json/readers/GeoPositionReader.java 
b/document/src/main/java/com/yahoo/document/json/readers/GeoPositionReader.java new file mode 100644 index 00000000000..eb3919e07d7 --- /dev/null +++ b/document/src/main/java/com/yahoo/document/json/readers/GeoPositionReader.java @@ -0,0 +1,56 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +package com.yahoo.document.json.readers; + +import com.fasterxml.jackson.core.JsonToken; +import com.yahoo.document.PositionDataType; +import com.yahoo.document.datatypes.FieldValue; +import com.yahoo.document.json.TokenBuffer; + +import static com.yahoo.document.json.readers.JsonParserHelpers.*; + +/** + * @author arnej + */ +public class GeoPositionReader { + + static void fillGeoPosition(TokenBuffer buffer, FieldValue positionFieldValue) { + Double latitude = null; + Double longitude = null; + expectObjectStart(buffer.currentToken()); + int initNesting = buffer.nesting(); + for (buffer.next(); buffer.nesting() >= initNesting; buffer.next()) { + String curName = buffer.currentName(); + if ("lat".equals(curName) || "latitude".equals(curName)) { + latitude = readDouble(buffer) * 1.0e6; + } else if ("lng".equals(curName) || "longitude".equals(curName)) { + longitude = readDouble(buffer) * 1.0e6; + } else if ("x".equals(curName)) { + longitude = readDouble(buffer); + } else if ("y".equals(curName)) { + latitude = readDouble(buffer); + } else { + throw new IllegalArgumentException("Unexpected attribute "+curName+" in geo position field"); + } + } + expectObjectEnd(buffer.currentToken()); + if (latitude == null) { + throw new IllegalArgumentException("Missing 'lat' attribute in geo position field"); + } + if (longitude == null) { + throw new IllegalArgumentException("Missing 'lng' attribute in geo position field"); + } + int y = (int) Math.round(latitude); + int x = (int) Math.round(longitude); + var geopos = PositionDataType.valueOf(x, y); + positionFieldValue.assign(geopos); + } + + private static double 
readDouble(TokenBuffer buffer) { + try { + return Double.parseDouble(buffer.currentText()); + } catch (NumberFormatException e) { + throw new IllegalArgumentException("Expected a number but got '" + buffer.currentText()); + } + } + +} diff --git a/document/src/main/java/com/yahoo/document/serialization/VespaDocumentDeserializer6.java b/document/src/main/java/com/yahoo/document/serialization/VespaDocumentDeserializer6.java index 58cc3c22199..9115a000e20 100644 --- a/document/src/main/java/com/yahoo/document/serialization/VespaDocumentDeserializer6.java +++ b/document/src/main/java/com/yahoo/document/serialization/VespaDocumentDeserializer6.java @@ -714,6 +714,7 @@ public class VespaDocumentDeserializer6 extends BufferSerializer implements Docu byte features = buf.get(); int length = buf.getInt1_2_4Bytes(); + int skipToPos = buf.position() + length; if ((features & (byte) 1) == (byte) 1) { //we have a span node @@ -728,15 +729,19 @@ public class VespaDocumentDeserializer6 extends BufferSerializer implements Docu if ((features & (byte) 2) == (byte) 2) { //we have a value: int dataTypeId = buf.getInt(); - - //if this data type ID the same as the one in our config? - if (dataTypeId != type.getDataType().getId()) { - //not the same, but we will handle it gracefully, and just skip past the data: - buf.position(buf.position() + length - 4); - } else { + try { FieldValue value = type.getDataType().createFieldValue(); value.deserialize(this); annotation.setFieldValue(value); + // could get buffer underflow or DeserializationException + } catch (RuntimeException rte) { + if (dataTypeId == type.getDataType().getId()) { + throw new DeserializationException("Could not deserialize annotation payload", rte); + } + // XXX: does this make sense? The annotation without its payload may be a problem. 
+ // handle it gracefully, and just skip past the data + } finally { + buf.position(skipToPos); } } } diff --git a/document/src/test/document/documentmanager.cfg b/document/src/test/document/documentmanager.cfg index e4c581304ce..6ceda63e606 100644 --- a/document/src/test/document/documentmanager.cfg +++ b/document/src/test/document/documentmanager.cfg @@ -1,105 +1,96 @@ -datatype[11] -datatype[0].id -1365874599 -datatype[0].arraytype[0] -datatype[0].weightedsettype[0] -datatype[0].structtype[1] -datatype[0].structtype[0].name foobar.header -datatype[0].structtype[0].version 9 -datatype[0].structtype[0].field[2] -datatype[0].structtype[0].field[0].name foobarfield1 -datatype[0].structtype[0].field[0].id[0] -datatype[0].structtype[0].field[0].datatype 4 -datatype[0].structtype[0].field[1].name foobarfield0 -datatype[0].structtype[0].field[1].id[0] -datatype[0].structtype[0].field[1].datatype 2 -datatype[0].documenttype[0] -datatype[1].id 278604398 -datatype[1].arraytype[0] -datatype[1].weightedsettype[0] -datatype[1].structtype[1] -datatype[1].structtype[0].name foobar.body -datatype[1].structtype[0].version 9 -datatype[1].documenttype[0] -datatype[2].id 378030104 -datatype[2].arraytype[0] -datatype[2].weightedsettype[0] -datatype[2].structtype[0] -datatype[2].documenttype[1] -datatype[2].documenttype[0].name foobar -datatype[2].documenttype[0].version 9 -datatype[2].documenttype[0].inherits[0] -datatype[2].documenttype[0].headerstruct -1365874599 -datatype[2].documenttype[0].bodystruct 278604398 -datatype[3].id 673066331 -datatype[3].arraytype[0] -datatype[3].weightedsettype[0] -datatype[3].structtype[1] -datatype[3].structtype[0].name banana.header -datatype[3].structtype[0].version 234 -datatype[3].structtype[0].field[1] -datatype[3].structtype[0].field[0].name bananafield0 -datatype[3].structtype[0].field[0].id[0] -datatype[3].structtype[0].field[0].datatype 16 -datatype[3].documenttype[0] -datatype[4].id -176986064 -datatype[4].arraytype[0] 
-datatype[4].weightedsettype[0] -datatype[4].structtype[1] -datatype[4].structtype[0].name banana.body -datatype[4].structtype[0].version 234 -datatype[4].documenttype[0] -datatype[5].id 556449802 -datatype[5].arraytype[0] -datatype[5].weightedsettype[0] -datatype[5].structtype[0] -datatype[5].documenttype[1] -datatype[5].documenttype[0].name banana -datatype[5].documenttype[0].version 234 -datatype[5].documenttype[0].inherits[1] -datatype[5].documenttype[0].inherits[0].name foobar -datatype[5].documenttype[0].inherits[0].version 9 -datatype[5].documenttype[0].headerstruct 673066331 -datatype[5].documenttype[0].bodystruct -176986064 -datatype[6].id -858669928 -datatype[6].arraytype[0] -datatype[6].weightedsettype[0] -datatype[6].structtype[1] -datatype[6].structtype[0].name customtypes.header -datatype[6].structtype[0].version 3 -datatype[6].structtype[0].field[2] -datatype[6].structtype[0].field[0].name arrayfloat -datatype[6].structtype[0].field[0].id[0] -datatype[6].structtype[0].field[0].datatype 99 -datatype[6].structtype[0].field[1].name arrayarrayfloat -datatype[6].structtype[0].field[1].id[0] -datatype[6].structtype[0].field[1].datatype 4003 -datatype[6].documenttype[0] -datatype[7].id 99 -datatype[7].arraytype[1] -datatype[7].arraytype[0].datatype 1 -datatype[7].weightedsettype[0] -datatype[7].structtype[0] -datatype[7].documenttype[0] -datatype[8].id 4003 -datatype[8].arraytype[1] -datatype[8].arraytype[0].datatype 99 -datatype[8].weightedsettype[0] -datatype[8].structtype[0] -datatype[8].documenttype[0] -datatype[9].id 2142817261 -datatype[9].arraytype[0] -datatype[9].weightedsettype[0] -datatype[9].structtype[1] -datatype[9].structtype[0].name customtypes.body -datatype[9].structtype[0].version 3 -datatype[9].documenttype[0] -datatype[10].id -1500313747 -datatype[10].arraytype[0] -datatype[10].weightedsettype[0] -datatype[10].structtype[0] -datatype[10].documenttype[1] -datatype[10].documenttype[0].name customtypes -datatype[10].documenttype[0].version 
3 -datatype[10].documenttype[0].inherits[0] -datatype[10].documenttype[0].headerstruct -858669928 -datatype[10].documenttype[0].bodystruct 2142817261 +doctype[4] +doctype[0].name "document" +doctype[0].idx 1000 +doctype[0].contentstruct 1001 +doctype[0].primitivetype[0].idx 1002 +doctype[0].primitivetype[0].name "int" +doctype[0].primitivetype[1].idx 1003 +doctype[0].primitivetype[1].name "double" +doctype[0].primitivetype[2].idx 1004 +doctype[0].primitivetype[2].name "string" +doctype[0].annotationtype[0].idx 1005 +doctype[0].annotationtype[0].name "proximity_break" +doctype[0].annotationtype[0].internalid 8 +doctype[0].annotationtype[0].datatype 1003 +doctype[0].annotationtype[1].idx 1006 +doctype[0].annotationtype[1].name "normalized" +doctype[0].annotationtype[1].internalid 4 +doctype[0].annotationtype[1].datatype 1004 +doctype[0].annotationtype[2].idx 1007 +doctype[0].annotationtype[2].name "reading" +doctype[0].annotationtype[2].internalid 5 +doctype[0].annotationtype[2].datatype 1004 +doctype[0].annotationtype[3].idx 1008 +doctype[0].annotationtype[3].name "term" +doctype[0].annotationtype[3].internalid 1 +doctype[0].annotationtype[3].datatype 1004 +doctype[0].annotationtype[4].idx 1009 +doctype[0].annotationtype[4].name "transformed" +doctype[0].annotationtype[4].internalid 7 +doctype[0].annotationtype[4].datatype 1004 +doctype[0].annotationtype[5].idx 1010 +doctype[0].annotationtype[5].name "canonical" +doctype[0].annotationtype[5].internalid 3 +doctype[0].annotationtype[5].datatype 1004 +doctype[0].annotationtype[6].idx 1011 +doctype[0].annotationtype[6].name "token_type" +doctype[0].annotationtype[6].internalid 2 +doctype[0].annotationtype[6].datatype 1002 +doctype[0].annotationtype[7].idx 1012 +doctype[0].annotationtype[7].name "special_token" +doctype[0].annotationtype[7].internalid 9 +doctype[0].annotationtype[8].idx 1013 +doctype[0].annotationtype[8].name "stem" +doctype[0].annotationtype[8].internalid 6 +doctype[0].annotationtype[8].datatype 1004 
+doctype[0].structtype[0].idx 1001 +doctype[0].structtype[0].name document.header +doctype[1].name "foobar" +doctype[1].idx 1014 +doctype[1].inherits[0].idx 1000 +doctype[1].contentstruct 1015 +doctype[1].primitivetype[0].idx 1016 +doctype[1].primitivetype[0].name "long" +doctype[1].structtype[0].idx 1015 +doctype[1].structtype[0].name foobar.header +doctype[1].structtype[0].field[0].name "foobarfield1" +doctype[1].structtype[0].field[0].internalid 1707020592 +doctype[1].structtype[0].field[0].type 1016 +doctype[1].structtype[0].field[1].name "foobarfield0" +doctype[1].structtype[0].field[1].internalid 1055920092 +doctype[1].structtype[0].field[1].type 1004 +doctype[2].name "banana" +doctype[2].idx 1017 +doctype[2].inherits[0].idx 1014 +doctype[2].contentstruct 1018 +doctype[2].primitivetype[0].idx 1019 +doctype[2].primitivetype[0].name "byte" +doctype[2].structtype[0].idx 1018 +doctype[2].structtype[0].name banana.header +doctype[2].structtype[0].field[0].name "foobarfield1" +doctype[2].structtype[0].field[0].internalid 1707020592 +doctype[2].structtype[0].field[0].type 1016 +doctype[2].structtype[0].field[1].name "foobarfield0" +doctype[2].structtype[0].field[1].internalid 1055920092 +doctype[2].structtype[0].field[1].type 1004 +doctype[2].structtype[0].field[2].name "bananafield0" +doctype[2].structtype[0].field[2].internalid 1294599520 +doctype[2].structtype[0].field[2].type 1019 +doctype[3].name "customtypes" +doctype[3].idx 1020 +doctype[3].inherits[0].idx 1000 +doctype[3].contentstruct 1021 +doctype[3].primitivetype[0].idx 1023 +doctype[3].primitivetype[0].name "float" +doctype[3].arraytype[0].idx 1022 +doctype[3].arraytype[0].elementtype 1024 +doctype[3].arraytype[1].idx 1024 +doctype[3].arraytype[1].elementtype 1023 +doctype[3].structtype[0].idx 1021 +doctype[3].structtype[0].name customtypes.header +doctype[3].structtype[0].field[0].name "arrayfloat" +doctype[3].structtype[0].field[0].internalid 1493411963 +doctype[3].structtype[0].field[0].type 1024 
+doctype[3].structtype[0].field[1].name "arrayarrayfloat" +doctype[3].structtype[0].field[1].internalid 890649191 +doctype[3].structtype[0].field[1].type 1022 diff --git a/document/src/test/java/com/yahoo/document/DocInDocTestCase.java b/document/src/test/java/com/yahoo/document/DocInDocTestCase.java index f5b6b4ea9bf..57972d20509 100644 --- a/document/src/test/java/com/yahoo/document/DocInDocTestCase.java +++ b/document/src/test/java/com/yahoo/document/DocInDocTestCase.java @@ -21,8 +21,8 @@ public class DocInDocTestCase { @Test public void testDocInDoc() { DocumentTypeManager manager = new DocumentTypeManager(); - DocumentTypeManagerConfigurer.configure(manager, "file:src/test/java/com/yahoo/document/documentmanager.docindoc.cfg"); - + var sub = DocumentTypeManagerConfigurer.configure(manager, "file:src/test/java/com/yahoo/document/documentmanager.docindoc.cfg"); + sub.close(); Document inner1 = new Document(manager.getDocumentType("docindoc"), "id:inner:docindoc::one"); inner1.setFieldValue("name", new StringFieldValue("Donald Duck")); inner1.setFieldValue("content", new StringFieldValue("Lives in Duckburg")); diff --git a/document/src/test/java/com/yahoo/document/DocumentTestCase.java b/document/src/test/java/com/yahoo/document/DocumentTestCase.java index 144c7d62894..47605264d44 100644 --- a/document/src/test/java/com/yahoo/document/DocumentTestCase.java +++ b/document/src/test/java/com/yahoo/document/DocumentTestCase.java @@ -95,10 +95,12 @@ public class DocumentTestCase extends DocumentTestCaseBase { static DocumentTypeManager setUpDocType(String filename) { DocumentTypeManager dcMan = new DocumentTypeManager(); - DocumentTypeManagerConfigurer.configure(dcMan, filename); + var sub = DocumentTypeManagerConfigurer.configure(dcMan, filename); + sub.close(); return dcMan; } + @SuppressWarnings("deprecation") public void setUpSertestDocType() { docMan = new DocumentTypeManager(); @@ -876,6 +878,7 @@ public class DocumentTestCase extends DocumentTestCaseBase { 
doc.setFieldValue("something", testlist); } + @SuppressWarnings("deprecation") @Test public void testCompressionConfiguredIsIgnored() { @@ -1093,6 +1096,7 @@ public class DocumentTestCase extends DocumentTestCaseBase { assertEquals(doc, doc2); } + @SuppressWarnings("deprecation") @Test public void testUnknownFieldsDeserialization() { DocumentTypeManager docTypeManasjer = new DocumentTypeManager(); diff --git a/document/src/test/java/com/yahoo/document/DocumentTypeManagerTestCase.java b/document/src/test/java/com/yahoo/document/DocumentTypeManagerTestCase.java index 1a0e2ad1e2b..0aa5aec4b85 100644 --- a/document/src/test/java/com/yahoo/document/DocumentTypeManagerTestCase.java +++ b/document/src/test/java/com/yahoo/document/DocumentTypeManagerTestCase.java @@ -59,6 +59,7 @@ public class DocumentTypeManagerTestCase { assertSame(DataType.DOUBLE, doubleType); } + @SuppressWarnings("deprecation") @Test public void testRecursiveRegister() { StructDataType struct = new StructDataType("mystruct"); @@ -87,6 +88,7 @@ public class DocumentTypeManagerTestCase { assertEquals(docType2, manager.getDocumentType(new DataTypeName("myotherdoc"))); } + @SuppressWarnings("deprecation") @Test public void testMultipleDocuments() { DocumentType docType1 = new DocumentType("foo0"); @@ -120,6 +122,7 @@ public class DocumentTypeManagerTestCase { assertEquals(manager.getDocumentTypes().get(new DataTypeName("foo1")), docType2); } + @SuppressWarnings("deprecation") @Test public void testReverseMapOrder() { DocumentTypeManager manager = createConfiguredManager("file:src/test/document/documentmanager.map.cfg"); @@ -190,7 +193,7 @@ public class DocumentTypeManagerTestCase { Field arrayfloat = type.getField("arrayfloat"); ArrayDataType dataType = (ArrayDataType) arrayfloat.getDataType(); - assertTrue(dataType.getCode() == 99); + // assertTrue(dataType.getCode() == 99); assertTrue(dataType.getValueClass().equals(Array.class)); assertTrue(dataType.getNestedType().getCode() == 1); 
assertTrue(dataType.getNestedType().getValueClass().equals(FloatFieldValue.class)); @@ -198,9 +201,9 @@ public class DocumentTypeManagerTestCase { Field arrayarrayfloat = type.getField("arrayarrayfloat"); ArrayDataType subType = (ArrayDataType) arrayarrayfloat.getDataType(); - assertTrue(subType.getCode() == 4003); + // assertTrue(subType.getCode() == 4003); assertTrue(subType.getValueClass().equals(Array.class)); - assertTrue(subType.getNestedType().getCode() == 99); + // assertTrue(subType.getNestedType().getCode() == 99); assertTrue(subType.getNestedType().getValueClass().equals(Array.class)); ArrayDataType subSubType = (ArrayDataType) subType.getNestedType(); assertTrue(subSubType.getNestedType().getCode() == 1); @@ -215,10 +218,10 @@ public class DocumentTypeManagerTestCase { DocumentType customtypes = manager.getDocumentType(new DataTypeName("customtypes")); assertNull(banana.getField("newfield")); - assertEquals(new Field("arrayfloat", 9489, new ArrayDataType(DataType.FLOAT, 99)), customtypes.getField("arrayfloat")); - - DocumentTypeManagerConfigurer.configure(manager, "file:src/test/document/documentmanager.updated.cfg"); + assertEquals(new Field("arrayfloat", 9489, new ArrayDataType(DataType.FLOAT)), customtypes.getField("arrayfloat")); + var sub = DocumentTypeManagerConfigurer.configure(manager, "file:src/test/document/documentmanager.updated.cfg"); + sub.close(); banana = manager.getDocumentType(new DataTypeName("banana")); customtypes = manager.getDocumentType(new DataTypeName("customtypes")); @@ -501,6 +504,7 @@ search annotationsimplicitstruct { assertReferenceTypePresentInManager(manager, 12345678, "referenced_type"); } + @SuppressWarnings("deprecation") private static void assertReferenceTypePresentInManager(DocumentTypeManager manager, int refTypeId, String refTargetTypeName) { DataType type = manager.getDataType(refTypeId); @@ -513,7 +517,8 @@ search annotationsimplicitstruct { private static DocumentTypeManager createConfiguredManager(String 
configFilePath) { DocumentTypeManager manager = new DocumentTypeManager(); - DocumentTypeManagerConfigurer.configure(manager, configFilePath); + var sub = DocumentTypeManagerConfigurer.configure(manager, configFilePath); + sub.close(); return manager; } @@ -525,6 +530,7 @@ search annotationsimplicitstruct { assertReferenceTypePresentInManager(manager, 87654321, "referenced_type2"); } + @SuppressWarnings("deprecation") @Test public void no_temporary_targets_in_references_or_names() { DocumentTypeManager manager = createConfiguredManager("file:src/test/document/documentmanager.replaced_temporary.cfg"); diff --git a/document/src/test/java/com/yahoo/document/ReferenceDataTypeTestCase.java b/document/src/test/java/com/yahoo/document/ReferenceDataTypeTestCase.java index d44f0880395..53c8a0ecc94 100644 --- a/document/src/test/java/com/yahoo/document/ReferenceDataTypeTestCase.java +++ b/document/src/test/java/com/yahoo/document/ReferenceDataTypeTestCase.java @@ -72,6 +72,7 @@ public class ReferenceDataTypeTestCase { assertTrue(fixture.refType.isValueCompatible(fixture.refTypeClone.createFieldValue())); } + @SuppressWarnings("deprecation") @Test public void reference_type_can_be_constructed_with_temporary_structured_data_type() { TemporaryStructuredDataType tempType = new TemporaryStructuredDataType("cooldoc"); @@ -81,6 +82,7 @@ public class ReferenceDataTypeTestCase { assertEquals(tempType, refType.getTargetType()); } + @SuppressWarnings("deprecation") @Test public void can_replace_temporary_target_data_type() { TemporaryStructuredDataType tempType = new TemporaryStructuredDataType("cooldoc"); diff --git a/document/src/test/java/com/yahoo/document/TemporaryDataTypeTestCase.java b/document/src/test/java/com/yahoo/document/TemporaryDataTypeTestCase.java index 0e85e085316..80154891d83 100644 --- a/document/src/test/java/com/yahoo/document/TemporaryDataTypeTestCase.java +++ b/document/src/test/java/com/yahoo/document/TemporaryDataTypeTestCase.java @@ -10,6 +10,7 @@ import 
static org.junit.Assert.assertNull; /** * @author Einar M R Rosenvinge */ +@SuppressWarnings("deprecation") public class TemporaryDataTypeTestCase { @Test diff --git a/document/src/test/java/com/yahoo/document/TemporaryStructuredDataTypeTestCase.java b/document/src/test/java/com/yahoo/document/TemporaryStructuredDataTypeTestCase.java index 5e4530d0886..3b5cd29b90d 100644 --- a/document/src/test/java/com/yahoo/document/TemporaryStructuredDataTypeTestCase.java +++ b/document/src/test/java/com/yahoo/document/TemporaryStructuredDataTypeTestCase.java @@ -10,6 +10,7 @@ import static org.junit.Assert.assertNotEquals; * @author <a href="mailto:einarmr@yahoo-inc.com">Einar M R Rosenvinge</a> * @since 5.1.10 */ +@SuppressWarnings("deprecation") public class TemporaryStructuredDataTypeTestCase { @Test public void basic() { diff --git a/document/src/test/java/com/yahoo/document/annotation/Bug4259784TestCase.java b/document/src/test/java/com/yahoo/document/annotation/Bug4259784TestCase.java index 77f0e1a9c20..fcaf96a788f 100644 --- a/document/src/test/java/com/yahoo/document/annotation/Bug4259784TestCase.java +++ b/document/src/test/java/com/yahoo/document/annotation/Bug4259784TestCase.java @@ -23,7 +23,9 @@ public class Bug4259784TestCase { @Test public void testSerialize() { DocumentTypeManager manager = new DocumentTypeManager(); - DocumentTypeManagerConfigurer.configure(manager, "file:src/test/java/com/yahoo/document/annotation/documentmanager.bug4259784.cfg"); + var sub = DocumentTypeManagerConfigurer.configure + (manager, "file:src/test/java/com/yahoo/document/annotation/documentmanager.bug4259784.cfg"); + sub.close(); DocumentType type = manager.getDocumentType("blog"); Document doc = new Document(type, "id:this:blog::is:a:test"); diff --git a/document/src/test/java/com/yahoo/document/annotation/Bug4261985TestCase.java b/document/src/test/java/com/yahoo/document/annotation/Bug4261985TestCase.java index 7692d2643df..ae730ed8cb3 100644 --- 
a/document/src/test/java/com/yahoo/document/annotation/Bug4261985TestCase.java +++ b/document/src/test/java/com/yahoo/document/annotation/Bug4261985TestCase.java @@ -23,7 +23,9 @@ public class Bug4261985TestCase { @Test public void testAnnotate() { DocumentTypeManager manager = new DocumentTypeManager(); - DocumentTypeManagerConfigurer.configure(manager, "file:src/test/java/com/yahoo/document/annotation/documentmanager.bug4261985.cfg"); + var sub = DocumentTypeManagerConfigurer.configure + (manager, "file:src/test/java/com/yahoo/document/annotation/documentmanager.bug4261985.cfg"); + sub.close(); DocumentType type = manager.getDocumentType("blog"); Document doc = new Document(type, "id:this:blog::is:a:test"); diff --git a/document/src/test/java/com/yahoo/document/annotation/Bug4475379TestCase.java b/document/src/test/java/com/yahoo/document/annotation/Bug4475379TestCase.java index dbbe0cdfedf..69003ebd036 100755 --- a/document/src/test/java/com/yahoo/document/annotation/Bug4475379TestCase.java +++ b/document/src/test/java/com/yahoo/document/annotation/Bug4475379TestCase.java @@ -23,7 +23,9 @@ public class Bug4475379TestCase { @Test public void testClone() { DocumentTypeManager manager = new DocumentTypeManager(); - DocumentTypeManagerConfigurer.configure(manager, "file:src/test/java/com/yahoo/document/annotation/documentmanager.bug4475379.cfg"); + var sub = DocumentTypeManagerConfigurer.configure + (manager, "file:src/test/java/com/yahoo/document/annotation/documentmanager.bug4475379.cfg"); + sub.close(); DocumentType type = manager.getDocumentType("blog"); Document doc = new Document(type, "id:this:blog::is:a:test"); diff --git a/document/src/test/java/com/yahoo/document/annotation/Bug6394548TestCase.java b/document/src/test/java/com/yahoo/document/annotation/Bug6394548TestCase.java index 654965d1ee7..3ec858062be 100644 --- a/document/src/test/java/com/yahoo/document/annotation/Bug6394548TestCase.java +++ 
b/document/src/test/java/com/yahoo/document/annotation/Bug6394548TestCase.java @@ -19,7 +19,9 @@ public class Bug6394548TestCase { @Test public void testSerializeAndDeserializeMultipleAdjacentStructAnnotations() { DocumentTypeManager manager = new DocumentTypeManager(); - DocumentTypeManagerConfigurer.configure(manager, "file:src/test/java/com/yahoo/document/annotation/documentmanager.6394548.cfg"); + var sub = DocumentTypeManagerConfigurer.configure + (manager, "file:src/test/java/com/yahoo/document/annotation/documentmanager.6394548.cfg"); + sub.close(); AnnotationTypeRegistry registry = manager.getAnnotationTypeRegistry(); AnnotationType featureSetType = registry.getType("morty.RICK_FEATURESET"); diff --git a/document/src/test/java/com/yahoo/document/annotation/SystemTestCase.java b/document/src/test/java/com/yahoo/document/annotation/SystemTestCase.java index 9978fd1b014..678639c89d9 100755 --- a/document/src/test/java/com/yahoo/document/annotation/SystemTestCase.java +++ b/document/src/test/java/com/yahoo/document/annotation/SystemTestCase.java @@ -114,7 +114,9 @@ public class SystemTestCase { @Before public void setUp() { manager = new DocumentTypeManager(); - DocumentTypeManagerConfigurer.configure(manager, "file:src/test/java/com/yahoo/document/annotation/documentmanager.systemtest.cfg"); + var sub = DocumentTypeManagerConfigurer.configure + (manager, "file:src/test/java/com/yahoo/document/annotation/documentmanager.systemtest.cfg"); + sub.close(); } @Test diff --git a/document/src/test/java/com/yahoo/document/json/JsonReaderTestCase.java b/document/src/test/java/com/yahoo/document/json/JsonReaderTestCase.java index 527159dbc10..a1c1669ffa1 100644 --- a/document/src/test/java/com/yahoo/document/json/JsonReaderTestCase.java +++ b/document/src/test/java/com/yahoo/document/json/JsonReaderTestCase.java @@ -504,18 +504,22 @@ public class JsonReaderTestCase { assertEquals("smoke", docType.getName()); } + private Document docFromJson(String json) throws 
IOException { + JsonReader r = createReader(json); + DocumentParseInfo parseInfo = r.parseDocument().get(); + DocumentType docType = r.readDocumentType(parseInfo.documentId); + DocumentPut put = new DocumentPut(new Document(docType, parseInfo.documentId)); + new VespaJsonDocumentReader().readPut(parseInfo.fieldsBuffer, put); + return put.getDocument(); + } + @Test public void testWeightedSet() throws IOException { - JsonReader r = createReader(inputJson("{ 'put': 'id:unittest:testset::whee',", + Document doc = docFromJson(inputJson("{ 'put': 'id:unittest:testset::whee',", " 'fields': {", " 'actualset': {", " 'nalle': 2,", " 'tralle': 7 }}}")); - DocumentParseInfo parseInfo = r.parseDocument().get(); - DocumentType docType = r.readDocumentType(parseInfo.documentId); - DocumentPut put = new DocumentPut(new Document(docType, parseInfo.documentId)); - new VespaJsonDocumentReader().readPut(parseInfo.fieldsBuffer, put); - Document doc = put.getDocument(); FieldValue f = doc.getFieldValue(doc.getField("actualset")); assertSame(WeightedSet.class, f.getClass()); WeightedSet<?> w = (WeightedSet<?>) f; @@ -526,16 +530,11 @@ public class JsonReaderTestCase { @Test public void testArray() throws IOException { - JsonReader r = createReader(inputJson("{ 'put': 'id:unittest:testarray::whee',", + Document doc = docFromJson(inputJson("{ 'put': 'id:unittest:testarray::whee',", " 'fields': {", " 'actualarray': [", " 'nalle',", " 'tralle' ]}}")); - DocumentParseInfo parseInfo = r.parseDocument().get(); - DocumentType docType = r.readDocumentType(parseInfo.documentId); - DocumentPut put = new DocumentPut(new Document(docType, parseInfo.documentId)); - new VespaJsonDocumentReader().readPut(parseInfo.fieldsBuffer, put); - Document doc = put.getDocument(); FieldValue f = doc.getFieldValue(doc.getField("actualarray")); assertSame(Array.class, f.getClass()); Array<?> a = (Array<?>) f; @@ -546,16 +545,11 @@ public class JsonReaderTestCase { @Test public void testMap() throws IOException { - 
JsonReader r = createReader(inputJson("{ 'put': 'id:unittest:testmap::whee',", + Document doc = docFromJson(inputJson("{ 'put': 'id:unittest:testmap::whee',", " 'fields': {", " 'actualmap': {", " 'nalle': 'kalle',", " 'tralle': 'skalle' }}}")); - DocumentParseInfo parseInfo = r.parseDocument().get(); - DocumentType docType = r.readDocumentType(parseInfo.documentId); - DocumentPut put = new DocumentPut(new Document(docType, parseInfo.documentId)); - new VespaJsonDocumentReader().readPut(parseInfo.fieldsBuffer, put); - Document doc = put.getDocument(); FieldValue f = doc.getFieldValue(doc.getField("actualmap")); assertSame(MapFieldValue.class, f.getClass()); MapFieldValue<?, ?> m = (MapFieldValue<?, ?>) f; @@ -566,16 +560,11 @@ public class JsonReaderTestCase { @Test public void testOldMap() throws IOException { - JsonReader r = createReader(inputJson("{ 'put': 'id:unittest:testmap::whee',", + Document doc = docFromJson(inputJson("{ 'put': 'id:unittest:testmap::whee',", " 'fields': {", " 'actualmap': [", " { 'key': 'nalle', 'value': 'kalle'},", " { 'key': 'tralle', 'value': 'skalle'} ]}}")); - DocumentParseInfo parseInfo = r.parseDocument().get(); - DocumentType docType = r.readDocumentType(parseInfo.documentId); - DocumentPut put = new DocumentPut(new Document(docType, parseInfo.documentId)); - new VespaJsonDocumentReader().readPut(parseInfo.fieldsBuffer, put); - Document doc = put.getDocument(); FieldValue f = doc.getFieldValue(doc.getField("actualmap")); assertSame(MapFieldValue.class, f.getClass()); MapFieldValue<?, ?> m = (MapFieldValue<?, ?>) f; @@ -586,14 +575,42 @@ public class JsonReaderTestCase { @Test public void testPositionPositive() throws IOException { - JsonReader r = createReader(inputJson("{ 'put': 'id:unittest:testsinglepos::bamf',", + Document doc = docFromJson(inputJson("{ 'put': 'id:unittest:testsinglepos::bamf',", " 'fields': {", " 'singlepos': 'N63.429722;E10.393333' }}")); - DocumentParseInfo parseInfo = r.parseDocument().get(); - 
DocumentType docType = r.readDocumentType(parseInfo.documentId); - DocumentPut put = new DocumentPut(new Document(docType, parseInfo.documentId)); - new VespaJsonDocumentReader().readPut(parseInfo.fieldsBuffer, put); - Document doc = put.getDocument(); + FieldValue f = doc.getFieldValue(doc.getField("singlepos")); + assertSame(Struct.class, f.getClass()); + assertEquals(10393333, PositionDataType.getXValue(f).getInteger()); + assertEquals(63429722, PositionDataType.getYValue(f).getInteger()); + } + + @Test + public void testPositionOld() throws IOException { + Document doc = docFromJson(inputJson("{ 'put': 'id:unittest:testsinglepos::bamf',", + " 'fields': {", + " 'singlepos': {'x':10393333,'y':63429722} }}")); + FieldValue f = doc.getFieldValue(doc.getField("singlepos")); + assertSame(Struct.class, f.getClass()); + assertEquals(10393333, PositionDataType.getXValue(f).getInteger()); + assertEquals(63429722, PositionDataType.getYValue(f).getInteger()); + } + + @Test + public void testGeoPosition() throws IOException { + Document doc = docFromJson(inputJson("{ 'put': 'id:unittest:testsinglepos::bamf',", + " 'fields': {", + " 'singlepos': {'lat':63.429722,'lng':10.393333} }}")); + FieldValue f = doc.getFieldValue(doc.getField("singlepos")); + assertSame(Struct.class, f.getClass()); + assertEquals(10393333, PositionDataType.getXValue(f).getInteger()); + assertEquals(63429722, PositionDataType.getYValue(f).getInteger()); + } + + @Test + public void testGeoPositionNoAbbreviations() throws IOException { + Document doc = docFromJson(inputJson("{ 'put': 'id:unittest:testsinglepos::bamf',", + " 'fields': {", + " 'singlepos': {'latitude':63.429722,'longitude':10.393333} }}")); FieldValue f = doc.getFieldValue(doc.getField("singlepos")); assertSame(Struct.class, f.getClass()); assertEquals(10393333, PositionDataType.getXValue(f).getInteger()); @@ -602,14 +619,9 @@ public class JsonReaderTestCase { @Test public void testPositionNegative() throws IOException { - JsonReader r = 
createReader(inputJson("{ 'put': 'id:unittest:testsinglepos::bamf',", - " 'fields': {", - " 'singlepos': 'W46.63;S23.55' }}")); - DocumentParseInfo parseInfo = r.parseDocument().get(); - DocumentType docType = r.readDocumentType(parseInfo.documentId); - DocumentPut put = new DocumentPut(new Document(docType, parseInfo.documentId)); - new VespaJsonDocumentReader().readPut(parseInfo.fieldsBuffer, put); - Document doc = put.getDocument(); + Document doc = docFromJson(inputJson("{ 'put': 'id:unittest:testsinglepos::bamf',", + " 'fields': {", + " 'singlepos': 'W46.63;S23.55' }}")); FieldValue f = doc.getFieldValue(doc.getField("singlepos")); assertSame(Struct.class, f.getClass()); assertEquals(-46630000, PositionDataType.getXValue(f).getInteger()); diff --git a/document/src/test/java/com/yahoo/document/serialization/SerializeAnnotationsTestCase.java b/document/src/test/java/com/yahoo/document/serialization/SerializeAnnotationsTestCase.java index d27a4ed6326..9e58e2540a0 100644 --- a/document/src/test/java/com/yahoo/document/serialization/SerializeAnnotationsTestCase.java +++ b/document/src/test/java/com/yahoo/document/serialization/SerializeAnnotationsTestCase.java @@ -44,9 +44,9 @@ public class SerializeAnnotationsTestCase { @Before public void setUp() { - DocumentTypeManagerConfigurer.configure(docMan, - "file:src/tests/serialization/" + - "annotation.serialize.test.cfg"); + var sub = DocumentTypeManagerConfigurer.configure + (docMan, "file:src/tests/serialization/annotation.serialize.test.cfg"); + sub.close(); } @Test diff --git a/document/src/test/java/com/yahoo/vespaxmlparser/VespaXMLReaderTestCase.java b/document/src/test/java/com/yahoo/vespaxmlparser/VespaXMLReaderTestCase.java index 769a73d536d..b2fa7014480 100644 --- a/document/src/test/java/com/yahoo/vespaxmlparser/VespaXMLReaderTestCase.java +++ b/document/src/test/java/com/yahoo/vespaxmlparser/VespaXMLReaderTestCase.java @@ -36,7 +36,8 @@ public class VespaXMLReaderTestCase { @Before public void setUp() { - 
DocumentTypeManagerConfigurer.configure(manager, "file:src/test/vespaxmlparser/documentmanager2.cfg"); + var sub = DocumentTypeManagerConfigurer.configure(manager, "file:src/test/vespaxmlparser/documentmanager2.cfg"); + sub.close(); } @Test diff --git a/document/src/test/vespaxmlparser/alltypes.cfg b/document/src/test/vespaxmlparser/alltypes.cfg deleted file mode 100644 index 5d89611d24b..00000000000 --- a/document/src/test/vespaxmlparser/alltypes.cfg +++ /dev/null @@ -1,101 +0,0 @@ -datatype[5] -datatype[0].id -240642363 -datatype[0].arraytype[0] -datatype[0].weightedsettype[0] -datatype[0].structtype[1] -datatype[0].structtype[0].name alltypes.header -datatype[0].structtype[0].version 0 -datatype[0].structtype[0].field[0] -datatype[0].documenttype[0] -datatype[1].id 1000002 -datatype[1].arraytype[1] -datatype[1].arraytype[0].datatype 2 -datatype[1].weightedsettype[0] -datatype[1].structtype[0] -datatype[1].documenttype[0] -datatype[2].id 2000001 -datatype[2].arraytype[0] -datatype[2].weightedsettype[1] -datatype[2].weightedsettype[0].datatype 2 -datatype[2].weightedsettype[0].createifnonexistant false -datatype[2].weightedsettype[0].removeifzero false -datatype[2].structtype[0] -datatype[2].documenttype[0] -datatype[3].id 163574298 -datatype[3].arraytype[0] -datatype[3].weightedsettype[0] -datatype[3].structtype[1] -datatype[3].structtype[0].name alltypes.body -datatype[3].structtype[0].version 0 -datatype[3].structtype[0].field[20] -datatype[3].structtype[0].field[0].name stringval -datatype[3].structtype[0].field[0].id[0] -datatype[3].structtype[0].field[0].datatype 2 -datatype[3].structtype[0].field[1].name intval1 -datatype[3].structtype[0].field[1].id[0] -datatype[3].structtype[0].field[1].datatype 0 -datatype[3].structtype[0].field[2].name intval2 -datatype[3].structtype[0].field[2].id[0] -datatype[3].structtype[0].field[2].datatype 0 -datatype[3].structtype[0].field[3].name intval3 -datatype[3].structtype[0].field[3].id[0] 
-datatype[3].structtype[0].field[3].datatype 0 -datatype[3].structtype[0].field[4].name longval1 -datatype[3].structtype[0].field[4].id[0] -datatype[3].structtype[0].field[4].datatype 4 -datatype[3].structtype[0].field[5].name longval2 -datatype[3].structtype[0].field[5].id[0] -datatype[3].structtype[0].field[5].datatype 4 -datatype[3].structtype[0].field[6].name longval3 -datatype[3].structtype[0].field[6].id[0] -datatype[3].structtype[0].field[6].datatype 4 -datatype[3].structtype[0].field[7].name byteval1 -datatype[3].structtype[0].field[7].id[0] -datatype[3].structtype[0].field[7].datatype 16 -datatype[3].structtype[0].field[8].name byteval2 -datatype[3].structtype[0].field[8].id[0] -datatype[3].structtype[0].field[8].datatype 16 -datatype[3].structtype[0].field[9].name byteval3 -datatype[3].structtype[0].field[9].id[0] -datatype[3].structtype[0].field[9].datatype 16 -datatype[3].structtype[0].field[10].name floatval -datatype[3].structtype[0].field[10].id[0] -datatype[3].structtype[0].field[10].datatype 1 -datatype[3].structtype[0].field[11].name doubleval -datatype[3].structtype[0].field[11].id[0] -datatype[3].structtype[0].field[11].datatype 5 -datatype[3].structtype[0].field[12].name rawval1 -datatype[3].structtype[0].field[12].id[0] -datatype[3].structtype[0].field[12].datatype 3 -datatype[3].structtype[0].field[13].name rawval2 -datatype[3].structtype[0].field[13].id[0] -datatype[3].structtype[0].field[13].datatype 3 -datatype[3].structtype[0].field[14].name urival -datatype[3].structtype[0].field[14].id[0] -datatype[3].structtype[0].field[14].datatype 10 -datatype[3].structtype[0].field[15].name contentval1 -datatype[3].structtype[0].field[15].id[0] -datatype[3].structtype[0].field[15].datatype 12 -datatype[3].structtype[0].field[16].name contentval2 -datatype[3].structtype[0].field[16].id[0] -datatype[3].structtype[0].field[16].datatype 12 -datatype[3].structtype[0].field[17].name arrayofstringval -datatype[3].structtype[0].field[17].id[0] 
-datatype[3].structtype[0].field[17].datatype 1000002 -datatype[3].structtype[0].field[18].name weightedsetofstringval -datatype[3].structtype[0].field[18].id[0] -datatype[3].structtype[0].field[18].datatype 2000001 -datatype[3].structtype[0].field[19].name tagval -datatype[3].structtype[0].field[19].id[0] -datatype[3].structtype[0].field[19].datatype 18 -datatype[3].documenttype[0] -datatype[4].id -1126644934 -datatype[4].arraytype[0] -datatype[4].weightedsettype[0] -datatype[4].structtype[0] -datatype[4].documenttype[1] -datatype[4].documenttype[0].name alltypes -datatype[4].documenttype[0].version 0 -datatype[4].documenttype[0].inherits[0] -datatype[4].documenttype[0].headerstruct -240642363 -datatype[4].documenttype[0].bodystruct 163574298 diff --git a/document/src/test/vespaxmlparser/documentmanager.cfg b/document/src/test/vespaxmlparser/documentmanager.cfg deleted file mode 100644 index 6662f5caab5..00000000000 --- a/document/src/test/vespaxmlparser/documentmanager.cfg +++ /dev/null @@ -1,109 +0,0 @@ -datatype[10] -datatype[0].id 1002 -datatype[0].arraytype[1] -datatype[0].arraytype[0].datatype 2 -datatype[0].weightedsettype[0] -datatype[0].structtype[0] -datatype[0].documenttype[0] -datatype[1].id 1000 -datatype[1].arraytype[1] -datatype[1].arraytype[0].datatype 0 -datatype[1].weightedsettype[0] -datatype[1].structtype[0] -datatype[1].documenttype[0] -datatype[2].id 1004 -datatype[2].arraytype[1] -datatype[2].arraytype[0].datatype 4 -datatype[2].weightedsettype[0] -datatype[2].structtype[0] -datatype[2].documenttype[0] -datatype[3].id 1016 -datatype[3].arraytype[1] -datatype[3].arraytype[0].datatype 16 -datatype[3].weightedsettype[0] -datatype[3].structtype[0] -datatype[3].documenttype[0] -datatype[4].id 1001 -datatype[4].arraytype[1] -datatype[4].arraytype[0].datatype 1 -datatype[4].weightedsettype[0] -datatype[4].structtype[0] -datatype[4].documenttype[0] -datatype[5].id 2001 -datatype[5].arraytype[0] -datatype[5].weightedsettype[1] 
-datatype[5].weightedsettype[0].datatype 0 -datatype[5].weightedsettype[0].createifnonexistant false -datatype[5].weightedsettype[0].removeifzero false -datatype[5].structtype[0] -datatype[5].documenttype[0] -datatype[6].id 2002 -datatype[6].arraytype[0] -datatype[6].weightedsettype[1] -datatype[6].weightedsettype[0].datatype 2 -datatype[6].weightedsettype[0].createifnonexistant false -datatype[6].weightedsettype[0].removeifzero false -datatype[6].structtype[0] -datatype[6].documenttype[0] -datatype[7].id -628990518 -datatype[7].arraytype[0] -datatype[7].weightedsettype[0] -datatype[7].structtype[1] -datatype[7].structtype[0].name news.header -datatype[7].structtype[0].version 0 -datatype[7].structtype[0].field[12] -datatype[7].structtype[0].field[0].name url -datatype[7].structtype[0].field[0].id[0] -datatype[7].structtype[0].field[0].datatype 10 -datatype[7].structtype[0].field[1].name title -datatype[7].structtype[0].field[1].id[0] -datatype[7].structtype[0].field[1].datatype 2 -datatype[7].structtype[0].field[2].name last_downloaded -datatype[7].structtype[0].field[2].id[0] -datatype[7].structtype[0].field[2].datatype 0 -datatype[7].structtype[0].field[3].name value_long -datatype[7].structtype[0].field[3].id[0] -datatype[7].structtype[0].field[3].datatype 4 -datatype[7].structtype[0].field[4].name value_content -datatype[7].structtype[0].field[4].id[0] -datatype[7].structtype[0].field[4].datatype 12 -datatype[7].structtype[0].field[5].name stringarr -datatype[7].structtype[0].field[5].id[0] -datatype[7].structtype[0].field[5].datatype 1002 -datatype[7].structtype[0].field[6].name intarr -datatype[7].structtype[0].field[6].id[0] -datatype[7].structtype[0].field[6].datatype 1000 -datatype[7].structtype[0].field[7].name longarr -datatype[7].structtype[0].field[7].id[0] -datatype[7].structtype[0].field[7].datatype 1004 -datatype[7].structtype[0].field[8].name bytearr -datatype[7].structtype[0].field[8].id[0] -datatype[7].structtype[0].field[8].datatype 1016 
-datatype[7].structtype[0].field[9].name floatarr -datatype[7].structtype[0].field[9].id[0] -datatype[7].structtype[0].field[9].datatype 1001 -datatype[7].structtype[0].field[10].name weightedsetint -datatype[7].structtype[0].field[10].id[0] -datatype[7].structtype[0].field[10].datatype 2001 -datatype[7].structtype[0].field[11].name weightedsetstring -datatype[7].structtype[0].field[11].id[0] -datatype[7].structtype[0].field[11].datatype 2002 -datatype[7].documenttype[0] -datatype[8].id 538588767 -datatype[8].arraytype[0] -datatype[8].weightedsettype[0] -datatype[8].structtype[1] -datatype[8].structtype[0].name news.body -datatype[8].structtype[0].version 0 -datatype[8].structtype[0].field[0] -datatype[8].documenttype[0] -datatype[9].id -1048827947 -datatype[9].arraytype[0] -datatype[9].weightedsettype[0] -datatype[9].structtype[0] -datatype[9].documenttype[1] -datatype[9].documenttype[0].name news -datatype[9].documenttype[0].version 0 -datatype[9].documenttype[0].inherits[0] -datatype[9].documenttype[0].headerstruct -628990518 -datatype[9].documenttype[0].bodystruct 538588767 diff --git a/document/src/tests/data/defaultdocument.cfg b/document/src/tests/data/defaultdocument.cfg deleted file mode 100644 index 9780f43def6..00000000000 --- a/document/src/tests/data/defaultdocument.cfg +++ /dev/null @@ -1,94 +0,0 @@ -enablecompression false -datatype[6] -datatype[0].id 1000 -datatype[0].arraytype[1] -datatype[0].arraytype[0].datatype 0 -datatype[0].weightedsettype[0] -datatype[0].structtype[0] -datatype[0].documenttype[0] -datatype[1].id 1003 -datatype[1].arraytype[1] -datatype[1].arraytype[0].datatype 3 -datatype[1].weightedsettype[0] -datatype[1].structtype[0] -datatype[1].documenttype[0] -datatype[2].id 2002 -datatype[2].arraytype[0] -datatype[2].weightedsettype[1] -datatype[2].weightedsettype[0].datatype 2 -datatype[2].weightedsettype[0].createifnonexistant false -datatype[2].weightedsettype[0].removeifzero false -datatype[2].structtype[0] 
-datatype[2].documenttype[0] -datatype[3].id 5000 -datatype[3].arraytype[0] -datatype[3].weightedsettype[0] -datatype[3].structtype[1] -datatype[3].structtype[0].name testdoc.header -datatype[3].structtype[0].version 0 -datatype[3].structtype[0].field[3] -datatype[3].structtype[0].field[0].name intattr -datatype[3].structtype[0].field[0].id[0] -datatype[3].structtype[0].field[0].datatype 0 -datatype[3].structtype[0].field[1].name doubleattr -datatype[3].structtype[0].field[1].id[0] -datatype[3].structtype[0].field[1].datatype 5 -datatype[3].structtype[0].field[2].name floatattr -datatype[3].structtype[0].field[2].id[0] -datatype[3].structtype[0].field[2].datatype 1 -datatype[3].documenttype[0] -datatype[4].id 5001 -datatype[4].arraytype[0] -datatype[4].weightedsettype[0] -datatype[4].structtype[1] -datatype[4].structtype[0].name testdoc.body -datatype[4].structtype[0].version 0 -datatype[4].structtype[0].field[11] -datatype[4].structtype[0].field[0].name stringattr -datatype[4].structtype[0].field[0].id[0] -datatype[4].structtype[0].field[0].datatype 2 -datatype[4].structtype[0].field[1].name stringattr2 -datatype[4].structtype[0].field[1].id[0] -datatype[4].structtype[0].field[1].datatype 2 -datatype[4].structtype[0].field[2].name longattr -datatype[4].structtype[0].field[2].id[0] -datatype[4].structtype[0].field[2].datatype 4 -datatype[4].structtype[0].field[3].name byteattr -datatype[4].structtype[0].field[3].id[0] -datatype[4].structtype[0].field[3].datatype 16 -datatype[4].structtype[0].field[4].name rawattr -datatype[4].structtype[0].field[4].id[0] -datatype[4].structtype[0].field[4].datatype 3 -datatype[4].structtype[0].field[5].name minattr -datatype[4].structtype[0].field[5].id[0] -datatype[4].structtype[0].field[5].datatype 0 -datatype[4].structtype[0].field[6].name minattr2 -datatype[4].structtype[0].field[6].id[0] -datatype[4].structtype[0].field[6].datatype 0 -datatype[4].structtype[0].field[7].name arrayattr -datatype[4].structtype[0].field[7].id[0] 
-datatype[4].structtype[0].field[7].datatype 1000 -datatype[4].structtype[0].field[8].name rawarrayattr -datatype[4].structtype[0].field[8].id[0] -datatype[4].structtype[0].field[8].datatype 1003 -datatype[4].structtype[0].field[9].name stringweightedsetattr -datatype[4].structtype[0].field[9].id[0] -datatype[4].structtype[0].field[9].datatype 2002 -datatype[4].structtype[0].field[10].name uri -datatype[4].structtype[0].field[10].id[0] -datatype[4].structtype[0].field[10].datatype 2 -datatype[4].structtype[0].field[11].name docfield -datatype[4].structtype[0].field[11].id[0] -datatype[4].structtype[0].field[11].datatype 8 -datatype[4].documenttype[0] -datatype[5].id 5002 -datatype[5].arraytype[0] -datatype[5].weightedsettype[0] -datatype[5].structtype[0] -datatype[5].documenttype[1] -datatype[5].documenttype[0].name testdoc -datatype[5].documenttype[0].version 0 -datatype[5].documenttype[0].inherits[0] -datatype[5].documenttype[0].headerstruct 5000 -datatype[5].documenttype[0].bodystruct 5001 - diff --git a/document/src/tests/fieldpathupdatetestcase.cpp b/document/src/tests/fieldpathupdatetestcase.cpp index d71ef94a595..e4516ab6d70 100644 --- a/document/src/tests/fieldpathupdatetestcase.cpp +++ b/document/src/tests/fieldpathupdatetestcase.cpp @@ -41,7 +41,7 @@ protected: namespace { -document::DocumenttypesConfig getRepoConfig() { +DocumenttypesConfig getRepoConfig() { const int struct2_id = 64; DocumenttypesConfigBuilderHelper builder; builder.document( diff --git a/document/src/tests/select/select_test.cpp b/document/src/tests/select/select_test.cpp index e01564f4b3b..03bd17d47b5 100644 --- a/document/src/tests/select/select_test.cpp +++ b/document/src/tests/select/select_test.cpp @@ -2,6 +2,7 @@ #include <vespa/vespalib/gtest/gtest.h> #include <vespa/document/base/documentid.h> +#include <vespa/document/config/documenttypes_config_fwd.h> #include <vespa/document/datatype/documenttype.h> #include <vespa/document/datatype/referencedatatype.h> #include 
<vespa/document/fieldvalue/document.h> @@ -16,7 +17,6 @@ LOG_SETUP("document_select_test"); using document::Document; using document::DocumentId; -using document::DocumenttypesConfig; using document::DocumentType; using document::DocumentTypeRepo; using document::DocumentTypeRepoFactory; diff --git a/document/src/tests/serialization/annotationserializer_test.cpp b/document/src/tests/serialization/annotationserializer_test.cpp index c3c9b09477b..dee5eaac2b0 100644 --- a/document/src/tests/serialization/annotationserializer_test.cpp +++ b/document/src/tests/serialization/annotationserializer_test.cpp @@ -4,6 +4,7 @@ #include <vespa/document/annotation/alternatespanlist.h> #include <vespa/document/annotation/annotation.h> #include <vespa/document/annotation/spantree.h> +#include <vespa/document/config/documenttypes_config_fwd.h> #include <vespa/document/fieldvalue/stringfieldvalue.h> #include <vespa/document/serialization/annotationdeserializer.h> #include <vespa/document/serialization/annotationserializer.h> @@ -17,7 +18,6 @@ #include <algorithm> -using document::DocumenttypesConfig; using std::fstream; using std::ostringstream; using std::string; diff --git a/document/src/tests/serialization/vespadocumentserializer_test.cpp b/document/src/tests/serialization/vespadocumentserializer_test.cpp index 443c7d1885a..58a5a539feb 100644 --- a/document/src/tests/serialization/vespadocumentserializer_test.cpp +++ b/document/src/tests/serialization/vespadocumentserializer_test.cpp @@ -5,6 +5,7 @@ #include <vespa/document/annotation/span.h> #include <vespa/document/annotation/spantree.h> #include <vespa/document/config/config-documenttypes.h> +#include <vespa/document/config/documenttypes_config_fwd.h> #include <vespa/document/datatype/annotationreferencedatatype.h> #include <vespa/document/datatype/arraydatatype.h> #include <vespa/document/datatype/documenttype.h> @@ -46,7 +47,6 @@ #include <vespa/document/base/exceptions.h> #include <vespa/vespalib/util/compressionconfig.h> 
-using document::DocumenttypesConfig; using vespalib::File; using vespalib::Slime; using vespalib::nbostream; diff --git a/document/src/vespa/document/annotation/alternatespanlist.h b/document/src/vespa/document/annotation/alternatespanlist.h index 92c58135431..f21cafedb41 100644 --- a/document/src/vespa/document/annotation/alternatespanlist.h +++ b/document/src/vespa/document/annotation/alternatespanlist.h @@ -12,7 +12,7 @@ class AlternateSpanList : public SpanNode { struct Subtree { SpanList *span_list; double probability; - Subtree() : span_list(0), probability(0.0) {} + Subtree() noexcept : span_list(0), probability(0.0) {} }; std::vector<Subtree> _subtrees; diff --git a/document/src/vespa/document/annotation/annotation.h b/document/src/vespa/document/annotation/annotation.h index 4acd4d2f045..46e5a1a5d41 100644 --- a/document/src/vespa/document/annotation/annotation.h +++ b/document/src/vespa/document/annotation/annotation.h @@ -22,7 +22,7 @@ public: : _type(&type), _node(nullptr), _value(value.release()) {} Annotation(const AnnotationType &annotation) : _type(&annotation), _node(nullptr), _value(nullptr) { } - Annotation() : _type(nullptr), _node(nullptr), _value(nullptr) { } + Annotation() noexcept : _type(nullptr), _node(nullptr), _value(nullptr) { } ~Annotation(); void setType(const AnnotationType * v) { _type = v; } diff --git a/document/src/vespa/document/annotation/span.h b/document/src/vespa/document/annotation/span.h index ab6f61843d1..5f3a78bf98c 100644 --- a/document/src/vespa/document/annotation/span.h +++ b/document/src/vespa/document/annotation/span.h @@ -14,7 +14,7 @@ class Span : public SpanNode { public: typedef std::unique_ptr<Span> UP; - Span(int32_t from_pos=0, int32_t len=0) : _from(from_pos), _length(len) {} + Span(int32_t from_pos=0, int32_t len=0) noexcept : _from(from_pos), _length(len) {} int32_t from() const { return _from; } int32_t length() const { return _length; } diff --git a/document/src/vespa/document/base/testdocrepo.cpp 
b/document/src/vespa/document/base/testdocrepo.cpp index e6ed37dce92..d48bac0ff74 100644 --- a/document/src/vespa/document/base/testdocrepo.cpp +++ b/document/src/vespa/document/base/testdocrepo.cpp @@ -75,7 +75,7 @@ TestDocRepo::getDocumentType(const vespalib::string &t) const { } DocumenttypesConfig readDocumenttypesConfig(const char *file_name) { - config::FileConfigReader<DocumenttypesConfig> reader(file_name); + ::config::FileConfigReader<DocumenttypesConfig> reader(file_name); return DocumenttypesConfig(*reader.read()); } diff --git a/document/src/vespa/document/base/testdocrepo.h b/document/src/vespa/document/base/testdocrepo.h index 594057887e3..093f354efc4 100644 --- a/document/src/vespa/document/base/testdocrepo.h +++ b/document/src/vespa/document/base/testdocrepo.h @@ -3,6 +3,7 @@ #pragma once #include <vespa/document/config/config-documenttypes.h> +#include <vespa/document/config/documenttypes_config_fwd.h> #include <vespa/document/datatype/datatype.h> namespace document { diff --git a/document/src/vespa/document/config/documentmanager.def b/document/src/vespa/document/config/documentmanager.def index aec24db1282..ec19ba8d802 100644 --- a/document/src/vespa/document/config/documentmanager.def +++ b/document/src/vespa/document/config/documentmanager.def @@ -5,6 +5,9 @@ namespace=document.config ## Whether to enable compression in this process. enablecompression bool default=false +## Prefer "Vespa 8" format for the "position" type +usev8geopositions bool default=false + ## The Id of the datatype. 
Must be unique, including not ## overlapping with the internal datatypes (defined in datatype.h) datatype[].id int @@ -63,7 +66,7 @@ datatype[].structtype[].field[].datatype int ## (necessarily) causing field incompatibility datatype[].structtype[].field[].detailedtype string default="" -## Specify a document type to inherit +## Specify a struct type to inherit datatype[].structtype[].inherits[].name string ## Version is not in use @@ -104,3 +107,159 @@ annotationtype[].id int annotationtype[].name string annotationtype[].datatype int default=-1 annotationtype[].inherits[].id int + + +# Here starts a new model for how datatypes are configured, where +# everything is per document-type, and each documenttype contains the +# datatypes it defines. Will be used (only?) if the arrays above +# (datatype[] and annotationtype[]) are empty. + + +# Note: we will include the built-in "document" document +# type that all other doctypes inherit from also, in order +# to get all the primitive and built-in types declared +# with an idx we can refer to. + +## Name of the document type. Must be unique. +doctype[].name string + +# Note: indexes are only meaningful as internal references in this +# config; they will typically be sequential (1,2,3,...) in the order +# that they are generated (but nothing should depend on that). + +## Index of this type (as a datatype which can be referred to). +doctype[].idx int + +# Could also use name here? +## Specify document types to inherit +doctype[].inherits[].idx int + +## Index of struct defining document fields +doctype[].contentstruct int + +## Field sets available for this document type +doctype[].fieldsets{}.fields[] string + +## Imported fields (specified outside the document block in the schema) +doctype[].importedfield[].name string + +# Everything below here is configuration of data types defined by +# this document type. + +# Primitive types must be present as built-in static members. 
+ +## Index of primitive type +doctype[].primitivetype[].idx int + +## The name of this primitive type +doctype[].primitivetype[].name string + +# Arrays are the simplest collection type: + +## Index of this array type +doctype[].arraytype[].idx int + +## Index of the element type this array type contains +doctype[].arraytype[].elementtype int + + +# Maps are another collection type: + +## Index of this map type +doctype[].maptype[].idx int + +## Index of the key type used by this map type +doctype[].maptype[].keytype int + +## Index of the key type used by this map type +doctype[].maptype[].valuetype int + + +# Weighted sets are more complicated; +# they can be considered as an collection +# of unique elements where each element has +# an associated weight: + +## Index of this weighted set type +doctype[].wsettype[].idx int + +## Index of the element types contained in this weighted set type +doctype[].wsettype[].elementtype int + +## Should an update to a nonexistent element cause it to be created +doctype[].wsettype[].createifnonexistent bool default=false + +## Should an element in a weighted set be removed if an update changes the weight to 0 +doctype[].wsettype[].removeifzero bool default=false + + +# Tensors have their own type system + +## Index of this tensor type +doctype[].tensortype[].idx int + +## Description of the type of the actual tensors contained +doctype[].tensortype[].detailedtype string + + +# Document references refer to parent documents that a document can +# import fields from: + +## Index of this reference data type: +doctype[].documentref[].idx int + +# Could also use name? +## Index of the document type this reference type refers to: +doctype[].documentref[].targettype int + + +# Annotation types are another world, but are modeled here +# as if they were also datatypes contained inside document types: + +## Index of an annotation type. +doctype[].annotationtype[].idx int + +## Name of the annotation type. 
+doctype[].annotationtype[].name string + +# Could we somehow avoid this? +## Internal id of this annotation type +doctype[].annotationtype[].internalid int default=-1 + +## Index of contained datatype of the annotation type, if any +doctype[].annotationtype[].datatype int default=-1 + +## Index of annotation type that this type inherits. +doctype[].annotationtype[].inherits[].idx int + + +# Annotation references are field values referring to +# an annotation of a certain annotation type. + +## Index of this annotation reference type +doctype[].annotationref[].idx int + +## Index of the annotation type this annotation reference type refers to +doctype[].annotationref[].annotationtype int + + +# A struct is just a named collections of fields: + +## Index of this struct type +doctype[].structtype[].idx int + +## Name of the struct type. Must be unique within documenttype. +doctype[].structtype[].name string + +## Index of another struct type to inherit +doctype[].structtype[].inherits[].type int + +## Name of a struct field. Must be unique within the struct type. +doctype[].structtype[].field[].name string + +## The "field id" - used in serialized format! +doctype[].structtype[].field[].internalid int + +## Index of the type of this field +doctype[].structtype[].field[].type int + diff --git a/document/src/vespa/document/config/documenttypes.def b/document/src/vespa/document/config/documenttypes.def index 6c453cc9814..2e0483f025b 100644 --- a/document/src/vespa/document/config/documenttypes.def +++ b/document/src/vespa/document/config/documenttypes.def @@ -1,10 +1,13 @@ # Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -namespace=document +namespace=document.config ## Whether to enable compression in this process. enablecompression bool default=false +## Prefer "Vespa 8" format for the "position" type +usev8geopositions bool default=false + ## The Id of the documenttype. Must be unique among all document types. 
documenttype[].id int diff --git a/document/src/vespa/document/config/documenttypes_config_fwd.h b/document/src/vespa/document/config/documenttypes_config_fwd.h new file mode 100644 index 00000000000..a1f7cfcb308 --- /dev/null +++ b/document/src/vespa/document/config/documenttypes_config_fwd.h @@ -0,0 +1,16 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. + +#pragma once + +/** + * Forward-declaration of the types + * DocumenttypesConfig and DocumenttypesConfigBuilder + * (globally visible). + **/ + +namespace document::config::internal { +class InternalDocumenttypesType; +} + +using DocumenttypesConfigBuilder = document::config::internal::InternalDocumenttypesType; +using DocumenttypesConfig = const document::config::internal::InternalDocumenttypesType; diff --git a/document/src/vespa/document/repo/configbuilder.h b/document/src/vespa/document/repo/configbuilder.h index 0bcebe7ddf6..4df7a58ebb6 100644 --- a/document/src/vespa/document/repo/configbuilder.h +++ b/document/src/vespa/document/repo/configbuilder.h @@ -4,6 +4,7 @@ #include <vespa/document/base/field.h> #include <vespa/document/config/config-documenttypes.h> +#include <vespa/document/config/documenttypes_config_fwd.h> #include <vespa/document/datatype/datatype.h> #include <vespa/document/datatype/structdatatype.h> #include <vespa/vespalib/stllike/string.h> @@ -136,7 +137,7 @@ struct DocTypeRep { }; class DocumenttypesConfigBuilderHelper { - ::document::DocumenttypesConfigBuilder _config; + ::document::config::DocumenttypesConfigBuilder _config; public: DocumenttypesConfigBuilderHelper() {} @@ -147,7 +148,7 @@ public: const DatatypeConfig &header, const DatatypeConfig &body); - ::document::DocumenttypesConfigBuilder &config() { return _config; } + ::document::config::DocumenttypesConfigBuilder &config() { return _config; } }; } diff --git a/document/src/vespa/document/repo/document_type_repo_factory.h 
b/document/src/vespa/document/repo/document_type_repo_factory.h index 4dc75a04e38..e256f253fd0 100644 --- a/document/src/vespa/document/repo/document_type_repo_factory.h +++ b/document/src/vespa/document/repo/document_type_repo_factory.h @@ -5,13 +5,10 @@ #include <memory> #include <mutex> #include <map> +#include <vespa/document/config/documenttypes_config_fwd.h> namespace document { -namespace internal { - class InternalDocumenttypesType; -} - class DocumentTypeRepo; /* @@ -19,7 +16,6 @@ class DocumentTypeRepo; * for equal config. */ class DocumentTypeRepoFactory { - using DocumenttypesConfig = const internal::InternalDocumenttypesType; struct DocumentTypeRepoEntry { std::weak_ptr<const DocumentTypeRepo> repo; std::unique_ptr<const DocumenttypesConfig> config; diff --git a/document/src/vespa/document/repo/documenttyperepo.cpp b/document/src/vespa/document/repo/documenttyperepo.cpp index 15730d14a86..3a7769c831e 100644 --- a/document/src/vespa/document/repo/documenttyperepo.cpp +++ b/document/src/vespa/document/repo/documenttyperepo.cpp @@ -2,6 +2,7 @@ #include "documenttyperepo.h" +#include <vespa/document/config/documenttypes_config_fwd.h> #include <vespa/document/datatype/annotationreferencedatatype.h> #include <vespa/document/datatype/arraydatatype.h> #include <vespa/document/datatype/documenttype.h> @@ -229,6 +230,8 @@ struct DataTypeRepo { }; namespace { + + void addAnnotationType(const DocumenttypesConfig::Documenttype::Annotationtype &type, AnnotationTypeRepo &annotations) { auto a = std::make_unique<AnnotationType>(type.id, type.name); diff --git a/document/src/vespa/document/repo/documenttyperepo.h b/document/src/vespa/document/repo/documenttyperepo.h index 87830b0aa1c..82f6c2be954 100644 --- a/document/src/vespa/document/repo/documenttyperepo.h +++ b/document/src/vespa/document/repo/documenttyperepo.h @@ -4,11 +4,11 @@ #include <memory> #include <vespa/vespalib/stllike/string.h> +#include <vespa/document/config/documenttypes_config_fwd.h> namespace 
document { namespace internal { - class InternalDocumenttypesType; class DocumentTypeMap; } @@ -19,7 +19,6 @@ class DocumentType; class DocumentTypeRepo { public: - using DocumenttypesConfig = const internal::InternalDocumenttypesType; struct Handler { virtual ~Handler() = default; virtual void handle(const DocumentType & type) = 0; diff --git a/document/src/vespa/document/select/resultset.h b/document/src/vespa/document/select/resultset.h index ef5133807a7..1289f61046c 100644 --- a/document/src/vespa/document/select/resultset.h +++ b/document/src/vespa/document/select/resultset.h @@ -20,7 +20,7 @@ class ResultSet static std::vector<ResultSet> _ors; static std::vector<ResultSet> _nots; public: - ResultSet() : _val(0u) { } + ResultSet() noexcept : _val(0u) { } static uint32_t enumToMask(uint32_t rhs) { return 1u << rhs; diff --git a/documentapi/abi-spec.json b/documentapi/abi-spec.json index 78a58f24a65..88ec090d324 100644 --- a/documentapi/abi-spec.json +++ b/documentapi/abi-spec.json @@ -1912,6 +1912,7 @@ "public void <init>(com.yahoo.documentapi.messagebus.protocol.DocumentProtocolPoliciesConfig)", "public com.yahoo.documentapi.messagebus.protocol.DocumentProtocolPoliciesConfig$Builder cluster(java.lang.String, com.yahoo.documentapi.messagebus.protocol.DocumentProtocolPoliciesConfig$Cluster$Builder)", "public com.yahoo.documentapi.messagebus.protocol.DocumentProtocolPoliciesConfig$Builder cluster(java.util.Map)", + "public com.yahoo.documentapi.messagebus.protocol.DocumentProtocolPoliciesConfig$Builder cluster(java.lang.String, java.util.function.Consumer)", "public final boolean dispatchGetConfig(com.yahoo.config.ConfigInstance$Producer)", "public final java.lang.String getDefMd5()", "public final java.lang.String getDefName()", @@ -1937,6 +1938,7 @@ "public void <init>(com.yahoo.documentapi.messagebus.protocol.DocumentProtocolPoliciesConfig$Cluster)", "public com.yahoo.documentapi.messagebus.protocol.DocumentProtocolPoliciesConfig$Cluster$Builder 
defaultRoute(java.lang.String)", "public com.yahoo.documentapi.messagebus.protocol.DocumentProtocolPoliciesConfig$Cluster$Builder route(com.yahoo.documentapi.messagebus.protocol.DocumentProtocolPoliciesConfig$Cluster$Route$Builder)", + "public com.yahoo.documentapi.messagebus.protocol.DocumentProtocolPoliciesConfig$Cluster$Builder route(java.util.function.Consumer)", "public com.yahoo.documentapi.messagebus.protocol.DocumentProtocolPoliciesConfig$Cluster$Builder route(java.util.List)", "public com.yahoo.documentapi.messagebus.protocol.DocumentProtocolPoliciesConfig$Cluster$Builder selector(java.lang.String)", "public com.yahoo.documentapi.messagebus.protocol.DocumentProtocolPoliciesConfig$Cluster build()" @@ -2133,6 +2135,7 @@ "public void <init>()", "public void <init>(com.yahoo.documentapi.messagebus.protocol.DocumentrouteselectorpolicyConfig)", "public com.yahoo.documentapi.messagebus.protocol.DocumentrouteselectorpolicyConfig$Builder route(com.yahoo.documentapi.messagebus.protocol.DocumentrouteselectorpolicyConfig$Route$Builder)", + "public com.yahoo.documentapi.messagebus.protocol.DocumentrouteselectorpolicyConfig$Builder route(java.util.function.Consumer)", "public com.yahoo.documentapi.messagebus.protocol.DocumentrouteselectorpolicyConfig$Builder route(java.util.List)", "public final boolean dispatchGetConfig(com.yahoo.config.ConfigInstance$Producer)", "public final java.lang.String getDefMd5()", diff --git a/documentapi/src/tests/messagebus/messagebus_test.cpp b/documentapi/src/tests/messagebus/messagebus_test.cpp index 7dd47a0b93e..af129a35660 100644 --- a/documentapi/src/tests/messagebus/messagebus_test.cpp +++ b/documentapi/src/tests/messagebus/messagebus_test.cpp @@ -1,6 +1,7 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
#include <vespa/document/base/testdocrepo.h> +#include <vespa/document/config/documenttypes_config_fwd.h> #include <vespa/document/datatype/documenttype.h> #include <vespa/document/repo/documenttyperepo.h> #include <vespa/document/update/documentupdate.h> diff --git a/documentapi/src/tests/messages/testbase.cpp b/documentapi/src/tests/messages/testbase.cpp index 343f7d0b0ae..db2c08704e1 100644 --- a/documentapi/src/tests/messages/testbase.cpp +++ b/documentapi/src/tests/messages/testbase.cpp @@ -3,6 +3,7 @@ #include "testbase.h" #include <vespa/document/repo/documenttyperepo.h> #include <vespa/document/base/testdocrepo.h> +#include <vespa/document/config/documenttypes_config_fwd.h> #include <vespa/vespalib/util/exception.h> #include <vespa/vespalib/io/fileutil.h> #include <fcntl.h> diff --git a/documentapi/src/tests/policies/policies_test.cpp b/documentapi/src/tests/policies/policies_test.cpp index f17114d8cd7..54038ce741a 100644 --- a/documentapi/src/tests/policies/policies_test.cpp +++ b/documentapi/src/tests/policies/policies_test.cpp @@ -18,6 +18,7 @@ #include <vespa/messagebus/testlib/testserver.h> #include <vespa/vdslib/state/clusterstate.h> #include <vespa/document/base/testdocrepo.h> +#include <vespa/document/config/documenttypes_config_fwd.h> #include <vespa/document/fieldvalue/longfieldvalue.h> #include <vespa/document/datatype/documenttype.h> #include <vespa/document/repo/documenttyperepo.h> diff --git a/documentapi/src/vespa/documentapi/messagebus/policies/loadbalancer.h b/documentapi/src/vespa/documentapi/messagebus/policies/loadbalancer.h index 19975bd9465..6627fc57ef2 100644 --- a/documentapi/src/vespa/documentapi/messagebus/policies/loadbalancer.h +++ b/documentapi/src/vespa/documentapi/messagebus/policies/loadbalancer.h @@ -10,7 +10,7 @@ class LoadBalancer { public: class NodeInfo { public: - NodeInfo() : valid(false), sent(0), busy(0), weight(1.0) {}; + NodeInfo() noexcept : valid(false), sent(0), busy(0), weight(1.0) {}; bool valid; uint32_t 
sent; diff --git a/eval/CMakeLists.txt b/eval/CMakeLists.txt index 99c7e9c68b8..2e0af3acfa7 100644 --- a/eval/CMakeLists.txt +++ b/eval/CMakeLists.txt @@ -70,6 +70,7 @@ vespa_define_module( src/tests/instruction/index_lookup_table src/tests/instruction/inplace_map_function src/tests/instruction/join_with_number + src/tests/instruction/l2_distance src/tests/instruction/mixed_inner_product_function src/tests/instruction/mixed_simple_join_function src/tests/instruction/pow_as_map_optimizer diff --git a/eval/src/tests/ann/for-sift-hit.h b/eval/src/tests/ann/for-sift-hit.h index 0c920fe8109..4002dff84ee 100644 --- a/eval/src/tests/ann/for-sift-hit.h +++ b/eval/src/tests/ann/for-sift-hit.h @@ -5,6 +5,6 @@ struct Hit { uint32_t docid; double distance; - Hit() : docid(0u), distance(0.0) {} + Hit() noexcept : docid(0u), distance(0.0) {} Hit(int id, double dist) : docid(id), distance(dist) {} }; diff --git a/eval/src/tests/ann/xp-lsh-nns.cpp b/eval/src/tests/ann/xp-lsh-nns.cpp index bdc61a39610..1557da0b84c 100644 --- a/eval/src/tests/ann/xp-lsh-nns.cpp +++ b/eval/src/tests/ann/xp-lsh-nns.cpp @@ -130,7 +130,7 @@ struct LshHit { double distance; uint32_t docid; int hash_distance; - LshHit() : distance(0.0), docid(0u), hash_distance(0) {} + LshHit() noexcept : distance(0.0), docid(0u), hash_distance(0) {} LshHit(int id, double dist, int hd = 0) : distance(dist), docid(id), hash_distance(hd) {} }; diff --git a/eval/src/tests/instruction/l2_distance/CMakeLists.txt b/eval/src/tests/instruction/l2_distance/CMakeLists.txt new file mode 100644 index 00000000000..1e0fc69a3f9 --- /dev/null +++ b/eval/src/tests/instruction/l2_distance/CMakeLists.txt @@ -0,0 +1,10 @@ +# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+ +vespa_add_executable(eval_l2_distance_test_app TEST + SOURCES + l2_distance_test.cpp + DEPENDS + vespaeval + GTest::GTest +) +vespa_add_test(NAME eval_l2_distance_test_app COMMAND eval_l2_distance_test_app) diff --git a/eval/src/tests/instruction/l2_distance/l2_distance_test.cpp b/eval/src/tests/instruction/l2_distance/l2_distance_test.cpp new file mode 100644 index 00000000000..2cba9dfb18e --- /dev/null +++ b/eval/src/tests/instruction/l2_distance/l2_distance_test.cpp @@ -0,0 +1,96 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. + +#include <vespa/eval/eval/fast_value.h> +#include <vespa/eval/eval/tensor_function.h> +#include <vespa/eval/eval/test/eval_fixture.h> +#include <vespa/eval/eval/test/gen_spec.h> +#include <vespa/eval/instruction/l2_distance.h> +#include <vespa/vespalib/util/stash.h> +#include <vespa/vespalib/util/stringfmt.h> + +#include <vespa/vespalib/util/require.h> +#include <vespa/vespalib/gtest/gtest.h> + +using namespace vespalib; +using namespace vespalib::eval; +using namespace vespalib::eval::test; + +const ValueBuilderFactory &prod_factory = FastValueBuilderFactory::get(); + +//----------------------------------------------------------------------------- + +void verify(const TensorSpec &a, const TensorSpec &b, const vespalib::string &expr, bool optimized = true) { + EvalFixture::ParamRepo param_repo; + param_repo.add("a", a).add("b", b); + EvalFixture fast_fixture(prod_factory, expr, param_repo, true); + EXPECT_EQ(fast_fixture.result(), EvalFixture::ref(expr, param_repo)); + EXPECT_EQ(fast_fixture.find_all<L2Distance>().size(), optimized ? 
1 : 0); +} + +void verify_cell_types(GenSpec a, GenSpec b, const vespalib::string &expr, bool optimized = true) { + for (CellType act : CellTypeUtils::list_types()) { + for (CellType bct : CellTypeUtils::list_types()) { + if (optimized && (act == bct) && (act != CellType::BFLOAT16)) { + verify(a.cpy().cells(act), b.cpy().cells(bct), expr, true); + } else { + verify(a.cpy().cells(act), b.cpy().cells(bct), expr, false); + } + } + } +} + +//----------------------------------------------------------------------------- + +GenSpec gen(const vespalib::string &desc, int bias) { + return GenSpec::from_desc(desc).cells(CellType::FLOAT).seq(N(bias)); +} + +//----------------------------------------------------------------------------- + +vespalib::string sq_l2 = "reduce((a-b)^2,sum)"; +vespalib::string alt_sq_l2 = "reduce(map((a-b),f(x)(x*x)),sum)"; + +//----------------------------------------------------------------------------- + +TEST(L2DistanceTest, squared_l2_distance_can_be_optimized) { + verify_cell_types(gen("x5", 3), gen("x5", 7), sq_l2); + verify_cell_types(gen("x5", 3), gen("x5", 7), alt_sq_l2); +} + +TEST(L2DistanceTest, trivial_dimensions_are_ignored) { + verify(gen("x5y1", 3), gen("x5", 7), sq_l2); + verify(gen("x5", 3), gen("x5y1", 7), sq_l2); +} + +TEST(L2DistanceTest, multiple_dimensions_can_be_used) { + verify(gen("x5y3", 3), gen("x5y3", 7), sq_l2); +} + +//----------------------------------------------------------------------------- + +TEST(L2DistanceTest, inputs_must_be_dense) { + verify(gen("x5_1", 3), gen("x5_1", 7), sq_l2, false); + verify(gen("x5_1y3", 3), gen("x5_1y3", 7), sq_l2, false); + verify(gen("x5", 3), GenSpec(7), sq_l2, false); + verify(GenSpec(3), gen("x5", 7), sq_l2, false); +} + +TEST(L2DistanceTest, result_must_be_double) { + verify(gen("x5y1", 3), gen("x5y1", 7), "reduce((a-b)^2,sum,x)", false); + verify(gen("x5y1_1", 3), gen("x5y1_1", 7), "reduce((a-b)^2,sum,x)", false); +} + +TEST(L2DistanceTest, dimensions_must_match) { + 
verify(gen("x5y3", 3), gen("x5", 7), sq_l2, false); + verify(gen("x5", 3), gen("x5y3", 7), sq_l2, false); +} + +TEST(L2DistanceTest, similar_expressions_are_not_optimized) { + verify(gen("x5", 3), gen("x5", 7), "reduce((a-b)^2,prod)", false); + verify(gen("x5", 3), gen("x5", 7), "reduce((a-b)^3,sum)", false); + verify(gen("x5", 3), gen("x5", 7), "reduce((a+b)^2,sum)", false); +} + +//----------------------------------------------------------------------------- + +GTEST_MAIN_RUN_ALL_TESTS() diff --git a/eval/src/vespa/eval/eval/aggr.h b/eval/src/vespa/eval/eval/aggr.h index a932d0ac932..133d9b520cd 100644 --- a/eval/src/vespa/eval/eval/aggr.h +++ b/eval/src/vespa/eval/eval/aggr.h @@ -90,7 +90,7 @@ private: size_t _cnt; public: using value_type = T; - constexpr Avg() : _sum{0}, _cnt{0} {} + constexpr Avg() noexcept : _sum{0}, _cnt{0} {} constexpr Avg(T value) : _sum{value}, _cnt{1} {} constexpr void sample(T value) { _sum += value; @@ -109,7 +109,7 @@ private: size_t _cnt; public: using value_type = T; - constexpr Count() : _cnt{0} {} + constexpr Count() noexcept : _cnt{0} {} constexpr Count(T) : _cnt{1} {} constexpr void sample(T) { ++_cnt; } constexpr void merge(const Count &rhs) { _cnt += rhs._cnt; } @@ -122,7 +122,7 @@ private: T _prod; public: using value_type = T; - constexpr Prod() : _prod{null_value()} {} + constexpr Prod() noexcept : _prod{null_value()} {} constexpr Prod(T value) : _prod{value} {} constexpr void sample(T value) { _prod = combine(_prod, value); } constexpr void merge(const Prod &rhs) { _prod = combine(_prod, rhs._prod); } @@ -137,7 +137,7 @@ private: T _sum; public: using value_type = T; - constexpr Sum() : _sum{null_value()} {} + constexpr Sum() noexcept : _sum{null_value()} {} constexpr Sum(T value) : _sum{value} {} constexpr void sample(T value) { _sum = combine(_sum, value); } constexpr void merge(const Sum &rhs) { _sum = combine(_sum, rhs._sum); } @@ -152,7 +152,7 @@ private: T _max; public: using value_type = T; - constexpr Max() : 
_max{null_value()} {} + constexpr Max() noexcept : _max{null_value()} {} constexpr Max(T value) : _max{value} {} constexpr void sample(T value) { _max = combine(_max, value); } constexpr void merge(const Max &rhs) { _max = combine(_max, rhs._max); } @@ -167,7 +167,7 @@ private: std::vector<T> _seen; public: using value_type = T; - constexpr Median() : _seen() {} + constexpr Median() noexcept : _seen() {} constexpr Median(T value) : _seen({value}) {} constexpr void sample(T value) { _seen.push_back(value); } constexpr void merge(const Median &rhs) { @@ -205,7 +205,7 @@ private: T _min; public: using value_type = T; - constexpr Min() : _min{null_value()} {} + constexpr Min() noexcept : _min{null_value()} {} constexpr Min(T value) : _min{value} {} constexpr void sample(T value) { _min = combine(_min, value); } constexpr void merge(const Min &rhs) { _min = combine(_min, rhs._min); } diff --git a/eval/src/vespa/eval/eval/hamming_distance.h b/eval/src/vespa/eval/eval/hamming_distance.h index 50c59c46a60..e7cfc88661d 100644 --- a/eval/src/vespa/eval/eval/hamming_distance.h +++ b/eval/src/vespa/eval/eval/hamming_distance.h @@ -5,8 +5,8 @@ namespace vespalib::eval { inline double hamming_distance(double a, double b) { - uint8_t x = (uint8_t) a; - uint8_t y = (uint8_t) b; + uint8_t x = (uint8_t) (int8_t) a; + uint8_t y = (uint8_t) (int8_t) b; return __builtin_popcount(x ^ y); } diff --git a/eval/src/vespa/eval/eval/llvm/compile_cache.cpp b/eval/src/vespa/eval/eval/llvm/compile_cache.cpp index a439520677a..43ed724e010 100644 --- a/eval/src/vespa/eval/eval/llvm/compile_cache.cpp +++ b/eval/src/vespa/eval/eval/llvm/compile_cache.cpp @@ -4,8 +4,7 @@ #include <vespa/eval/eval/key_gen.h> #include <thread> -namespace vespalib { -namespace eval { +namespace vespalib::eval { std::mutex CompileCache::_lock{}; CompileCache::Map CompileCache::_cached{}; @@ -148,5 +147,4 @@ CompileCache::CompileTask::run() result->cond.notify_all(); } -} // namespace vespalib::eval -} // namespace 
vespalib +} diff --git a/eval/src/vespa/eval/eval/llvm/llvm_wrapper.cpp b/eval/src/vespa/eval/eval/llvm/llvm_wrapper.cpp index 8c1e2fb525c..512e12bec71 100644 --- a/eval/src/vespa/eval/eval/llvm/llvm_wrapper.cpp +++ b/eval/src/vespa/eval/eval/llvm/llvm_wrapper.cpp @@ -725,7 +725,8 @@ LLVMWrapper::compile(llvm::raw_ostream * dumpStream) if (dumpStream) { _module->print(*dumpStream, nullptr); } - _engine.reset(llvm::EngineBuilder(std::move(_module)).setOptLevel(llvm::CodeGenOpt::Aggressive).create()); + // Set relocation model to silence valgrind on CentOS 8 / aarch64 + _engine.reset(llvm::EngineBuilder(std::move(_module)).setOptLevel(llvm::CodeGenOpt::Aggressive).setRelocationModel(llvm::Reloc::Static).create()); assert(_engine && "llvm jit not available for your platform"); _engine->finalizeObject(); } diff --git a/eval/src/vespa/eval/eval/optimize_tensor_function.cpp b/eval/src/vespa/eval/eval/optimize_tensor_function.cpp index 09814cc0b06..e1520d4deb2 100644 --- a/eval/src/vespa/eval/eval/optimize_tensor_function.cpp +++ b/eval/src/vespa/eval/eval/optimize_tensor_function.cpp @@ -30,6 +30,7 @@ #include <vespa/eval/instruction/dense_tensor_create_function.h> #include <vespa/eval/instruction/dense_tensor_peek_function.h> #include <vespa/eval/instruction/dense_hamming_distance.h> +#include <vespa/eval/instruction/l2_distance.h> #include <vespa/log/log.h> LOG_SETUP(".eval.eval.optimize_tensor_function"); @@ -56,11 +57,16 @@ const TensorFunction &optimize_for_factory(const ValueBuilderFactory &, const Te Child root(expr); run_optimize_pass(root, [&stash](const Child &child) { + child.set(PowAsMapOptimizer::optimize(child.get(), stash)); + }); + run_optimize_pass(root, [&stash](const Child &child) + { child.set(SumMaxDotProductFunction::optimize(child.get(), stash)); }); run_optimize_pass(root, [&stash](const Child &child) { child.set(BestSimilarityFunction::optimize(child.get(), stash)); + child.set(L2Distance::optimize(child.get(), stash)); }); 
run_optimize_pass(root, [&stash](const Child &child) { @@ -83,7 +89,6 @@ const TensorFunction &optimize_for_factory(const ValueBuilderFactory &, const Te child.set(DenseLambdaPeekOptimizer::optimize(child.get(), stash)); child.set(UnpackBitsFunction::optimize(child.get(), stash)); child.set(FastRenameOptimizer::optimize(child.get(), stash)); - child.set(PowAsMapOptimizer::optimize(child.get(), stash)); child.set(InplaceMapFunction::optimize(child.get(), stash)); child.set(MixedSimpleJoinFunction::optimize(child.get(), stash)); child.set(JoinWithNumberFunction::optimize(child.get(), stash)); diff --git a/eval/src/vespa/eval/eval/typed_cells.h b/eval/src/vespa/eval/eval/typed_cells.h index 872488527c2..b8640698d13 100644 --- a/eval/src/vespa/eval/eval/typed_cells.h +++ b/eval/src/vespa/eval/eval/typed_cells.h @@ -20,8 +20,8 @@ struct TypedCells { explicit TypedCells(ConstArrayRef<BFloat16> cells) : data(cells.begin()), type(CellType::BFLOAT16), size(cells.size()) {} explicit TypedCells(ConstArrayRef<Int8Float> cells) : data(cells.begin()), type(CellType::INT8), size(cells.size()) {} - TypedCells() : data(nullptr), type(CellType::DOUBLE), size(0) {} - TypedCells(const void *dp, CellType ct, size_t sz) : data(dp), type(ct), size(sz) {} + TypedCells() noexcept : data(nullptr), type(CellType::DOUBLE), size(0) {} + TypedCells(const void *dp, CellType ct, size_t sz) noexcept : data(dp), type(ct), size(sz) {} template <typename T> bool check_type() const { return vespalib::eval::check_cell_type<T>(type); } diff --git a/eval/src/vespa/eval/instruction/CMakeLists.txt b/eval/src/vespa/eval/instruction/CMakeLists.txt index a462ece4734..56184c113d4 100644 --- a/eval/src/vespa/eval/instruction/CMakeLists.txt +++ b/eval/src/vespa/eval/instruction/CMakeLists.txt @@ -30,6 +30,7 @@ vespa_add_library(eval_instruction OBJECT index_lookup_table.cpp inplace_map_function.cpp join_with_number_function.cpp + l2_distance.cpp mixed_inner_product_function.cpp mixed_simple_join_function.cpp 
pow_as_map_optimizer.cpp diff --git a/eval/src/vespa/eval/instruction/l2_distance.cpp b/eval/src/vespa/eval/instruction/l2_distance.cpp new file mode 100644 index 00000000000..3f1e7632431 --- /dev/null +++ b/eval/src/vespa/eval/instruction/l2_distance.cpp @@ -0,0 +1,96 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. + +#include "l2_distance.h" +#include <vespa/eval/eval/operation.h> +#include <vespa/eval/eval/value.h> +#include <vespa/vespalib/hwaccelrated/iaccelrated.h> +#include <vespa/vespalib/util/require.h> + +#include <vespa/log/log.h> +LOG_SETUP(".eval.instruction.l2_distance"); + +namespace vespalib::eval { + +using namespace tensor_function; + +namespace { + +static const auto &hw = hwaccelrated::IAccelrated::getAccelerator(); + +template <typename T> +double sq_l2(const Value &lhs, const Value &rhs, size_t len) { + return hw.squaredEuclideanDistance((const T *)lhs.cells().data, (const T *)rhs.cells().data, len); +} + +template <> +double sq_l2<Int8Float>(const Value &lhs, const Value &rhs, size_t len) { + return sq_l2<int8_t>(lhs, rhs, len); +} + +template <typename CT> +void my_squared_l2_distance_op(InterpretedFunction::State &state, uint64_t vector_size) { + double result = sq_l2<CT>(state.peek(1), state.peek(0), vector_size); + state.pop_pop_push(state.stash.create<DoubleValue>(result)); +} + +struct SelectOp { + template <typename CT> + static InterpretedFunction::op_function invoke() { + constexpr bool is_bfloat16 = std::is_same_v<CT, BFloat16>; + if constexpr (!is_bfloat16) { + return my_squared_l2_distance_op<CT>; + } else { + abort(); + } + } +}; + +bool compatible_cell_types(CellType lhs, CellType rhs) { + return ((lhs == rhs) && ((lhs == CellType::INT8) || + (lhs == CellType::FLOAT) || + (lhs == CellType::DOUBLE))); +} + +bool compatible_types(const ValueType &lhs, const ValueType &rhs) { + return (compatible_cell_types(lhs.cell_type(), rhs.cell_type()) && + lhs.is_dense() && 
rhs.is_dense() && + (lhs.nontrivial_indexed_dimensions() == rhs.nontrivial_indexed_dimensions())); +} + +} // namespace <unnamed> + +L2Distance::L2Distance(const TensorFunction &lhs_in, const TensorFunction &rhs_in) + : tensor_function::Op2(ValueType::double_type(), lhs_in, rhs_in) +{ +} + +InterpretedFunction::Instruction +L2Distance::compile_self(const ValueBuilderFactory &, Stash &) const +{ + auto lhs_t = lhs().result_type(); + auto rhs_t = rhs().result_type(); + REQUIRE_EQ(lhs_t.cell_type(), rhs_t.cell_type()); + REQUIRE_EQ(lhs_t.dense_subspace_size(), rhs_t.dense_subspace_size()); + auto op = typify_invoke<1, TypifyCellType, SelectOp>(lhs_t.cell_type()); + return InterpretedFunction::Instruction(op, lhs_t.dense_subspace_size()); +} + +const TensorFunction & +L2Distance::optimize(const TensorFunction &expr, Stash &stash) +{ + auto reduce = as<Reduce>(expr); + if (reduce && (reduce->aggr() == Aggr::SUM) && expr.result_type().is_double()) { + auto map = as<Map>(reduce->child()); + if (map && (map->function() == operation::Square::f)) { + auto join = as<Join>(map->child()); + if (join && (join->function() == operation::Sub::f)) { + if (compatible_types(join->lhs().result_type(), join->rhs().result_type())) { + return stash.create<L2Distance>(join->lhs(), join->rhs()); + } + } + } + } + return expr; +} + +} // namespace diff --git a/eval/src/vespa/eval/instruction/l2_distance.h b/eval/src/vespa/eval/instruction/l2_distance.h new file mode 100644 index 00000000000..95b11b6c229 --- /dev/null +++ b/eval/src/vespa/eval/instruction/l2_distance.h @@ -0,0 +1,21 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. + +#pragma once + +#include <vespa/eval/eval/tensor_function.h> + +namespace vespalib::eval { + +/** + * Tensor function for a squared euclidean distance producing a scalar result. 
+ **/ +class L2Distance : public tensor_function::Op2 +{ +public: + L2Distance(const TensorFunction &lhs_in, const TensorFunction &rhs_in); + InterpretedFunction::Instruction compile_self(const ValueBuilderFactory &factory, Stash &stash) const override; + bool result_is_mutable() const override { return true; } + static const TensorFunction &optimize(const TensorFunction &expr, Stash &stash); +}; + +} // namespace diff --git a/fastos/src/vespa/fastos/file.cpp b/fastos/src/vespa/fastos/file.cpp index 0764c9b1b66..1382aef7386 100644 --- a/fastos/src/vespa/fastos/file.cpp +++ b/fastos/src/vespa/fastos/file.cpp @@ -39,7 +39,7 @@ static const size_t MAX_WRITE_CHUNK_SIZE = 0x4000000; // 64 MB FastOS_FileInterface::FastOS_FileInterface(const char *filename) : _fAdviseOptions(_defaultFAdviseOptions), _writeChunkSize(MAX_WRITE_CHUNK_SIZE), - _filename(nullptr), + _filename(), _openFlags(0), _directIOEnabled(false), _syncWritesEnabled(false) @@ -49,10 +49,7 @@ FastOS_FileInterface::FastOS_FileInterface(const char *filename) } -FastOS_FileInterface::~FastOS_FileInterface() -{ - free(_filename); -} +FastOS_FileInterface::~FastOS_FileInterface() = default; bool FastOS_FileInterface::InitializeClass () { @@ -358,18 +355,14 @@ FastOS_FileInterface::MakeDirIfNotPresentOrExit(const char *name) void FastOS_FileInterface::SetFileName(const char *filename) { - if (_filename != nullptr) { - free(_filename); - } - - _filename = strdup(filename); + _filename = filename; } const char * FastOS_FileInterface::GetFileName() const { - return (_filename != nullptr) ? 
_filename : ""; + return _filename.c_str(); } @@ -502,11 +495,8 @@ void FastOS_FileInterface::dropFromCache() const } FastOS_DirectoryScanInterface::FastOS_DirectoryScanInterface(const char *path) - : _searchPath(strdup(path)) + : _searchPath(path) { } -FastOS_DirectoryScanInterface::~FastOS_DirectoryScanInterface() -{ - free(_searchPath); -} +FastOS_DirectoryScanInterface::~FastOS_DirectoryScanInterface() = default; diff --git a/fastos/src/vespa/fastos/file.h b/fastos/src/vespa/fastos/file.h index 40b33e49b35..2d83a1766f0 100644 --- a/fastos/src/vespa/fastos/file.h +++ b/fastos/src/vespa/fastos/file.h @@ -88,7 +88,7 @@ private: void WriteBufInternal(const void *buffer, size_t length); protected: - char *_filename; + std::string _filename; unsigned int _openFlags; bool _directIOEnabled; bool _syncWritesEnabled; @@ -726,7 +726,7 @@ private: FastOS_DirectoryScanInterface& operator= (const FastOS_DirectoryScanInterface&); protected: - char *_searchPath; + std::string _searchPath; public: @@ -750,7 +750,7 @@ public: * This is an internal copy of the path specified in the constructor. * @return Search path string. */ - const char *GetSearchPath () { return _searchPath; } + const char *GetSearchPath () { return _searchPath.c_str(); } /** * Read the next entry in the directory scan. Failure indicates diff --git a/fastos/src/vespa/fastos/process.cpp b/fastos/src/vespa/fastos/process.cpp index 29c53fe9326..332d82c6aad 100644 --- a/fastos/src/vespa/fastos/process.cpp +++ b/fastos/src/vespa/fastos/process.cpp @@ -1,14 +1,13 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
#include "process.h" -#include <cstring> FastOS_ProcessInterface::FastOS_ProcessInterface (const char *cmdLine, bool pipeStdin, FastOS_ProcessRedirectListener *stdoutListener, FastOS_ProcessRedirectListener *stderrListener, int bufferSize) : - _cmdLine(nullptr), + _cmdLine(cmdLine), _pipeStdin(pipeStdin), _stdoutListener(stdoutListener), _stderrListener(stderrListener), @@ -16,10 +15,6 @@ FastOS_ProcessInterface::FastOS_ProcessInterface (const char *cmdLine, _next(nullptr), _prev(nullptr) { - _cmdLine = strdup(cmdLine); } -FastOS_ProcessInterface::~FastOS_ProcessInterface () -{ - free (_cmdLine); -} +FastOS_ProcessInterface::~FastOS_ProcessInterface () = default; diff --git a/fastos/src/vespa/fastos/process.h b/fastos/src/vespa/fastos/process.h index 99f045d2f56..25d5224817a 100644 --- a/fastos/src/vespa/fastos/process.h +++ b/fastos/src/vespa/fastos/process.h @@ -12,6 +12,7 @@ #include "types.h" #include <cstddef> +#include <string> /** * This class serves as a sink for redirected (piped) output from @@ -52,8 +53,8 @@ private: protected: - char *_cmdLine; - bool _pipeStdin; + std::string _cmdLine; + bool _pipeStdin; FastOS_ProcessRedirectListener *_stdoutListener; FastOS_ProcessRedirectListener *_stderrListener; @@ -179,10 +180,7 @@ public: * Get command line string. 
* @return Command line string */ - const char *GetCommandLine () - { - return _cmdLine; - } + const char *GetCommandLine () const { return _cmdLine.c_str(); } }; #include <vespa/fastos/unix_process.h> diff --git a/fastos/src/vespa/fastos/unix_file.cpp b/fastos/src/vespa/fastos/unix_file.cpp index 2ef8c2f55ff..8dd589d5144 100644 --- a/fastos/src/vespa/fastos/unix_file.cpp +++ b/fastos/src/vespa/fastos/unix_file.cpp @@ -258,7 +258,7 @@ FastOS_UNIX_File::Open(unsigned int openFlags, const char *filename) } unsigned int accessFlags = CalcAccessFlags(openFlags); - _filedes = open(_filename, accessFlags, 0664); + _filedes = open(_filename.c_str(), accessFlags, 0664); rc = (_filedes != -1); @@ -386,10 +386,9 @@ FastOS_UNIX_File::Delete(const char *name) bool FastOS_UNIX_File::Delete(void) { - assert(!IsOpened()); - assert(_filename != nullptr); + assert( ! IsOpened()); - return (unlink(_filename) == 0); + return (unlink(_filename.c_str()) == 0); } bool FastOS_UNIX_File::Rename (const char *currentFileName, const char *newFileName) diff --git a/fbench/src/fbench/fbench.cpp b/fbench/src/fbench/fbench.cpp index 92facaa3cae..0cd9498258e 100644 --- a/fbench/src/fbench/fbench.cpp +++ b/fbench/src/fbench/fbench.cpp @@ -443,7 +443,7 @@ FBench::Main(int argc, char *argv[]) keepAlive = false; break; case 'd': - base64Decode = false; + base64Decode = true; break; case 'x': // consuming x for backwards compability. 
This turned on header benchmark data diff --git a/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileDownloader.java b/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileDownloader.java index aab2f5a53fd..1821c8971e7 100644 --- a/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileDownloader.java +++ b/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileDownloader.java @@ -6,7 +6,6 @@ import com.yahoo.jrt.Supervisor; import com.yahoo.vespa.config.Connection; import com.yahoo.vespa.config.ConnectionPool; import com.yahoo.vespa.defaults.Defaults; -import com.yahoo.yolean.Exceptions; import java.io.File; import java.time.Duration; @@ -29,7 +28,7 @@ public class FileDownloader implements AutoCloseable { private static final Logger log = Logger.getLogger(FileDownloader.class.getName()); private static final Duration defaultTimeout = Duration.ofMinutes(3); - private static final Duration defaultSleepBetweenRetries = Duration.ofSeconds(10); + private static final Duration defaultSleepBetweenRetries = Duration.ofSeconds(5); public static final File defaultDownloadDirectory = new File(Defaults.getDefaults().underVespaHome("var/db/vespa/filedistribution")); private final ConnectionPool connectionPool; @@ -47,8 +46,8 @@ public class FileDownloader implements AutoCloseable { this(connectionPool, supervisor, defaultDownloadDirectory, timeout, defaultSleepBetweenRetries); } - public FileDownloader(ConnectionPool connectionPool, Supervisor supervisor, File downloadDirectory) { - this(connectionPool, supervisor, downloadDirectory, defaultTimeout, defaultSleepBetweenRetries); + public FileDownloader(ConnectionPool connectionPool, Supervisor supervisor, File downloadDirectory, Duration timeout) { + this(connectionPool, supervisor, downloadDirectory, timeout, defaultSleepBetweenRetries); } public FileDownloader(ConnectionPool connectionPool, @@ -69,16 +68,10 @@ public class FileDownloader implements AutoCloseable { 
downloadDirectory); } - public Optional<File> getFile(FileReference fileReference) { - return getFile(new FileReferenceDownload(fileReference)); - } - public Optional<File> getFile(FileReferenceDownload fileReferenceDownload) { try { return getFutureFile(fileReferenceDownload).get(timeout.toMillis(), TimeUnit.MILLISECONDS); } catch (InterruptedException | ExecutionException | TimeoutException e) { - log.log(Level.WARNING, "Failed downloading '" + fileReferenceDownload + - "', removing from download queue: " + Exceptions.toMessageString(e)); fileReferenceDownloader.failedDownloading(fileReferenceDownload.fileReference()); return Optional.empty(); } diff --git a/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileReceiver.java b/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileReceiver.java index 89a77599909..a2c5fee1e51 100644 --- a/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileReceiver.java +++ b/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileReceiver.java @@ -131,9 +131,7 @@ public class FileReceiver { throw new RuntimeException("Failed writing file: ", e); } finally { try { - if (inprogressFile.exists()) { - Files.delete(inprogressFile.toPath()); - } + Files.deleteIfExists(inprogressFile.toPath()); } catch (IOException e) { log.log(Level.SEVERE, "Failed deleting " + inprogressFile.getAbsolutePath() + ": " + e.getMessage(), e); } @@ -191,13 +189,14 @@ public class FileReceiver { } catch (FileAlreadyExistsException e) { // Don't fail if it already exists (we might get the file from several config servers when retrying, servers are down etc. // so it might be written already). Delete temp file/dir in that case, to avoid filling the disk. 
- log.log(Level.FINE, () -> "Failed moving file '" + tempFile.getAbsolutePath() + "' to '" + destination.getAbsolutePath() + - "', '" + destination.getAbsolutePath() + "' already exists"); - deleteFileOrDirectory(tempFile); + log.log(Level.FINE, () -> "Failed moving file '" + tempFile.getAbsolutePath() + "' to '" + + destination.getAbsolutePath() + "', it already exists"); } catch (IOException e) { String message = "Failed moving file '" + tempFile.getAbsolutePath() + "' to '" + destination.getAbsolutePath() + "'"; log.log(Level.SEVERE, message, e); throw new RuntimeException(message, e); + } finally { + deleteFileOrDirectory(tempFile); } } diff --git a/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileReferenceDownload.java b/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileReferenceDownload.java index 21e35bf67af..8d6f428eaef 100644 --- a/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileReferenceDownload.java +++ b/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileReferenceDownload.java @@ -18,11 +18,11 @@ public class FileReferenceDownload { private final boolean downloadFromOtherSourceIfNotFound; private final String client; - public FileReferenceDownload(FileReference fileReference) { - this(fileReference, true, "unknown"); + public FileReferenceDownload(FileReference fileReference, String client) { + this(fileReference, client, true); } - public FileReferenceDownload(FileReference fileReference, boolean downloadFromOtherSourceIfNotFound, String client) { + public FileReferenceDownload(FileReference fileReference, String client, boolean downloadFromOtherSourceIfNotFound) { Objects.requireNonNull(fileReference, "file reference cannot be null"); this.fileReference = fileReference; this.future = new CompletableFuture<>(); diff --git a/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileReferenceDownloader.java 
b/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileReferenceDownloader.java index 7b24098526c..21a9833c3e0 100644 --- a/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileReferenceDownloader.java +++ b/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileReferenceDownloader.java @@ -11,6 +11,7 @@ import com.yahoo.vespa.config.ConnectionPool; import java.io.File; import java.time.Duration; +import java.time.Instant; import java.util.Optional; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; @@ -53,34 +54,46 @@ public class FileReferenceDownloader { } private void waitUntilDownloadStarted(FileReferenceDownload fileReferenceDownload) { + Instant end = Instant.now().plus(downloadTimeout); FileReference fileReference = fileReferenceDownload.fileReference(); int retryCount = 0; Connection connection = connectionPool.getCurrent(); do { + backoff(retryCount); + if (FileDownloader.fileReferenceExists(fileReference, downloadDirectory)) return; if (startDownloadRpc(fileReferenceDownload, retryCount, connection)) return; - try { Thread.sleep(sleepBetweenRetries.toMillis()); } catch (InterruptedException e) { /* ignored */} retryCount++; - // There is no one connection that will always work for each file reference (each file reference might // exist on just one config server, and which one could be different for each file reference), so we // should get a new connection for every retry connection = connectionPool.switchConnection(connection); - } while (retryCount < 5); + } while (retryCount < 5 || Instant.now().isAfter(end)); fileReferenceDownload.future().completeExceptionally(new RuntimeException("Failed getting " + fileReference)); downloads.remove(fileReference); } + private void backoff(int retryCount) { + if (retryCount > 0) { + try { + long sleepTime = Math.min(120_000, (long) (Math.pow(2, retryCount)) * sleepBetweenRetries.toMillis()); + Thread.sleep(sleepTime); + } catch 
(InterruptedException e) { + /* ignored */ + } + } + } + Future<Optional<File>> startDownload(FileReferenceDownload fileReferenceDownload) { FileReference fileReference = fileReferenceDownload.fileReference(); Optional<FileReferenceDownload> inProgress = downloads.get(fileReference); if (inProgress.isPresent()) return inProgress.get().future(); - log.log(Level.FINE, () -> "Will download file reference '" + fileReference.value() + "' with timeout " + downloadTimeout); + log.log(Level.FINE, () -> "Will download " + fileReference + " with timeout " + downloadTimeout); downloads.add(fileReferenceDownload); downloadExecutor.submit(() -> waitUntilDownloadStarted(fileReferenceDownload)); return fileReferenceDownload.future(); @@ -92,23 +105,25 @@ public class FileReferenceDownloader { private boolean startDownloadRpc(FileReferenceDownload fileReferenceDownload, int retryCount, Connection connection) { Request request = createRequest(fileReferenceDownload); - connection.invokeSync(request, rpcTimeout(retryCount).getSeconds()); + Duration rpcTimeout = rpcTimeout(retryCount); + connection.invokeSync(request, rpcTimeout.getSeconds()); Level logLevel = (retryCount > 3 ? Level.INFO : Level.FINE); FileReference fileReference = fileReferenceDownload.fileReference(); if (validateResponse(request)) { log.log(Level.FINE, () -> "Request callback, OK. 
Req: " + request + "\nSpec: " + connection); if (request.returnValues().get(0).asInt32() == 0) { - log.log(Level.FINE, () -> "Found '" + fileReference + "' available at " + connection.getAddress()); + log.log(Level.FINE, () -> "Found " + fileReference + " available at " + connection.getAddress()); return true; } else { - log.log(logLevel, "'" + fileReference + "' not found at " + connection.getAddress()); + log.log(logLevel, fileReference + " not found at " + connection.getAddress()); return false; } } else { log.log(logLevel, "Downloading " + fileReference + " from " + connection.getAddress() + " failed: " + - request + ", error: " + request.errorMessage() + ", will switch config server for next request" + - " (retry " + retryCount + ", rpc timeout " + rpcTimeout(retryCount)); + request + ", error: " + request.errorCode() + "(" + request.errorMessage() + + "). Will switch config server for next request" + + " (retry " + retryCount + ", rpc timeout " + rpcTimeout + ")"); return false; } } diff --git a/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/LazyTemporaryStorageFileReferenceData.java b/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/LazyTemporaryStorageFileReferenceData.java index 0d4f207b48e..974d5ff1489 100644 --- a/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/LazyTemporaryStorageFileReferenceData.java +++ b/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/LazyTemporaryStorageFileReferenceData.java @@ -19,7 +19,7 @@ public class LazyTemporaryStorageFileReferenceData extends LazyFileReferenceData public void close() { try { super.close(); - Files.delete(file.toPath()); + Files.deleteIfExists(file.toPath()); } catch (IOException e) { throw new RuntimeException(e); } diff --git a/filedistribution/src/test/java/com/yahoo/vespa/filedistribution/FileDownloaderTest.java b/filedistribution/src/test/java/com/yahoo/vespa/filedistribution/FileDownloaderTest.java index 97b948ef5d4..e8bd63fc083 100644 
--- a/filedistribution/src/test/java/com/yahoo/vespa/filedistribution/FileDownloaderTest.java +++ b/filedistribution/src/test/java/com/yahoo/vespa/filedistribution/FileDownloaderTest.java @@ -79,7 +79,7 @@ public class FileDownloaderTest { fileDownloader.downloads().completedDownloading(fileReference, fileReferenceFullPath); // Check that we get correct path and content when asking for file reference - Optional<File> pathToFile = fileDownloader.getFile(fileReference); + Optional<File> pathToFile = getFile(fileReference); assertTrue(pathToFile.isPresent()); String downloadedFile = new File(fileReferenceFullPath, filename).getAbsolutePath(); assertEquals(new File(fileReferenceFullPath, filename).getAbsolutePath(), downloadedFile); @@ -96,7 +96,7 @@ public class FileDownloaderTest { FileReference fileReference = new FileReference("bar"); File fileReferenceFullPath = fileReferenceFullPath(downloadDir, fileReference); - assertFalse(fileReferenceFullPath.getAbsolutePath(), fileDownloader.getFile(fileReference).isPresent()); + assertFalse(fileReferenceFullPath.getAbsolutePath(), getFile(fileReference).isPresent()); // Verify download status when unable to download assertDownloadStatus(fileReference, 0.0); @@ -107,7 +107,7 @@ public class FileDownloaderTest { FileReference fileReference = new FileReference("baz"); File fileReferenceFullPath = fileReferenceFullPath(downloadDir, fileReference); - assertFalse(fileReferenceFullPath.getAbsolutePath(), fileDownloader.getFile(fileReference).isPresent()); + assertFalse(fileReferenceFullPath.getAbsolutePath(), getFile(fileReference).isPresent()); // Verify download status assertDownloadStatus(fileReference, 0.0); @@ -115,7 +115,7 @@ public class FileDownloaderTest { // Receives fileReference, should return and make it available to caller String filename = "abc.jar"; receiveFile(fileReference, filename, FileReferenceData.Type.file, "some other content"); - Optional<File> downloadedFile = fileDownloader.getFile(fileReference); + 
Optional<File> downloadedFile = getFile(fileReference); assertTrue(downloadedFile.isPresent()); File downloadedFileFullPath = new File(fileReferenceFullPath, filename); @@ -132,7 +132,7 @@ public class FileDownloaderTest { FileReference fileReference = new FileReference("fileReferenceToDirWithManyFiles"); File fileReferenceFullPath = fileReferenceFullPath(downloadDir, fileReference); - assertFalse(fileReferenceFullPath.getAbsolutePath(), fileDownloader.getFile(fileReference).isPresent()); + assertFalse(fileReferenceFullPath.getAbsolutePath(), getFile(fileReference).isPresent()); // Verify download status assertDownloadStatus(fileReference, 0.0); @@ -150,7 +150,7 @@ public class FileDownloaderTest { File tarFile = CompressedFileReference.compress(tempPath.toFile(), Arrays.asList(fooFile, barFile), new File(tempPath.toFile(), filename)); byte[] tarredContent = IOUtils.readFileBytes(tarFile); receiveFile(fileReference, filename, FileReferenceData.Type.compressed, tarredContent); - Optional<File> downloadedFile = fileDownloader.getFile(fileReference); + Optional<File> downloadedFile = getFile(fileReference); assertTrue(downloadedFile.isPresent()); File downloadedFoo = new File(fileReferenceFullPath, tempPath.relativize(fooFile.toPath()).toString()); @@ -174,7 +174,7 @@ public class FileDownloaderTest { FileReference fileReference = new FileReference("fileReference"); File fileReferenceFullPath = fileReferenceFullPath(downloadDir, fileReference); - assertFalse(fileReferenceFullPath.getAbsolutePath(), fileDownloader.getFile(fileReference).isPresent()); + assertFalse(fileReferenceFullPath.getAbsolutePath(), getFile(fileReference).isPresent()); // Getting file failed, verify download status and since there was an error is not downloading ATM assertDownloadStatus(fileReference, 0.0); @@ -183,7 +183,7 @@ public class FileDownloaderTest { // Receives fileReference, should return and make it available to caller String filename = "abc.jar"; receiveFile(fileReference, filename, 
FileReferenceData.Type.file, "some other content"); - Optional<File> downloadedFile = fileDownloader.getFile(fileReference); + Optional<File> downloadedFile = getFile(fileReference); assertTrue(downloadedFile.isPresent()); File downloadedFileFullPath = new File(fileReferenceFullPath, filename); assertEquals(downloadedFileFullPath.getAbsolutePath(), downloadedFile.get().getAbsolutePath()); @@ -207,7 +207,7 @@ public class FileDownloaderTest { FileReference fileReference = new FileReference("fileReference123"); File fileReferenceFullPath = fileReferenceFullPath(downloadDir, fileReference); - FileReferenceDownload fileReferenceDownload = new FileReferenceDownload(fileReference); + FileReferenceDownload fileReferenceDownload = new FileReferenceDownload(fileReference, "test"); Future<Future<Optional<File>>> future1 = executor.submit(() -> fileDownloader.getFutureFile(fileReferenceDownload)); do { @@ -242,15 +242,15 @@ public class FileDownloaderTest { FileDownloader fileDownloader = new FileDownloader(connectionPool, supervisor, downloadDir, timeout, sleepBetweenRetries); FileReference xyzzy = new FileReference("xyzzy"); // Should download since we do not have the file on disk - fileDownloader.downloadIfNeeded(new FileReferenceDownload(xyzzy)); + fileDownloader.downloadIfNeeded(new FileReferenceDownload(xyzzy, "test")); assertTrue(fileDownloader.isDownloading(xyzzy)); - assertFalse(fileDownloader.getFile(xyzzy).isPresent()); + assertFalse(getFile(xyzzy).isPresent()); // Receive files to simulate download receiveFile(xyzzy, "xyzzy.jar", FileReferenceData.Type.file, "content"); // Should not download, since file has already been downloaded - fileDownloader.downloadIfNeeded(new FileReferenceDownload(xyzzy)); + fileDownloader.downloadIfNeeded(new FileReferenceDownload(xyzzy, "test")); // and file should be available - assertTrue(fileDownloader.getFile(xyzzy).isPresent()); + assertTrue(getFile(xyzzy).isPresent()); } @Test @@ -296,6 +296,10 @@ public class FileDownloaderTest 
{ fileDownloader.downloads().completedDownloading(fileReference, file); } + private Optional<File> getFile(FileReference fileReference) { + return fileDownloader.getFile(new FileReferenceDownload(fileReference, "test")); + } + private static class MockConnection implements ConnectionPool, com.yahoo.vespa.config.Connection { private ResponseHandler responseHandler; diff --git a/flags/src/main/java/com/yahoo/vespa/flags/Flags.java b/flags/src/main/java/com/yahoo/vespa/flags/Flags.java index 6260ce07c3f..2a744de31b9 100644 --- a/flags/src/main/java/com/yahoo/vespa/flags/Flags.java +++ b/flags/src/main/java/com/yahoo/vespa/flags/Flags.java @@ -45,26 +45,6 @@ public class Flags { private static volatile TreeMap<FlagId, FlagDefinition> flags = new TreeMap<>(); - public static final UnboundBooleanFlag MAP_USER_NAMESPACE = defineFeatureFlag( - "map-user-namespace", false, - List.of("freva"), "2021-10-18", "2021-12-01", - "Whether host-admin should start containers with mapped UID/GID, will also chown all files under container storage.", - "Takes effect on next container restart.", - APPLICATION_ID, NODE_TYPE, HOSTNAME); - - public static final UnboundBooleanFlag USE_CGROUPS_V2 = defineFeatureFlag( - "use-cgroups-v2", false, - List.of("freva"), "2021-10-27", "2021-12-01", - "Whether a host should use CGroups v2", - "Will attempt to switch on next host admin tick (requires reboot).", - NODE_TYPE, HOSTNAME); - - public static final UnboundBooleanFlag MOUNT_READONLY = defineFeatureFlag( - "mount-readonly", false, - List.of("freva"), "2021-11-04", "2021-12-01", - "Whether host-admin should mount container-data and credential directories read-only when starting container", - "Takes effect on next container restart."); - public static final UnboundDoubleFlag DEFAULT_TERM_WISE_LIMIT = defineDoubleFlag( "default-term-wise-limit", 1.0, List.of("baldersheim"), "2020-12-02", "2022-01-01", @@ -215,13 +195,13 @@ public class Flags { public static final UnboundListFlag<String> 
ALLOWED_ATHENZ_PROXY_IDENTITIES = defineListFlag( "allowed-athenz-proxy-identities", List.of(), String.class, - List.of("bjorncs", "tokle"), "2021-02-10", "2021-12-01", + List.of("bjorncs", "tokle"), "2021-02-10", "2022-02-01", "Allowed Athenz proxy identities", "takes effect at redeployment"); public static final UnboundBooleanFlag GENERATE_NON_MTLS_ENDPOINT = defineFeatureFlag( "generate-non-mtls-endpoint", true, - List.of("tokle"), "2021-02-18", "2021-12-01", + List.of("tokle"), "2021-02-18", "2022-02-01", "Whether to generate the non-mtls endpoint", "Takes effect on next internal redeployment", APPLICATION_ID); @@ -250,7 +230,7 @@ public class Flags { public static final UnboundBooleanFlag IGNORE_MERGE_QUEUE_LIMIT = defineFeatureFlag( "ignore-merge-queue-limit", false, - List.of("vekterli", "geirst"), "2021-10-06", "2021-12-01", + List.of("vekterli", "geirst"), "2021-10-06", "2022-03-01", "Specifies if merges that are forwarded (chained) from another content node are always " + "allowed to be enqueued even if the queue is otherwise full.", "Takes effect at redeploy", @@ -265,7 +245,7 @@ public class Flags { public static final UnboundDoubleFlag MIN_NODE_RATIO_PER_GROUP = defineDoubleFlag( "min-node-ratio-per-group", 0.0, - List.of("geirst", "vekterli"), "2021-07-16", "2021-12-01", + List.of("geirst", "vekterli"), "2021-07-16", "2022-03-01", "Minimum ratio of nodes that have to be available (i.e. 
not Down) in any hierarchic content cluster group for the group to be Up", "Takes effect at redeployment", ZONE_ID, APPLICATION_ID); @@ -302,11 +282,18 @@ public class Flags { public static final UnboundBooleanFlag DELETE_UNMAINTAINED_CERTIFICATES = defineFeatureFlag( "delete-unmaintained-certificates", false, - List.of("andreer"), "2021-09-23", "2021-12-11", + List.of("andreer"), "2021-09-23", "2021-12-21", "Whether to delete certificates that are known by provider but not by controller", "Takes effect on next run of EndpointCertificateMaintainer" ); + public static final UnboundBooleanFlag USE_NEW_ENDPOINT_CERTIFICATE_PROVIDER_URL = defineFeatureFlag( + "use-new-endpoint-certificate-provider-url", false, + List.of("andreer"), "2021-12-14", "2022-01-14", + "Use the new URL for the endpoint certificate provider API", + "Takes effect immediately" + ); + public static final UnboundBooleanFlag ENABLE_TENANT_DEVELOPER_ROLE = defineFeatureFlag( "enable-tenant-developer-role", false, List.of("bjorncs"), "2021-09-23", "2021-12-31", @@ -333,7 +320,7 @@ public class Flags { public static final UnboundIntFlag DISTRIBUTOR_MERGE_BUSY_WAIT = defineIntFlag( "distributor-merge-busy-wait", 10, - List.of("geirst", "vekterli"), "2021-10-04", "2021-12-31", + List.of("geirst", "vekterli"), "2021-10-04", "2022-03-01", "Number of seconds that scheduling of new merge operations in the distributor should be inhibited " + "towards a content node that has indicated merge busy", "Takes effect at redeploy", @@ -362,11 +349,12 @@ public class Flags { public static final UnboundStringFlag JDK_VERSION = defineStringFlag( "jdk-version", "11", - List.of("hmusum"), "2021-10-25", "2021-11-25", + List.of("hmusum"), "2021-10-25", "2022-03-01", "JDK version to use on host and inside containers. 
Note application-id dimension only applies for container, " + "while hostname and node type applies for host.", "Takes effect on restart for Docker container and on next host-admin tick for host", APPLICATION_ID, + TENANT_ID, HOSTNAME, NODE_TYPE); @@ -377,12 +365,26 @@ public class Flags { "Triggers restart, takes effect immediately", ZONE_ID, APPLICATION_ID); - public static final UnboundBooleanFlag USE_FILE_DISTRIBUTION_CONNECTION_POOL = defineFeatureFlag( - "use-file-distribution-connection-pool", false, - List.of("hmusum"), "2021-11-16", "2021-12-16", - "Whether to use FileDistributionConnectionPool instead of JRTConnectionPool for file downloads", - "Takes effect on config server restart", - ZONE_ID); + public static final UnboundBooleanFlag USE_V8_GEO_POSITIONS = defineFeatureFlag( + "use-v8-geo-positions", false, + List.of("arnej"), "2021-11-15", "2022-12-31", + "Use Vespa 8 types and formats for geographical positions", + "Takes effect at redeployment", + ZONE_ID, APPLICATION_ID); + + public static final UnboundBooleanFlag USE_LEGACY_LB_SERVICES = defineFeatureFlag( + "use-legacy-lb-services", false, + List.of("tokle"), "2021-11-22", "2021-12-31", + "Whether to generate routing table based on legacy lb-services config", + "Takes effect on container reboot", + ZONE_ID, HOSTNAME); + + public static final UnboundBooleanFlag USE_V8_DOC_MANAGER_CFG = defineFeatureFlag( + "use-v8-doc-manager-cfg", false, + List.of("arnej", "baldersheim"), "2021-12-09", "2022-12-31", + "Use new (preparing for Vespa 8) section in documentmanager.def", + "Takes effect at redeployment", + ZONE_ID, APPLICATION_ID); /** WARNING: public for testing: All flags should be defined in {@link Flags}. 
*/ public static UnboundBooleanFlag defineFeatureFlag(String flagId, boolean defaultValue, List<String> owners, diff --git a/flags/src/main/java/com/yahoo/vespa/flags/PermanentFlags.java b/flags/src/main/java/com/yahoo/vespa/flags/PermanentFlags.java index 593c5e5f05b..34547d14616 100644 --- a/flags/src/main/java/com/yahoo/vespa/flags/PermanentFlags.java +++ b/flags/src/main/java/com/yahoo/vespa/flags/PermanentFlags.java @@ -114,7 +114,7 @@ public class PermanentFlags { ZONE_ID, APPLICATION_ID); public static final UnboundStringFlag ZOOKEEPER_SERVER_VERSION = defineStringFlag( - "zookeeper-server-version", "3.6.3", + "zookeeper-server-version", "3.7.0", "ZooKeeper server version, a jar file zookeeper-server-<ZOOKEEPER_SERVER_VERSION>-jar-with-dependencies.jar must exist", "Takes effect on restart of Docker container", NODE_TYPE, APPLICATION_ID, HOSTNAME); diff --git a/fnet/src/tests/info/info.cpp b/fnet/src/tests/info/info.cpp index f2299df839e..4271546e647 100644 --- a/fnet/src/tests/info/info.cpp +++ b/fnet/src/tests/info/info.cpp @@ -77,10 +77,10 @@ TEST("size of important objects") #else constexpr size_t MUTEX_SIZE = 40u; #endif - EXPECT_EQUAL(MUTEX_SIZE + 128u, sizeof(FNET_IOComponent)); + EXPECT_EQUAL(MUTEX_SIZE + sizeof(std::string) + 112u, sizeof(FNET_IOComponent)); EXPECT_EQUAL(32u, sizeof(FNET_Channel)); EXPECT_EQUAL(40u, sizeof(FNET_PacketQueue_NoLock)); - EXPECT_EQUAL(MUTEX_SIZE + 432u, sizeof(FNET_Connection)); + EXPECT_EQUAL(MUTEX_SIZE + sizeof(std::string) + 416u, sizeof(FNET_Connection)); EXPECT_EQUAL(48u, sizeof(std::condition_variable)); EXPECT_EQUAL(56u, sizeof(FNET_DataBuffer)); EXPECT_EQUAL(8u, sizeof(FNET_Context)); diff --git a/fnet/src/vespa/fnet/connection.h b/fnet/src/vespa/fnet/connection.h index 6efb147d37f..e86b670b7e5 100644 --- a/fnet/src/vespa/fnet/connection.h +++ b/fnet/src/vespa/fnet/connection.h @@ -53,7 +53,7 @@ public: class FNET_Connection : public FNET_IOComponent { public: - enum State { + enum State : uint8_t { 
FNET_CONNECTING, FNET_CONNECTED, FNET_CLOSING, @@ -118,9 +118,6 @@ private: static std::atomic<uint64_t> _num_connections; // total number of connections - FNET_Connection(const FNET_Connection &); - FNET_Connection &operator=(const FNET_Connection &); - /** * Get next ID that may be used for multiplexing on this connection. @@ -245,6 +242,8 @@ private: */ vespalib::string GetPeerSpec() const; public: + FNET_Connection(const FNET_Connection &) = delete; + FNET_Connection &operator=(const FNET_Connection &) = delete; /** * Construct a connection in server aspect. diff --git a/fnet/src/vespa/fnet/frt/reflection.cpp b/fnet/src/vespa/fnet/frt/reflection.cpp index 0719c8b4c71..211e681df94 100644 --- a/fnet/src/vespa/fnet/frt/reflection.cpp +++ b/fnet/src/vespa/fnet/frt/reflection.cpp @@ -9,42 +9,30 @@ FRT_Method::FRT_Method(const char * name, const char * paramSpec, const char * r FRT_METHOD_PT method, FRT_Invokable * handler) : _hashNext(nullptr), _listNext(nullptr), - _name(strdup(name)), - _paramSpec(strdup(paramSpec)), - _returnSpec(strdup(returnSpec)), + _name(name), + _paramSpec(paramSpec), + _returnSpec(returnSpec), _method(method), _handler(handler), - _docLen(0), - _doc(nullptr) + _doc() { - assert(_name != nullptr); - assert(_paramSpec != nullptr); - assert(_returnSpec != nullptr); } -FRT_Method::~FRT_Method() { - free(_name); - free(_paramSpec); - free(_returnSpec); - free(_doc); -} +FRT_Method::~FRT_Method() = default; void FRT_Method::SetDocumentation(FRT_Values *values) { - free(_doc); - _docLen = values->GetLength(); - _doc = (char *) malloc(_docLen); - assert(_doc != nullptr); + _doc.resize(values->GetLength()); - FNET_DataBuffer buf(_doc, _docLen); + FNET_DataBuffer buf(&_doc[0], _doc.size()); values->EncodeCopy(&buf); } void FRT_Method::GetDocumentation(FRT_Values *values) { - FNET_DataBuffer buf(_doc, _docLen); - buf.FreeToData(_docLen); - values->DecodeCopy(&buf, _docLen); + FNET_DataBuffer buf(&_doc[0], _doc.size()); + buf.FreeToData(_doc.size()); + 
values->DecodeCopy(&buf, _doc.size()); } FRT_ReflectionManager::FRT_ReflectionManager() diff --git a/fnet/src/vespa/fnet/frt/reflection.h b/fnet/src/vespa/fnet/frt/reflection.h index c867bbb45ec..6267cafeeb1 100644 --- a/fnet/src/vespa/fnet/frt/reflection.h +++ b/fnet/src/vespa/fnet/frt/reflection.h @@ -3,7 +3,8 @@ #pragma once #include "invokable.h" -#include <cstdint> +#include <string> +#include <vector> class FRT_Values; class FRT_Supervisor; @@ -14,20 +15,18 @@ class FRT_Method friend class FRT_ReflectionManager; private: - FRT_Method *_hashNext; // list of methods in hash bucket - FRT_Method *_listNext; // list of all methods - char *_name; // method name - char *_paramSpec; // method parameter spec - char *_returnSpec; // method return spec - FRT_METHOD_PT _method; // method pointer - FRT_Invokable *_handler; // method handler - uint32_t _docLen; // method documentation length - char *_doc; // method documentation - - FRT_Method(const FRT_Method &); - FRT_Method &operator=(const FRT_Method &); + FRT_Method *_hashNext; // list of methods in hash bucket + FRT_Method *_listNext; // list of all methods + std::string _name; // method name + std::string _paramSpec; // method parameter spec + std::string _returnSpec; // method return spec + FRT_METHOD_PT _method; // method pointer + FRT_Invokable *_handler; // method handler + std::vector<char> _doc; // method documentation public: + FRT_Method(const FRT_Method &) = delete; + FRT_Method &operator=(const FRT_Method &) = delete; FRT_Method(const char *name, const char *paramSpec, const char *returnSpec, @@ -37,9 +36,9 @@ public: ~FRT_Method(); FRT_Method *GetNext() { return _listNext; } - const char *GetName() { return _name; } - const char *GetParamSpec() { return _paramSpec; } - const char *GetReturnSpec() { return _returnSpec; } + const char *GetName() { return _name.c_str(); } + const char *GetParamSpec() { return _paramSpec.c_str(); } + const char *GetReturnSpec() { return _returnSpec.c_str(); } FRT_METHOD_PT 
GetMethod() { return _method; } FRT_Invokable *GetHandler() { return _handler; } void SetDocumentation(FRT_Values *values); diff --git a/fnet/src/vespa/fnet/iocomponent.cpp b/fnet/src/vespa/fnet/iocomponent.cpp index eeda3e12bea..f08718c0c5c 100644 --- a/fnet/src/vespa/fnet/iocomponent.cpp +++ b/fnet/src/vespa/fnet/iocomponent.cpp @@ -12,23 +12,20 @@ FNET_IOComponent::FNET_IOComponent(FNET_TransportThread *owner, : _ioc_next(nullptr), _ioc_prev(nullptr), _ioc_owner(owner), - _ioc_socket_fd(socket_fd), _ioc_selector(nullptr), - _ioc_spec(nullptr), + _ioc_spec(spec), _flags(shouldTimeOut), + _ioc_socket_fd(socket_fd), + _ioc_refcnt(1), _ioc_timestamp(vespalib::steady_clock::now()), _ioc_lock(), - _ioc_cond(), - _ioc_refcnt(1) + _ioc_cond() { - _ioc_spec = strdup(spec); - assert(_ioc_spec != nullptr); } FNET_IOComponent::~FNET_IOComponent() { - free(_ioc_spec); assert(_ioc_selector == nullptr); } diff --git a/fnet/src/vespa/fnet/iocomponent.h b/fnet/src/vespa/fnet/iocomponent.h index 9220b6dfe8f..b4f061e5bc0 100644 --- a/fnet/src/vespa/fnet/iocomponent.h +++ b/fnet/src/vespa/fnet/iocomponent.h @@ -21,9 +21,6 @@ class FNET_IOComponent { friend class FNET_TransportThread; - FNET_IOComponent(const FNET_IOComponent &); - FNET_IOComponent &operator=(const FNET_IOComponent &); - using Selector = vespalib::Selector<FNET_IOComponent>; struct Flags { @@ -44,16 +41,18 @@ protected: FNET_IOComponent *_ioc_next; // next in list FNET_IOComponent *_ioc_prev; // prev in list FNET_TransportThread *_ioc_owner; // owner(TransportThread) ref. - int _ioc_socket_fd; // source of events. Selector *_ioc_selector; // attached event selector - char *_ioc_spec; // connect/listen spec + std::string _ioc_spec; // connect/listen spec Flags _flags; // Compressed representation of boolean flags; + int _ioc_socket_fd; // source of events. 
+ uint32_t _ioc_refcnt; // reference counter vespalib::steady_time _ioc_timestamp; // last I/O activity std::mutex _ioc_lock; // synchronization std::condition_variable _ioc_cond; // synchronization - uint32_t _ioc_refcnt; // reference counter public: + FNET_IOComponent(const FNET_IOComponent &) = delete; + FNET_IOComponent &operator=(const FNET_IOComponent &) = delete; /** * Construct an IOComponent with the given owner. The socket that @@ -80,7 +79,7 @@ public: /** * @return connect/listen spec **/ - const char *GetSpec() const { return _ioc_spec; } + const char *GetSpec() const { return _ioc_spec.c_str(); } /* * Get a guard to gain exclusive access. diff --git a/fsa/src/vespa/fsa/vectorizer.h b/fsa/src/vespa/fsa/vectorizer.h index 3dead2faa2e..6ce94e92569 100644 --- a/fsa/src/vespa/fsa/vectorizer.h +++ b/fsa/src/vespa/fsa/vectorizer.h @@ -49,7 +49,7 @@ public: /** * @brief Default constructor, creates empty item with zero weight. */ - VectorItem() : _term(), _weight(0.0), _hits() {} + VectorItem() noexcept : _term(), _weight(0.0), _hits() {} /** * @brief Copy constructor. 
diff --git a/hosted-tenant-base/pom.xml b/hosted-tenant-base/pom.xml index 66b8cb56443..e7c364cb7de 100644 --- a/hosted-tenant-base/pom.xml +++ b/hosted-tenant-base/pom.xml @@ -36,7 +36,7 @@ <target_jdk_version>11</target_jdk_version> <maven-compiler-plugin.version>3.8.1</maven-compiler-plugin.version> <maven-surefire-plugin.version>2.22.0</maven-surefire-plugin.version> - <junit.version>5.7.0</junit.version> <!-- NOTE: this must be in sync with junit version specified in 'tenant-cd-api' --> + <junit.version>5.8.1</junit.version> <!-- NOTE: this must be in sync with junit version specified in 'tenant-cd-api' --> <test.categories>!integration</test.categories> <!-- To allow specialized base pom to include additional "test provided" dependencies --> @@ -200,6 +200,8 @@ org.junit.jupiter:junit-jupiter-api:provided, org.junit.jupiter:junit-jupiter-engine:test, org.junit.vintage:junit-vintage-engine:test, + com.yahoo.vespa:vespa-feed-client:runtime, <!-- prevent effective compile scope of vespa-feed-client in test bundle --> + com.yahoo.vespa:vespa-feed-client-api:provided, com.yahoo.vespa:tenant-cd-api:provided </testBundleScopeOverrides> </configuration> @@ -265,9 +267,19 @@ </goals> <configuration> <tasks> - <!-- Creating a dummy file to support running tests with old test runner. Remove when it is no longer in use --> - <mkdir dir="target/application-test/artifacts" /> - <touch file="target/application-test/artifacts/.ignore" /> + <!-- Workaround to copy src/test/application/tests only when its parents exists: + Copy in two steps, eliminating the parents in the helper step--> + + <mkdir dir="target/application-test/src/test/application" /> + <copy todir="target/application-test/"> + <fileset dir="." 
includes="src/test/application/tests/**" /> + </copy> + + <copy todir="target/application-test/"> + <fileset dir="target/application-test/src/test/application" includes="tests/**" /> + </copy> + <delete dir="target/application-test/src" /> + <copy file="target/${project.artifactId}-tests.jar" todir="target/application-test/components/" /> <zip destfile="target/application-test.zip" basedir="target/application-test/" /> </tasks> diff --git a/hosted-zone-api/abi-spec.json b/hosted-zone-api/abi-spec.json index b1b8eb84705..11375d97972 100644 --- a/hosted-zone-api/abi-spec.json +++ b/hosted-zone-api/abi-spec.json @@ -105,8 +105,11 @@ "public" ], "methods": [ + "public void <init>(ai.vespa.cloud.ApplicationId, ai.vespa.cloud.Zone)", "public void <init>(ai.vespa.cloud.Zone)", - "public ai.vespa.cloud.Zone zone()" + "public ai.vespa.cloud.ApplicationId application()", + "public ai.vespa.cloud.Zone zone()", + "public static ai.vespa.cloud.ZoneInfo defaultInfo()" ], "fields": [] } diff --git a/hosted-zone-api/src/main/java/ai/vespa/cloud/ZoneInfo.java b/hosted-zone-api/src/main/java/ai/vespa/cloud/ZoneInfo.java index d9af2421ab9..e4b69caa940 100644 --- a/hosted-zone-api/src/main/java/ai/vespa/cloud/ZoneInfo.java +++ b/hosted-zone-api/src/main/java/ai/vespa/cloud/ZoneInfo.java @@ -4,9 +4,9 @@ package ai.vespa.cloud; import java.util.Objects; /** - * Provides information about the zone in which this container is running. + * Provides information about the zone context in which this container is running. * This is available and can be injected when running in a cloud environment. - * If you don't need any other information than the zone this should be preferred + * If you don't need any other information than what's present here this should be preferred * to SystemInfo as it will never change at runtime and therefore does not * cause unnecessary reconstruction. 
* @@ -14,14 +14,30 @@ import java.util.Objects; */ public class ZoneInfo { + private static final ZoneInfo defaultInfo = new ZoneInfo(new ApplicationId("default", "default", "default"), + new Zone(Environment.prod, "default")); + + private final ApplicationId application; private final Zone zone; + public ZoneInfo(ApplicationId application, Zone zone) { + this.application = Objects.requireNonNull(application, "Application cannot be null!"); + this.zone = Objects.requireNonNull(zone, "Zone cannot be null!"); + } + + /** @deprecated pass an application id */ + @Deprecated // Remove on Vespa 8 public ZoneInfo(Zone zone) { - Objects.requireNonNull(zone, "Zone cannot be null!"); - this.zone = zone; + this(new ApplicationId("default", "default", "default"), zone); } + /** Returns the application this is running as part of */ + public ApplicationId application() { return application; } + /** Returns the zone this is running in */ public Zone zone() { return zone; } + /** Returns the info instance used when no zone info is available because we're not running in a cloud context */ + public static ZoneInfo defaultInfo() { return defaultInfo; } + } diff --git a/integration/intellij/build.gradle b/integration/intellij/build.gradle index 4801bc810b0..2d5ad1f33d6 100644 --- a/integration/intellij/build.gradle +++ b/integration/intellij/build.gradle @@ -36,7 +36,7 @@ compileJava { } group 'ai.vespa' -version '1.0.0' // Also update pom.xml version if this is changed +version '1.0.2' // Also update pom.xml version if this is changed sourceCompatibility = 11 @@ -61,10 +61,10 @@ buildSearchableOptions { patchPluginXml { version = project.version sinceBuild = '203' - untilBuild = '212.*' + untilBuild = '213.*' // in changeNotes you can add a description of the changes in this version (would appear in the plugin page in preferences\plugins) changeNotes = """ - <em></em>""" + <em>Support IntelliJ 213</em>""" } test { diff --git a/integration/intellij/pom.xml 
b/integration/intellij/pom.xml index 84a24b788b8..984968b2834 100644 --- a/integration/intellij/pom.xml +++ b/integration/intellij/pom.xml @@ -9,7 +9,7 @@ <relativePath>../parent/pom.xml</relativePath> </parent> <artifactId>vespa-intellij</artifactId> <!-- Not used - plugin is build by gradle --> - <version>1.0.0</version> <!-- See copy-zip below, which depends on this being the same as the v. in build.gradle --> + <version>1.0.2</version> <!-- See copy-zip below, which depends on this being the same as the v. in build.gradle --> <description> Maven wrapper for the gradle build of this IntelliJ plugin. </description> diff --git a/integration/intellij/src/main/resources/META-INF/plugin.xml b/integration/intellij/src/main/resources/META-INF/plugin.xml index 49db6c59b3e..673a66f3228 100644 --- a/integration/intellij/src/main/resources/META-INF/plugin.xml +++ b/integration/intellij/src/main/resources/META-INF/plugin.xml @@ -11,8 +11,19 @@ <depends>com.intellij.modules.platform</depends> <!-- Text to display as description on Preferences/Settings | Plugin page --> - <description><![CDATA[ - <p>Vespa.ai schema file support</p> + <description><![CDATA[ <p>Support for editing Vespa.ai + <a href="https://docs.vespa.ai/en/reference/schema-reference.html">schema files</a>.</p> + <p>Features:</p> + <ul> + <li>Syntax highlighting</li> + <li>Structure view</li> + <li>Find usages</li> + <li>Function call hierarchy</li> + <li>Go to declaration</li> + <li>Go to symbol</li> + <li>Refactoring</li> + <li>Turn lines into a comment</li> + </ul> ]]></description> <!-- Extension points defined by the plugin --> diff --git a/jdisc-security-filters/src/test/java/com/yahoo/jdisc/http/filter/security/cors/CorsResponseFilterTest.java b/jdisc-security-filters/src/test/java/com/yahoo/jdisc/http/filter/security/cors/CorsResponseFilterTest.java index ecadd0b1b87..e3a3568700c 100644 --- a/jdisc-security-filters/src/test/java/com/yahoo/jdisc/http/filter/security/cors/CorsResponseFilterTest.java +++ 
b/jdisc-security-filters/src/test/java/com/yahoo/jdisc/http/filter/security/cors/CorsResponseFilterTest.java @@ -1,18 +1,16 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.jdisc.http.filter.security.cors; -import com.yahoo.jdisc.http.Cookie; +import com.yahoo.jdisc.http.HttpResponse; import com.yahoo.jdisc.http.filter.DiscFilterResponse; import com.yahoo.jdisc.http.filter.RequestView; import com.yahoo.jdisc.http.filter.SecurityResponseFilter; import com.yahoo.jdisc.http.filter.security.cors.CorsFilterConfig.Builder; -import com.yahoo.jdisc.http.servlet.ServletOrJdiscHttpResponse; import org.junit.Test; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; -import java.util.List; import java.util.Map; import java.util.Optional; @@ -75,11 +73,9 @@ public class CorsResponseFilterTest { } private static class TestResponse extends DiscFilterResponse { - Map<String, String> headers = new HashMap<>(); + final Map<String, String> headers = new HashMap<>(); - TestResponse() { - super(mock(ServletOrJdiscHttpResponse.class)); - } + TestResponse() { super(HttpResponse.newInstance(200)); } @Override public void setHeader(String name, String value) { @@ -90,23 +86,5 @@ public class CorsResponseFilterTest { public String getHeader(String name) { return headers.get(name); } - - @Override - public void removeHeaders(String s) { throw new UnsupportedOperationException(); } - - @Override - public void setHeaders(String s, String s1) { throw new UnsupportedOperationException(); } - - @Override - public void setHeaders(String s, List<String> list) { throw new UnsupportedOperationException(); } - - @Override - public void addHeader(String s, String s1) { throw new UnsupportedOperationException(); } - - @Override - public void setCookies(List<Cookie> list) { throw new UnsupportedOperationException(); } - - @Override - public void setStatus(int i) { throw new 
UnsupportedOperationException(); } } } diff --git a/jdisc_core/abi-spec.json b/jdisc_core/abi-spec.json new file mode 100644 index 00000000000..d1b676b330f --- /dev/null +++ b/jdisc_core/abi-spec.json @@ -0,0 +1,915 @@ +{ + "com.yahoo.jdisc.AbstractResource": { + "superClass": "java.lang.Object", + "interfaces": [ + "com.yahoo.jdisc.SharedResource" + ], + "attributes": [ + "public", + "abstract" + ], + "methods": [ + "protected void <init>()", + "public final com.yahoo.jdisc.ResourceReference refer()", + "public final com.yahoo.jdisc.ResourceReference refer(java.lang.Object)", + "public final void release()", + "public final int retainCount()", + "protected void destroy()", + "public java.lang.String currentState()" + ], + "fields": [] + }, + "com.yahoo.jdisc.Container": { + "superClass": "java.lang.Object", + "interfaces": [ + "com.yahoo.jdisc.SharedResource", + "com.yahoo.jdisc.Timer" + ], + "attributes": [ + "public", + "interface", + "abstract" + ], + "methods": [ + "public abstract com.yahoo.jdisc.handler.RequestHandler resolveHandler(com.yahoo.jdisc.Request)", + "public abstract java.lang.Object getInstance(com.google.inject.Key)", + "public abstract java.lang.Object getInstance(java.lang.Class)" + ], + "fields": [] + }, + "com.yahoo.jdisc.HeaderFields": { + "superClass": "java.lang.Object", + "interfaces": [ + "java.util.Map" + ], + "attributes": [ + "public" + ], + "methods": [ + "public void <init>()", + "public int size()", + "public boolean isEmpty()", + "public boolean containsKey(java.lang.Object)", + "public boolean containsValue(java.lang.Object)", + "public boolean contains(java.lang.String, java.lang.String)", + "public boolean containsIgnoreCase(java.lang.String, java.lang.String)", + "public void add(java.lang.String, java.lang.String)", + "public void add(java.lang.String, java.util.List)", + "public void addAll(java.util.Map)", + "public java.util.List put(java.lang.String, java.lang.String)", + "public java.util.List put(java.lang.String, 
java.util.List)", + "public void putAll(java.util.Map)", + "public java.util.List remove(java.lang.Object)", + "public boolean remove(java.lang.String, java.lang.String)", + "public void clear()", + "public java.util.List get(java.lang.Object)", + "public java.lang.String getFirst(java.lang.String)", + "public boolean isTrue(java.lang.String)", + "public java.util.Set keySet()", + "public java.util.Collection values()", + "public java.util.Set entrySet()", + "public java.lang.String toString()", + "public java.util.List entries()", + "public boolean equals(java.lang.Object)", + "public int hashCode()", + "public bridge synthetic java.lang.Object remove(java.lang.Object)", + "public bridge synthetic java.lang.Object put(java.lang.Object, java.lang.Object)", + "public bridge synthetic java.lang.Object get(java.lang.Object)" + ], + "fields": [] + }, + "com.yahoo.jdisc.Metric$Context": { + "superClass": "java.lang.Object", + "interfaces": [], + "attributes": [ + "public", + "interface", + "abstract" + ], + "methods": [], + "fields": [] + }, + "com.yahoo.jdisc.Metric": { + "superClass": "java.lang.Object", + "interfaces": [], + "attributes": [ + "public", + "interface", + "abstract" + ], + "methods": [ + "public abstract void set(java.lang.String, java.lang.Number, com.yahoo.jdisc.Metric$Context)", + "public abstract void add(java.lang.String, java.lang.Number, com.yahoo.jdisc.Metric$Context)", + "public abstract com.yahoo.jdisc.Metric$Context createContext(java.util.Map)" + ], + "fields": [] + }, + "com.yahoo.jdisc.NoopSharedResource": { + "superClass": "java.lang.Object", + "interfaces": [ + "com.yahoo.jdisc.SharedResource" + ], + "attributes": [ + "public" + ], + "methods": [ + "public void <init>()", + "public final com.yahoo.jdisc.ResourceReference refer()", + "public final com.yahoo.jdisc.ResourceReference refer(java.lang.Object)", + "public final void release()" + ], + "fields": [] + }, + "com.yahoo.jdisc.ReferencedResource": { + "superClass": "java.lang.Object", 
+ "interfaces": [ + "java.lang.AutoCloseable" + ], + "attributes": [ + "public" + ], + "methods": [ + "public void <init>(com.yahoo.jdisc.SharedResource, com.yahoo.jdisc.ResourceReference)", + "public com.yahoo.jdisc.SharedResource getResource()", + "public com.yahoo.jdisc.ResourceReference getReference()", + "public void close()" + ], + "fields": [] + }, + "com.yahoo.jdisc.References": { + "superClass": "java.lang.Object", + "interfaces": [], + "attributes": [ + "public" + ], + "methods": [ + "public static com.yahoo.jdisc.ResourceReference fromResource(com.yahoo.jdisc.SharedResource)" + ], + "fields": [ + "public static final com.yahoo.jdisc.ResourceReference NOOP_REFERENCE" + ] + }, + "com.yahoo.jdisc.Request$RequestType": { + "superClass": "java.lang.Enum", + "interfaces": [], + "attributes": [ + "public", + "final", + "enum" + ], + "methods": [ + "public static com.yahoo.jdisc.Request$RequestType[] values()", + "public static com.yahoo.jdisc.Request$RequestType valueOf(java.lang.String)" + ], + "fields": [ + "public static final enum com.yahoo.jdisc.Request$RequestType READ", + "public static final enum com.yahoo.jdisc.Request$RequestType WRITE", + "public static final enum com.yahoo.jdisc.Request$RequestType MONITORING" + ] + }, + "com.yahoo.jdisc.Request": { + "superClass": "com.yahoo.jdisc.AbstractResource", + "interfaces": [], + "attributes": [ + "public" + ], + "methods": [ + "public void <init>(com.yahoo.jdisc.service.CurrentContainer, java.net.URI)", + "public void <init>(com.yahoo.jdisc.service.CurrentContainer, java.net.URI, boolean)", + "public void <init>(com.yahoo.jdisc.Request, java.net.URI)", + "public com.yahoo.jdisc.Container container()", + "public java.net.URI getUri()", + "public com.yahoo.jdisc.Request setUri(java.net.URI)", + "public boolean isServerRequest()", + "public com.yahoo.jdisc.Request setServerRequest(boolean)", + "public com.yahoo.jdisc.application.BindingMatch getBindingMatch()", + "public com.yahoo.jdisc.Request 
setBindingMatch(com.yahoo.jdisc.application.BindingMatch)", + "public java.util.Map context()", + "public com.yahoo.jdisc.HeaderFields headers()", + "public void setTimeoutManager(com.yahoo.jdisc.TimeoutManager)", + "public com.yahoo.jdisc.TimeoutManager getTimeoutManager()", + "public void setTimeout(long, java.util.concurrent.TimeUnit)", + "public java.lang.Long getTimeout(java.util.concurrent.TimeUnit)", + "public java.lang.Long timeRemaining(java.util.concurrent.TimeUnit)", + "public long timeElapsed(java.util.concurrent.TimeUnit)", + "public long creationTime(java.util.concurrent.TimeUnit)", + "public boolean isCancelled()", + "public void cancel()", + "public com.yahoo.jdisc.handler.ContentChannel connect(com.yahoo.jdisc.handler.ResponseHandler)", + "protected void destroy()" + ], + "fields": [] + }, + "com.yahoo.jdisc.ResourceReference": { + "superClass": "java.lang.Object", + "interfaces": [ + "java.lang.AutoCloseable" + ], + "attributes": [ + "public", + "interface", + "abstract" + ], + "methods": [ + "public abstract void close()" + ], + "fields": [] + }, + "com.yahoo.jdisc.Response$Status": { + "superClass": "java.lang.Object", + "interfaces": [], + "attributes": [ + "public", + "interface", + "abstract" + ], + "methods": [], + "fields": [ + "public static final int CONTINUE", + "public static final int SWITCHING_PROTOCOLS", + "public static final int PROCESSING", + "public static final int OK", + "public static final int CREATED", + "public static final int ACCEPTED", + "public static final int NON_AUTHORITATIVE_INFORMATION", + "public static final int NO_CONTENT", + "public static final int RESET_CONTENT", + "public static final int PARTIAL_CONTENT", + "public static final int MULTI_STATUS", + "public static final int MULTIPLE_CHOICES", + "public static final int MOVED_PERMANENTLY", + "public static final int FOUND", + "public static final int SEE_OTHER", + "public static final int NOT_MODIFIED", + "public static final int USE_PROXY", + "public static 
final int TEMPORARY_REDIRECT", + "public static final int BAD_REQUEST", + "public static final int UNAUTHORIZED", + "public static final int PAYMENT_REQUIRED", + "public static final int FORBIDDEN", + "public static final int NOT_FOUND", + "public static final int METHOD_NOT_ALLOWED", + "public static final int NOT_ACCEPTABLE", + "public static final int PROXY_AUTHENTICATION_REQUIRED", + "public static final int REQUEST_TIMEOUT", + "public static final int CONFLICT", + "public static final int GONE", + "public static final int LENGTH_REQUIRED", + "public static final int PRECONDITION_FAILED", + "public static final int REQUEST_TOO_LONG", + "public static final int REQUEST_URI_TOO_LONG", + "public static final int UNSUPPORTED_MEDIA_TYPE", + "public static final int REQUESTED_RANGE_NOT_SATISFIABLE", + "public static final int EXPECTATION_FAILED", + "public static final int INSUFFICIENT_SPACE_ON_RESOURCE", + "public static final int METHOD_FAILURE", + "public static final int UNPROCESSABLE_ENTITY", + "public static final int LOCKED", + "public static final int FAILED_DEPENDENCY", + "public static final int TOO_MANY_REQUESTS", + "public static final int INTERNAL_SERVER_ERROR", + "public static final int NOT_IMPLEMENTED", + "public static final int BAD_GATEWAY", + "public static final int SERVICE_UNAVAILABLE", + "public static final int GATEWAY_TIMEOUT", + "public static final int VERSION_NOT_SUPPORTED", + "public static final int INSUFFICIENT_STORAGE" + ] + }, + "com.yahoo.jdisc.Response": { + "superClass": "java.lang.Object", + "interfaces": [], + "attributes": [ + "public" + ], + "methods": [ + "public void <init>(int)", + "public void <init>(int, java.lang.Throwable)", + "public java.util.Map context()", + "public com.yahoo.jdisc.HeaderFields headers()", + "public int getStatus()", + "public com.yahoo.jdisc.Response setStatus(int)", + "public java.lang.Throwable getError()", + "public com.yahoo.jdisc.Response setError(java.lang.Throwable)", + "public void 
setRequestType(com.yahoo.jdisc.Request$RequestType)", + "public com.yahoo.jdisc.Request$RequestType getRequestType()", + "public static void dispatchTimeout(com.yahoo.jdisc.handler.ResponseHandler)" + ], + "fields": [] + }, + "com.yahoo.jdisc.SharedResource$Debug": { + "superClass": "java.lang.Enum", + "interfaces": [], + "attributes": [ + "public", + "final", + "enum" + ], + "methods": [ + "public static com.yahoo.jdisc.SharedResource$Debug[] values()", + "public static com.yahoo.jdisc.SharedResource$Debug valueOf(java.lang.String)" + ], + "fields": [ + "public static final enum com.yahoo.jdisc.SharedResource$Debug NO", + "public static final enum com.yahoo.jdisc.SharedResource$Debug SIMPLE", + "public static final enum com.yahoo.jdisc.SharedResource$Debug STACK" + ] + }, + "com.yahoo.jdisc.SharedResource": { + "superClass": "java.lang.Object", + "interfaces": [], + "attributes": [ + "public", + "interface", + "abstract" + ], + "methods": [ + "public com.yahoo.jdisc.ResourceReference refer()", + "public com.yahoo.jdisc.ResourceReference refer(java.lang.Object)", + "public abstract void release()" + ], + "fields": [ + "public static final java.lang.String SYSTEM_PROPERTY_NAME_DEBUG", + "public static final com.yahoo.jdisc.SharedResource$Debug DEBUG" + ] + }, + "com.yahoo.jdisc.TimeoutManager": { + "superClass": "java.lang.Object", + "interfaces": [], + "attributes": [ + "public", + "interface", + "abstract" + ], + "methods": [ + "public abstract void scheduleTimeout(com.yahoo.jdisc.Request)", + "public void unscheduleTimeout(com.yahoo.jdisc.Request)" + ], + "fields": [] + }, + "com.yahoo.jdisc.Timer$ClockAdapter": { + "superClass": "java.time.Clock", + "interfaces": [], + "attributes": [ + "public" + ], + "methods": [ + "public java.time.ZoneId getZone()", + "public java.time.Clock withZone(java.time.ZoneId)", + "public java.time.Instant instant()" + ], + "fields": [] + }, + "com.yahoo.jdisc.Timer": { + "superClass": "java.lang.Object", + "interfaces": [], + 
"attributes": [ + "public", + "interface", + "abstract" + ], + "methods": [ + "public abstract long currentTimeMillis()", + "public java.time.Instant currentTime()", + "public java.time.Clock toUtcClock()", + "public static com.yahoo.jdisc.Timer fromClock(java.time.Clock)" + ], + "fields": [] + }, + "com.yahoo.jdisc.handler.AbstractRequestHandler": { + "superClass": "com.yahoo.jdisc.AbstractResource", + "interfaces": [ + "com.yahoo.jdisc.handler.RequestHandler" + ], + "attributes": [ + "public", + "abstract" + ], + "methods": [ + "public void <init>()", + "public void handleTimeout(com.yahoo.jdisc.Request, com.yahoo.jdisc.handler.ResponseHandler)" + ], + "fields": [] + }, + "com.yahoo.jdisc.handler.BindingNotFoundException": { + "superClass": "java.lang.RuntimeException", + "interfaces": [], + "attributes": [ + "public", + "final" + ], + "methods": [ + "public void <init>(java.net.URI)", + "public java.net.URI uri()" + ], + "fields": [] + }, + "com.yahoo.jdisc.handler.BlockingContentWriter": { + "superClass": "java.lang.Object", + "interfaces": [], + "attributes": [ + "public", + "final" + ], + "methods": [ + "public void <init>(com.yahoo.jdisc.handler.ContentChannel)", + "public void write(java.nio.ByteBuffer)", + "public void close()" + ], + "fields": [] + }, + "com.yahoo.jdisc.handler.BufferedContentChannel": { + "superClass": "java.lang.Object", + "interfaces": [ + "com.yahoo.jdisc.handler.ContentChannel" + ], + "attributes": [ + "public", + "final" + ], + "methods": [ + "public void <init>()", + "public void connectTo(com.yahoo.jdisc.handler.ContentChannel)", + "public boolean isConnected()", + "public com.yahoo.jdisc.handler.ReadableContentChannel toReadable()", + "public com.yahoo.jdisc.handler.ContentInputStream toStream()", + "public void write(java.nio.ByteBuffer, com.yahoo.jdisc.handler.CompletionHandler)", + "public void close(com.yahoo.jdisc.handler.CompletionHandler)" + ], + "fields": [] + }, + "com.yahoo.jdisc.handler.CallableRequestDispatch": { + 
"superClass": "com.yahoo.jdisc.handler.RequestDispatch", + "interfaces": [ + "java.util.concurrent.Callable" + ], + "attributes": [ + "public", + "abstract" + ], + "methods": [ + "public void <init>()", + "public final com.yahoo.jdisc.Response call()", + "public bridge synthetic java.lang.Object call()" + ], + "fields": [] + }, + "com.yahoo.jdisc.handler.CallableResponseDispatch": { + "superClass": "com.yahoo.jdisc.handler.ResponseDispatch", + "interfaces": [ + "java.util.concurrent.Callable" + ], + "attributes": [ + "public", + "abstract" + ], + "methods": [ + "public void <init>(com.yahoo.jdisc.handler.ResponseHandler)", + "public final java.lang.Boolean call()", + "public bridge synthetic java.lang.Object call()" + ], + "fields": [] + }, + "com.yahoo.jdisc.handler.CompletionHandler": { + "superClass": "java.lang.Object", + "interfaces": [], + "attributes": [ + "public", + "interface", + "abstract" + ], + "methods": [ + "public abstract void completed()", + "public abstract void failed(java.lang.Throwable)" + ], + "fields": [] + }, + "com.yahoo.jdisc.handler.ContentChannel": { + "superClass": "java.lang.Object", + "interfaces": [], + "attributes": [ + "public", + "interface", + "abstract" + ], + "methods": [ + "public abstract void write(java.nio.ByteBuffer, com.yahoo.jdisc.handler.CompletionHandler)", + "public abstract void close(com.yahoo.jdisc.handler.CompletionHandler)", + "public void onError(java.lang.Throwable)" + ], + "fields": [] + }, + "com.yahoo.jdisc.handler.ContentInputStream": { + "superClass": "com.yahoo.jdisc.handler.UnsafeContentInputStream", + "interfaces": [], + "attributes": [ + "public", + "final" + ], + "methods": [ + "public void <init>(com.yahoo.jdisc.handler.ReadableContentChannel)", + "public void finalize()" + ], + "fields": [] + }, + "com.yahoo.jdisc.handler.DelegatedRequestHandler": { + "superClass": "java.lang.Object", + "interfaces": [ + "com.yahoo.jdisc.handler.RequestHandler" + ], + "attributes": [ + "public", + "interface", + 
"abstract" + ], + "methods": [ + "public abstract com.yahoo.jdisc.handler.RequestHandler getDelegate()", + "public com.yahoo.jdisc.handler.RequestHandler getDelegateRecursive()" + ], + "fields": [] + }, + "com.yahoo.jdisc.handler.FastContentOutputStream": { + "superClass": "com.yahoo.jdisc.handler.AbstractContentOutputStream", + "interfaces": [ + "java.util.concurrent.Future" + ], + "attributes": [ + "public" + ], + "methods": [ + "public void <init>(com.yahoo.jdisc.handler.ContentChannel)", + "public void <init>(com.yahoo.jdisc.handler.FastContentWriter)", + "protected void doFlush(java.nio.ByteBuffer)", + "protected void doClose()", + "public boolean cancel(boolean)", + "public boolean isCancelled()", + "public boolean isDone()", + "public java.lang.Boolean get()", + "public java.lang.Boolean get(long, java.util.concurrent.TimeUnit)", + "public void addListener(java.lang.Runnable, java.util.concurrent.Executor)", + "public bridge synthetic java.lang.Object get(long, java.util.concurrent.TimeUnit)", + "public bridge synthetic java.lang.Object get()" + ], + "fields": [] + }, + "com.yahoo.jdisc.handler.FastContentWriter": { + "superClass": "java.util.concurrent.CompletableFuture", + "interfaces": [ + "java.lang.AutoCloseable" + ], + "attributes": [ + "public" + ], + "methods": [ + "public void <init>(com.yahoo.jdisc.handler.ContentChannel)", + "public void write(java.lang.String)", + "public void write(byte[])", + "public void write(byte[], int, int)", + "public void write(java.nio.ByteBuffer)", + "public void close()", + "public void addListener(java.lang.Runnable, java.util.concurrent.Executor)", + "public boolean cancel(boolean)", + "public boolean isCancelled()" + ], + "fields": [] + }, + "com.yahoo.jdisc.handler.FutureCompletion": { + "superClass": "java.util.concurrent.CompletableFuture", + "interfaces": [ + "com.yahoo.jdisc.handler.CompletionHandler" + ], + "attributes": [ + "public", + "final" + ], + "methods": [ + "public void <init>()", + "public void 
completed()", + "public void failed(java.lang.Throwable)", + "public final boolean cancel(boolean)", + "public final boolean isCancelled()", + "public void addListener(java.lang.Runnable, java.util.concurrent.Executor)" + ], + "fields": [] + }, + "com.yahoo.jdisc.handler.FutureResponse": { + "superClass": "java.util.concurrent.CompletableFuture", + "interfaces": [ + "com.yahoo.jdisc.handler.ResponseHandler" + ], + "attributes": [ + "public", + "final" + ], + "methods": [ + "public void <init>()", + "public void <init>(com.yahoo.jdisc.handler.ContentChannel)", + "public void addListener(java.lang.Runnable, java.util.concurrent.Executor)", + "public void <init>(com.yahoo.jdisc.handler.ResponseHandler)", + "public com.yahoo.jdisc.handler.ContentChannel handleResponse(com.yahoo.jdisc.Response)", + "public final boolean cancel(boolean)", + "public final boolean isCancelled()" + ], + "fields": [] + }, + "com.yahoo.jdisc.handler.NullContent": { + "superClass": "java.lang.Object", + "interfaces": [ + "com.yahoo.jdisc.handler.ContentChannel" + ], + "attributes": [ + "public", + "final" + ], + "methods": [ + "public void write(java.nio.ByteBuffer, com.yahoo.jdisc.handler.CompletionHandler)", + "public void close(com.yahoo.jdisc.handler.CompletionHandler)" + ], + "fields": [ + "public static final com.yahoo.jdisc.handler.NullContent INSTANCE" + ] + }, + "com.yahoo.jdisc.handler.OverloadException": { + "superClass": "java.lang.RuntimeException", + "interfaces": [], + "attributes": [ + "public" + ], + "methods": [ + "public void <init>(java.lang.String, java.lang.Throwable)" + ], + "fields": [] + }, + "com.yahoo.jdisc.handler.ReadableContentChannel": { + "superClass": "java.lang.Object", + "interfaces": [ + "com.yahoo.jdisc.handler.ContentChannel", + "java.lang.Iterable" + ], + "attributes": [ + "public", + "final" + ], + "methods": [ + "public void <init>()", + "public void write(java.nio.ByteBuffer, com.yahoo.jdisc.handler.CompletionHandler)", + "public void 
close(com.yahoo.jdisc.handler.CompletionHandler)", + "public java.util.Iterator iterator()", + "public int available()", + "public java.nio.ByteBuffer read()", + "public void failed(java.lang.Throwable)", + "public com.yahoo.jdisc.handler.ContentInputStream toStream()" + ], + "fields": [] + }, + "com.yahoo.jdisc.handler.RequestDeniedException": { + "superClass": "java.lang.RuntimeException", + "interfaces": [], + "attributes": [ + "public", + "final" + ], + "methods": [ + "public void <init>(com.yahoo.jdisc.Request)", + "public com.yahoo.jdisc.Request request()" + ], + "fields": [] + }, + "com.yahoo.jdisc.handler.RequestDispatch": { + "superClass": "java.lang.Object", + "interfaces": [ + "java.util.concurrent.Future", + "com.yahoo.jdisc.handler.ResponseHandler" + ], + "attributes": [ + "public", + "abstract" + ], + "methods": [ + "public void <init>()", + "protected abstract com.yahoo.jdisc.Request newRequest()", + "protected java.lang.Iterable requestContent()", + "public final com.yahoo.jdisc.handler.ContentChannel connect()", + "public final com.yahoo.jdisc.handler.FastContentWriter connectFastWriter()", + "public final java.util.concurrent.CompletableFuture dispatch()", + "public void addListener(java.lang.Runnable, java.util.concurrent.Executor)", + "public final boolean cancel(boolean)", + "public final boolean isCancelled()", + "public final boolean isDone()", + "public final com.yahoo.jdisc.Response get()", + "public final com.yahoo.jdisc.Response get(long, java.util.concurrent.TimeUnit)", + "public com.yahoo.jdisc.handler.ContentChannel handleResponse(com.yahoo.jdisc.Response)", + "public bridge synthetic java.lang.Object get(long, java.util.concurrent.TimeUnit)", + "public bridge synthetic java.lang.Object get()" + ], + "fields": [] + }, + "com.yahoo.jdisc.handler.RequestHandler": { + "superClass": "java.lang.Object", + "interfaces": [ + "com.yahoo.jdisc.SharedResource" + ], + "attributes": [ + "public", + "interface", + "abstract" + ], + "methods": [ + 
"public abstract com.yahoo.jdisc.handler.ContentChannel handleRequest(com.yahoo.jdisc.Request, com.yahoo.jdisc.handler.ResponseHandler)", + "public abstract void handleTimeout(com.yahoo.jdisc.Request, com.yahoo.jdisc.handler.ResponseHandler)" + ], + "fields": [] + }, + "com.yahoo.jdisc.handler.ResponseDispatch": { + "superClass": "java.lang.Object", + "interfaces": [ + "java.util.concurrent.Future" + ], + "attributes": [ + "public", + "abstract" + ], + "methods": [ + "public void <init>()", + "protected abstract com.yahoo.jdisc.Response newResponse()", + "protected java.lang.Iterable responseContent()", + "public final com.yahoo.jdisc.handler.ContentChannel connect(com.yahoo.jdisc.handler.ResponseHandler)", + "public final com.yahoo.jdisc.handler.FastContentWriter connectFastWriter(com.yahoo.jdisc.handler.ResponseHandler)", + "public final java.util.concurrent.CompletableFuture dispatch(com.yahoo.jdisc.handler.ResponseHandler)", + "public final boolean cancel(boolean)", + "public final boolean isCancelled()", + "public boolean isDone()", + "public java.lang.Boolean get()", + "public java.lang.Boolean get(long, java.util.concurrent.TimeUnit)", + "public static varargs com.yahoo.jdisc.handler.ResponseDispatch newInstance(int, java.nio.ByteBuffer[])", + "public static com.yahoo.jdisc.handler.ResponseDispatch newInstance(int, java.lang.Iterable)", + "public static varargs com.yahoo.jdisc.handler.ResponseDispatch newInstance(com.yahoo.jdisc.Response, java.nio.ByteBuffer[])", + "public static com.yahoo.jdisc.handler.ResponseDispatch newInstance(com.yahoo.jdisc.Response, java.lang.Iterable)", + "public bridge synthetic java.lang.Object get(long, java.util.concurrent.TimeUnit)", + "public bridge synthetic java.lang.Object get()" + ], + "fields": [] + }, + "com.yahoo.jdisc.handler.ResponseHandler": { + "superClass": "java.lang.Object", + "interfaces": [], + "attributes": [ + "public", + "interface", + "abstract" + ], + "methods": [ + "public abstract 
com.yahoo.jdisc.handler.ContentChannel handleResponse(com.yahoo.jdisc.Response)" + ], + "fields": [] + }, + "com.yahoo.jdisc.handler.ThreadedRequestHandler": { + "superClass": "com.yahoo.jdisc.handler.AbstractRequestHandler", + "interfaces": [], + "attributes": [ + "public", + "abstract" + ], + "methods": [ + "protected void <init>(java.util.concurrent.Executor)", + "public final void setTimeout(long, java.util.concurrent.TimeUnit)", + "public final long getTimeout(java.util.concurrent.TimeUnit)", + "public final com.yahoo.jdisc.handler.ContentChannel handleRequest(com.yahoo.jdisc.Request, com.yahoo.jdisc.handler.ResponseHandler)", + "protected void handleRequest(com.yahoo.jdisc.Request, com.yahoo.jdisc.handler.BufferedContentChannel, com.yahoo.jdisc.handler.ResponseHandler)", + "protected void handleRequest(com.yahoo.jdisc.Request, com.yahoo.jdisc.handler.ReadableContentChannel, com.yahoo.jdisc.handler.ResponseHandler)", + "protected void handleRequest(com.yahoo.jdisc.Request, com.yahoo.jdisc.handler.ContentInputStream, com.yahoo.jdisc.handler.ResponseHandler)" + ], + "fields": [] + }, + "com.yahoo.jdisc.handler.UnsafeContentInputStream": { + "superClass": "java.io.InputStream", + "interfaces": [], + "attributes": [ + "public" + ], + "methods": [ + "public void <init>(com.yahoo.jdisc.handler.ReadableContentChannel)", + "public int read()", + "public int read(byte[], int, int)", + "public int available()", + "public void close()", + "public synchronized void mark(int)", + "public synchronized void reset()", + "public boolean markSupported()" + ], + "fields": [] + }, + "com.yahoo.jdisc.service.AbstractClientProvider": { + "superClass": "com.yahoo.jdisc.handler.AbstractRequestHandler", + "interfaces": [ + "com.yahoo.jdisc.service.ClientProvider" + ], + "attributes": [ + "public", + "abstract" + ], + "methods": [ + "public void <init>()", + "public void start()" + ], + "fields": [] + }, + "com.yahoo.jdisc.service.AbstractServerProvider": { + "superClass": 
"com.yahoo.jdisc.AbstractResource", + "interfaces": [ + "com.yahoo.jdisc.service.ServerProvider" + ], + "attributes": [ + "public", + "abstract" + ], + "methods": [ + "protected void <init>(com.yahoo.jdisc.service.CurrentContainer)", + "public final com.yahoo.jdisc.service.CurrentContainer container()" + ], + "fields": [] + }, + "com.yahoo.jdisc.service.BindingSetNotFoundException": { + "superClass": "java.lang.RuntimeException", + "interfaces": [], + "attributes": [ + "public", + "final" + ], + "methods": [ + "public void <init>(java.lang.String)", + "public java.lang.String bindingSet()" + ], + "fields": [] + }, + "com.yahoo.jdisc.service.ClientProvider": { + "superClass": "java.lang.Object", + "interfaces": [ + "com.yahoo.jdisc.handler.RequestHandler" + ], + "attributes": [ + "public", + "interface", + "abstract" + ], + "methods": [ + "public abstract void start()" + ], + "fields": [] + }, + "com.yahoo.jdisc.service.ContainerNotReadyException": { + "superClass": "java.lang.RuntimeException", + "interfaces": [], + "attributes": [ + "public", + "final" + ], + "methods": [ + "public void <init>()" + ], + "fields": [] + }, + "com.yahoo.jdisc.service.CurrentContainer": { + "superClass": "java.lang.Object", + "interfaces": [], + "attributes": [ + "public", + "interface", + "abstract" + ], + "methods": [ + "public com.yahoo.jdisc.Container newReference(java.net.URI, java.lang.Object)", + "public com.yahoo.jdisc.Container newReference(java.net.URI)" + ], + "fields": [] + }, + "com.yahoo.jdisc.service.NoBindingSetSelectedException": { + "superClass": "java.lang.RuntimeException", + "interfaces": [], + "attributes": [ + "public", + "final" + ], + "methods": [ + "public void <init>(java.net.URI)", + "public java.net.URI uri()" + ], + "fields": [] + }, + "com.yahoo.jdisc.service.ServerProvider": { + "superClass": "java.lang.Object", + "interfaces": [ + "com.yahoo.jdisc.SharedResource" + ], + "attributes": [ + "public", + "interface", + "abstract" + ], + "methods": [ + 
"public abstract void start()", + "public abstract void close()" + ], + "fields": [] + } +}
\ No newline at end of file diff --git a/jdisc_core/pom.xml b/jdisc_core/pom.xml index caa97b4e7e6..4a68c8e0cac 100644 --- a/jdisc_core/pom.xml +++ b/jdisc_core/pom.xml @@ -171,6 +171,10 @@ <build> <plugins> <plugin> + <groupId>com.yahoo.vespa</groupId> + <artifactId>abi-check-plugin</artifactId> + </plugin> + <plugin> <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-compiler-plugin</artifactId> <configuration> diff --git a/jdisc_core/src/main/java/com/yahoo/jdisc/application/ContainerBuilder.java b/jdisc_core/src/main/java/com/yahoo/jdisc/application/ContainerBuilder.java index db7d7e82ffb..eb602737c8f 100644 --- a/jdisc_core/src/main/java/com/yahoo/jdisc/application/ContainerBuilder.java +++ b/jdisc_core/src/main/java/com/yahoo/jdisc/application/ContainerBuilder.java @@ -8,8 +8,11 @@ import com.google.inject.Module; import com.yahoo.jdisc.Container; import com.yahoo.jdisc.handler.RequestHandler; -import java.util.*; -import java.util.concurrent.ThreadFactory; +import java.util.Collections; +import java.util.HashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; /** * <p>This is the inactive, mutable {@link Container}. 
Because it requires references to the application internals, it @@ -38,8 +41,8 @@ public class ContainerBuilder { bind(ContainerBuilder.class).toInstance(ContainerBuilder.this); } }); - this.serverBindings.put(BindingSet.DEFAULT, new BindingRepository<RequestHandler>()); - this.clientBindings.put(BindingSet.DEFAULT, new BindingRepository<RequestHandler>()); + this.serverBindings.put(BindingSet.DEFAULT, new BindingRepository<>()); + this.clientBindings.put(BindingSet.DEFAULT, new BindingRepository<>()); } public void setAppContext(Object ctx) { @@ -130,4 +133,5 @@ public class ContainerBuilder { } return lst; } + } diff --git a/jdisc_core/src/main/java/com/yahoo/jdisc/handler/FastContentOutputStream.java b/jdisc_core/src/main/java/com/yahoo/jdisc/handler/FastContentOutputStream.java index 54e50df5a25..e001db2ab81 100644 --- a/jdisc_core/src/main/java/com/yahoo/jdisc/handler/FastContentOutputStream.java +++ b/jdisc_core/src/main/java/com/yahoo/jdisc/handler/FastContentOutputStream.java @@ -1,12 +1,11 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.jdisc.handler; -import com.google.common.util.concurrent.ListenableFuture; - import java.nio.ByteBuffer; import java.util.Objects; import java.util.concurrent.ExecutionException; import java.util.concurrent.Executor; +import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; @@ -20,7 +19,7 @@ import java.util.concurrent.TimeoutException; * * @author Simon Thoresen Hult */ -public class FastContentOutputStream extends AbstractContentOutputStream implements ListenableFuture<Boolean> { +public class FastContentOutputStream extends AbstractContentOutputStream implements Future<Boolean> { private final FastContentWriter out; @@ -78,7 +77,6 @@ public class FastContentOutputStream extends AbstractContentOutputStream impleme return out.get(timeout, unit); } - @Override public void addListener(Runnable listener, Executor executor) { out.addListener(listener, executor); } diff --git a/jdisc_core/src/main/java/com/yahoo/jdisc/handler/FastContentWriter.java b/jdisc_core/src/main/java/com/yahoo/jdisc/handler/FastContentWriter.java index 596ae07f1d5..7c278c67d59 100644 --- a/jdisc_core/src/main/java/com/yahoo/jdisc/handler/FastContentWriter.java +++ b/jdisc_core/src/main/java/com/yahoo/jdisc/handler/FastContentWriter.java @@ -1,16 +1,11 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.jdisc.handler; -import com.google.common.util.concurrent.ListenableFuture; -import com.google.common.util.concurrent.SettableFuture; - import java.nio.ByteBuffer; import java.nio.charset.StandardCharsets; import java.util.Objects; -import java.util.concurrent.ExecutionException; +import java.util.concurrent.CompletableFuture; import java.util.concurrent.Executor; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; @@ -25,13 +20,12 @@ import java.util.concurrent.atomic.AtomicInteger; * * @author Simon Thoresen Hult */ -public class FastContentWriter implements ListenableFuture<Boolean>, AutoCloseable { +public class FastContentWriter extends CompletableFuture<Boolean> implements AutoCloseable { private final AtomicBoolean closed = new AtomicBoolean(false); private final AtomicInteger numPendingCompletions = new AtomicInteger(); private final CompletionHandler completionHandler = new SimpleCompletionHandler(); private final ContentChannel out; - private final SettableFuture<Boolean> future = SettableFuture.create(); /** * <p>Creates a new FastContentWriter that encapsulates a given {@link ContentChannel}.</p> @@ -87,7 +81,7 @@ public class FastContentWriter implements ListenableFuture<Boolean>, AutoCloseab try { out.write(buf, completionHandler); } catch (Throwable t) { - future.setException(t); + completeExceptionally(t); throw t; } } @@ -103,14 +97,13 @@ public class FastContentWriter implements ListenableFuture<Boolean>, AutoCloseab try { out.close(completionHandler); } catch (Throwable t) { - future.setException(t); + completeExceptionally(t); throw t; } } - @Override public void addListener(Runnable listener, Executor executor) { - future.addListener(listener, executor); + whenCompleteAsync((__, ___) -> listener.run(), executor); } @Override @@ -123,34 +116,19 @@ public class FastContentWriter implements 
ListenableFuture<Boolean>, AutoCloseab return false; } - @Override - public boolean isDone() { - return future.isDone(); - } - - @Override - public Boolean get() throws InterruptedException, ExecutionException { - return future.get(); - } - - @Override - public Boolean get(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException { - return future.get(timeout, unit); - } - private class SimpleCompletionHandler implements CompletionHandler { @Override public void completed() { numPendingCompletions.decrementAndGet(); if (closed.get() && numPendingCompletions.get() == 0) { - future.set(true); + complete(true); } } @Override public void failed(Throwable t) { - future.setException(t); + completeExceptionally(t); } } } diff --git a/jdisc_core/src/main/java/com/yahoo/jdisc/handler/FutureCompletion.java b/jdisc_core/src/main/java/com/yahoo/jdisc/handler/FutureCompletion.java index ab989b89b1f..a188be6145f 100644 --- a/jdisc_core/src/main/java/com/yahoo/jdisc/handler/FutureCompletion.java +++ b/jdisc_core/src/main/java/com/yahoo/jdisc/handler/FutureCompletion.java @@ -1,7 +1,8 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.jdisc.handler; -import com.google.common.util.concurrent.AbstractFuture; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.Executor; /** * <p>This class provides an implementation of {@link CompletionHandler} that allows you to wait for either {@link @@ -13,16 +14,16 @@ import com.google.common.util.concurrent.AbstractFuture; * * @author Simon Thoresen Hult */ -public final class FutureCompletion extends AbstractFuture<Boolean> implements CompletionHandler { +public final class FutureCompletion extends CompletableFuture<Boolean> implements CompletionHandler { @Override public void completed() { - set(true); + complete(true); } @Override public void failed(Throwable t) { - setException(t); + completeExceptionally(t); } @Override @@ -34,4 +35,6 @@ public final class FutureCompletion extends AbstractFuture<Boolean> implements C public final boolean isCancelled() { return false; } + + public void addListener(Runnable r, Executor e) { whenCompleteAsync((__, ___) -> r.run(), e); } } diff --git a/jdisc_core/src/main/java/com/yahoo/jdisc/handler/FutureConjunction.java b/jdisc_core/src/main/java/com/yahoo/jdisc/handler/FutureConjunction.java index c2e32f3ea56..ba304d9e2de 100644 --- a/jdisc_core/src/main/java/com/yahoo/jdisc/handler/FutureConjunction.java +++ b/jdisc_core/src/main/java/com/yahoo/jdisc/handler/FutureConjunction.java @@ -1,43 +1,50 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.jdisc.handler; -import com.google.common.util.concurrent.Futures; -import com.google.common.util.concurrent.JdkFutureAdapters; -import com.google.common.util.concurrent.ListenableFuture; +import com.yahoo.concurrent.CompletableFutures; import java.util.LinkedList; import java.util.List; -import java.util.concurrent.*; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Executor; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; /** * <p>This class implements a Future<Boolean> that is conjunction of zero or more other Future<Boolean>s, * i.e. it evaluates to <code>true</code> if, and only if, all its operands evaluate to <code>true</code>. To use this class, - * simply create an instance of it and add operands to it using the {@link #addOperand(ListenableFuture)} method.</p> - * TODO: consider rewriting usage of FutureConjunction to use CompletableFuture instead. + * simply create an instance of it and add operands to it using the {@link #addOperand(CompletableFuture)} method.</p> * * @author Simon Thoresen Hult */ -public final class FutureConjunction implements ListenableFuture<Boolean> { +final class FutureConjunction implements Future<Boolean> { - private final List<ListenableFuture<Boolean>> operands = new LinkedList<>(); + private final List<CompletableFuture<Boolean>> operands = new LinkedList<>(); /** - * <p>Adds a ListenableFuture<Boolean> to this conjunction. This can be called at any time, even after having called + * <p>Adds a {@link CompletableFuture} to this conjunction. This can be called at any time, even after having called * {@link #get()} previously.</p> * * @param operand The operand to add to this conjunction. 
*/ - public void addOperand(ListenableFuture<Boolean> operand) { + public void addOperand(CompletableFuture<Boolean> operand) { operands.add(operand); } - @Override public void addListener(Runnable listener, Executor executor) { - Futures.allAsList(operands).addListener(listener, executor); + CompletableFutures.allOf(operands) + .whenCompleteAsync((__, ___) -> listener.run(), executor); + } + + CompletableFuture<Boolean> completableFuture() { + return CompletableFutures.allOf(operands) + .thenApply(ops -> ops.stream().allMatch(bool -> bool)); } @Override - public final boolean cancel(boolean mayInterruptIfRunning) { + public boolean cancel(boolean mayInterruptIfRunning) { boolean ret = true; for (Future<Boolean> op : operands) { if (!op.cancel(mayInterruptIfRunning)) { @@ -48,7 +55,7 @@ public final class FutureConjunction implements ListenableFuture<Boolean> { } @Override - public final boolean isCancelled() { + public boolean isCancelled() { for (Future<Boolean> op : operands) { if (!op.isCancelled()) { return false; @@ -58,7 +65,7 @@ public final class FutureConjunction implements ListenableFuture<Boolean> { } @Override - public final boolean isDone() { + public boolean isDone() { for (Future<Boolean> op : operands) { if (!op.isDone()) { return false; @@ -68,8 +75,8 @@ public final class FutureConjunction implements ListenableFuture<Boolean> { } @Override - public final Boolean get() throws InterruptedException, ExecutionException { - Boolean ret = Boolean.TRUE; + public Boolean get() throws InterruptedException, ExecutionException { + boolean ret = Boolean.TRUE; for (Future<Boolean> op : operands) { if (!op.get()) { ret = Boolean.FALSE; @@ -79,9 +86,9 @@ public final class FutureConjunction implements ListenableFuture<Boolean> { } @Override - public final Boolean get(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, + public Boolean get(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException { - 
Boolean ret = Boolean.TRUE; + boolean ret = Boolean.TRUE; long nanos = unit.toNanos(timeout); long lastTime = System.nanoTime(); for (Future<Boolean> op : operands) { diff --git a/jdisc_core/src/main/java/com/yahoo/jdisc/handler/FutureResponse.java b/jdisc_core/src/main/java/com/yahoo/jdisc/handler/FutureResponse.java index b8073865667..2284c563f50 100644 --- a/jdisc_core/src/main/java/com/yahoo/jdisc/handler/FutureResponse.java +++ b/jdisc_core/src/main/java/com/yahoo/jdisc/handler/FutureResponse.java @@ -1,16 +1,18 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.jdisc.handler; -import com.google.common.util.concurrent.AbstractFuture; import com.yahoo.jdisc.Response; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.Executor; + /** * This class provides an implementation of {@link ResponseHandler} that allows you to wait for a {@link Response} to * be returned. * * @author Simon Thoresen Hult */ -public final class FutureResponse extends AbstractFuture<Response> implements ResponseHandler { +public final class FutureResponse extends CompletableFuture<Response> implements ResponseHandler { private final ResponseHandler handler; @@ -38,6 +40,8 @@ public final class FutureResponse extends AbstractFuture<Response> implements Re }); } + public void addListener(Runnable r, Executor e) { whenCompleteAsync((__, ___) -> r.run(), e); } + /** * <p>Constructs a new FutureResponse that calls the given {@link ResponseHandler} when {@link * #handleResponse(Response)} is invoked.</p> @@ -50,7 +54,7 @@ public final class FutureResponse extends AbstractFuture<Response> implements Re @Override public ContentChannel handleResponse(Response response) { - set(response); + complete(response); return handler.handleResponse(response); } diff --git a/jdisc_core/src/main/java/com/yahoo/jdisc/handler/RequestDispatch.java 
b/jdisc_core/src/main/java/com/yahoo/jdisc/handler/RequestDispatch.java index c85aa6375af..c1457290904 100644 --- a/jdisc_core/src/main/java/com/yahoo/jdisc/handler/RequestDispatch.java +++ b/jdisc_core/src/main/java/com/yahoo/jdisc/handler/RequestDispatch.java @@ -1,19 +1,20 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.jdisc.handler; -import com.google.common.util.concurrent.Futures; -import com.google.common.util.concurrent.ListenableFuture; +import com.yahoo.jdisc.References; import com.yahoo.jdisc.Request; import com.yahoo.jdisc.ResourceReference; import com.yahoo.jdisc.Response; import com.yahoo.jdisc.SharedResource; -import com.yahoo.jdisc.References; import java.nio.ByteBuffer; import java.util.Collections; -import java.util.concurrent.*; -import java.util.ArrayList; -import java.util.List; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Executor; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; /** * <p>This class provides a convenient way of safely dispatching a {@link Request}. Using this class you do not have to @@ -46,7 +47,7 @@ import java.util.List; * * @author Simon Thoresen Hult */ -public abstract class RequestDispatch implements ListenableFuture<Response>, ResponseHandler { +public abstract class RequestDispatch implements Future<Response>, ResponseHandler { private final FutureConjunction completions = new FutureConjunction(); private final FutureResponse futureResponse = new FutureResponse(this); @@ -106,22 +107,26 @@ public abstract class RequestDispatch implements ListenableFuture<Response>, Res * * @return A Future that can be waited for. 
*/ - public final ListenableFuture<Response> dispatch() { + public final CompletableFuture<Response> dispatch() { try (FastContentWriter writer = new FastContentWriter(connect())) { for (ByteBuffer buf : requestContent()) { writer.write(buf); } completions.addOperand(writer); } - return this; + return CompletableFuture.allOf(completions.completableFuture(), futureResponse) + .thenApply(__ -> { + try { + return futureResponse.get(); + } catch (InterruptedException | ExecutionException e) { + throw new IllegalStateException(e); // Should not happens since both futures are complete + } + }); } - @Override public void addListener(Runnable listener, Executor executor) { - List<ListenableFuture<?>> combined = new ArrayList<>(2); - combined.add(completions); - combined.add(futureResponse); - Futures.allAsList(combined).addListener(listener, executor); + CompletableFuture.allOf(completions.completableFuture(), futureResponse) + .whenCompleteAsync((__, ___) -> listener.run(), executor); } @Override diff --git a/jdisc_core/src/main/java/com/yahoo/jdisc/handler/ResponseDispatch.java b/jdisc_core/src/main/java/com/yahoo/jdisc/handler/ResponseDispatch.java index 377c8ecf4a9..9387171c1ac 100644 --- a/jdisc_core/src/main/java/com/yahoo/jdisc/handler/ResponseDispatch.java +++ b/jdisc_core/src/main/java/com/yahoo/jdisc/handler/ResponseDispatch.java @@ -1,15 +1,17 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.jdisc.handler; -import com.google.common.util.concurrent.ForwardingListenableFuture; -import com.google.common.util.concurrent.ListenableFuture; import com.yahoo.jdisc.Response; import com.yahoo.jdisc.SharedResource; import java.nio.ByteBuffer; import java.util.Arrays; import java.util.Collections; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; /** * <p>This class provides a convenient way of safely dispatching a {@link Response}. It is similar in use to {@link @@ -34,7 +36,7 @@ import java.util.concurrent.Future; * * @author Simon Thoresen Hult */ -public abstract class ResponseDispatch extends ForwardingListenableFuture<Boolean> { +public abstract class ResponseDispatch implements Future<Boolean> { private final FutureConjunction completions = new FutureConjunction(); @@ -90,19 +92,14 @@ public abstract class ResponseDispatch extends ForwardingListenableFuture<Boolea * @param responseHandler The ResponseHandler to dispatch to. * @return A Future that can be waited for. 
*/ - public final ListenableFuture<Boolean> dispatch(ResponseHandler responseHandler) { + public final CompletableFuture<Boolean> dispatch(ResponseHandler responseHandler) { try (FastContentWriter writer = new FastContentWriter(connect(responseHandler))) { for (ByteBuffer buf : responseContent()) { writer.write(buf); } completions.addOperand(writer); } - return this; - } - - @Override - protected final ListenableFuture<Boolean> delegate() { - return completions; + return completions.completableFuture(); } @Override @@ -115,6 +112,15 @@ public abstract class ResponseDispatch extends ForwardingListenableFuture<Boolea return false; } + @Override public boolean isDone() { return completions.isDone(); } + + @Override public Boolean get() throws InterruptedException, ExecutionException { return completions.get(); } + + @Override + public Boolean get(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException { + return completions.get(timeout, unit); + } + /** * <p>Factory method for creating a ResponseDispatch with a {@link Response} that has the given status code, and * ByteBuffer content.</p> diff --git a/jdisc_core/src/main/java/com/yahoo/jdisc/test/MockMetric.java b/jdisc_core/src/main/java/com/yahoo/jdisc/test/MockMetric.java index cb89320e580..bca6e7082c9 100644 --- a/jdisc_core/src/main/java/com/yahoo/jdisc/test/MockMetric.java +++ b/jdisc_core/src/main/java/com/yahoo/jdisc/test/MockMetric.java @@ -35,6 +35,13 @@ public class MockMetric implements Metric { public Map<String, Map<Map<String, ?>, Double>> metrics() { return metrics; } + @Override + public String toString() { + return "MockMetric{" + + "metrics=" + metrics + + '}'; + } + private static class MapContext implements Context { private static final MapContext empty = new MapContext(Map.of()); diff --git a/jdisc_core/src/main/java/com/yahoo/jdisc/test/ServerProviderConformanceTest.java b/jdisc_core/src/main/java/com/yahoo/jdisc/test/ServerProviderConformanceTest.java 
index 82929013dda..38d4c70646c 100644 --- a/jdisc_core/src/main/java/com/yahoo/jdisc/test/ServerProviderConformanceTest.java +++ b/jdisc_core/src/main/java/com/yahoo/jdisc/test/ServerProviderConformanceTest.java @@ -1,7 +1,7 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.jdisc.test; -import com.google.common.annotations.Beta; +import com.yahoo.api.annotations.Beta; import com.google.inject.AbstractModule; import com.google.inject.Key; import com.google.inject.Module; diff --git a/jdisc_core/src/main/java/com/yahoo/jdisc/test/package-info.java b/jdisc_core/src/main/java/com/yahoo/jdisc/test/package-info.java index 199a12216ad..bfb4088aa99 100644 --- a/jdisc_core/src/main/java/com/yahoo/jdisc/test/package-info.java +++ b/jdisc_core/src/main/java/com/yahoo/jdisc/test/package-info.java @@ -4,5 +4,4 @@ * * @see com.yahoo.jdisc.test.TestDriver */ -@com.yahoo.api.annotations.PublicApi package com.yahoo.jdisc.test; diff --git a/jdisc_core/src/test/java/com/yahoo/jdisc/handler/FastContentWriterTestCase.java b/jdisc_core/src/test/java/com/yahoo/jdisc/handler/FastContentWriterTestCase.java index aa6c4ce3b1b..45bc230896f 100644 --- a/jdisc_core/src/test/java/com/yahoo/jdisc/handler/FastContentWriterTestCase.java +++ b/jdisc_core/src/test/java/com/yahoo/jdisc/handler/FastContentWriterTestCase.java @@ -1,7 +1,6 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.jdisc.handler; -import com.google.common.util.concurrent.MoreExecutors; import org.junit.Test; import org.mockito.Mockito; @@ -188,7 +187,7 @@ public class FastContentWriterTestCase { ReadableContentChannel buf = new ReadableContentChannel(); FastContentWriter out = new FastContentWriter(buf); RunnableLatch listener = new RunnableLatch(); - out.addListener(listener, MoreExecutors.directExecutor()); + out.addListener(listener, Runnable::run); out.write(new byte[] { 6, 9 }); assertFalse(listener.await(100, TimeUnit.MILLISECONDS)); diff --git a/jdisc_core/src/test/java/com/yahoo/jdisc/handler/FutureCompletionTestCase.java b/jdisc_core/src/test/java/com/yahoo/jdisc/handler/FutureCompletionTestCase.java index 6c3803f4f56..ef63b200b5f 100644 --- a/jdisc_core/src/test/java/com/yahoo/jdisc/handler/FutureCompletionTestCase.java +++ b/jdisc_core/src/test/java/com/yahoo/jdisc/handler/FutureCompletionTestCase.java @@ -1,7 +1,6 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.jdisc.handler; -import com.google.common.util.concurrent.MoreExecutors; import org.junit.Test; import java.util.concurrent.ExecutionException; @@ -91,14 +90,14 @@ public class FutureCompletionTestCase { public void requireThatCompletionCanBeListenedTo() throws InterruptedException { FutureCompletion completion = new FutureCompletion(); RunnableLatch listener = new RunnableLatch(); - completion.addListener(listener, MoreExecutors.directExecutor()); + completion.addListener(listener, Runnable::run); assertFalse(listener.await(100, TimeUnit.MILLISECONDS)); completion.completed(); assertTrue(listener.await(600, TimeUnit.SECONDS)); completion = new FutureCompletion(); listener = new RunnableLatch(); - completion.addListener(listener, MoreExecutors.directExecutor()); + completion.addListener(listener, Runnable::run); assertFalse(listener.await(100, TimeUnit.MILLISECONDS)); completion.failed(new Throwable()); assertTrue(listener.await(600, TimeUnit.SECONDS)); diff --git a/jdisc_core/src/test/java/com/yahoo/jdisc/handler/FutureConjunctionTestCase.java b/jdisc_core/src/test/java/com/yahoo/jdisc/handler/FutureConjunctionTestCase.java index 346b06e0f23..1aa78a16dfc 100644 --- a/jdisc_core/src/test/java/com/yahoo/jdisc/handler/FutureConjunctionTestCase.java +++ b/jdisc_core/src/test/java/com/yahoo/jdisc/handler/FutureConjunctionTestCase.java @@ -1,41 +1,37 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.jdisc.handler; -import com.google.common.util.concurrent.AbstractFuture; -import com.google.common.util.concurrent.ListenableFuture; -import com.google.common.util.concurrent.ListeningExecutorService; -import com.google.common.util.concurrent.MoreExecutors; import org.junit.Test; -import java.util.concurrent.Callable; +import java.util.concurrent.CompletableFuture; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; -import static org.junit.Assert.fail; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; /** * @author Simon Thoresen Hult */ public class FutureConjunctionTestCase { - private final ListeningExecutorService executor = MoreExecutors.listeningDecorator(Executors.newCachedThreadPool()); + private final ExecutorService executor = Executors.newCachedThreadPool(); @Test public void requireThatAllFuturesAreWaitedFor() throws Exception { final CountDownLatch latch = new CountDownLatch(1); FutureConjunction future = new FutureConjunction(); - future.addOperand(executor.submit(new Callable<Boolean>() { - - @Override - public Boolean call() throws Exception { - return latch.await(600, TimeUnit.SECONDS); - } - })); + CompletableFuture<Boolean> cf = new CompletableFuture<>(); + cf.completeAsync(() -> { + try { return latch.await(600, TimeUnit.SECONDS); } + catch (InterruptedException e) { return false; } + }, executor); + future.addOperand(cf); try { future.get(100, TimeUnit.MILLISECONDS); fail(); @@ -118,7 +114,7 @@ public class FutureConjunctionTestCase { public void requireThatConjunctionCanBeListenedTo() throws InterruptedException { FutureConjunction conjunction = new 
FutureConjunction(); RunnableLatch listener = new RunnableLatch(); - conjunction.addListener(listener, MoreExecutors.directExecutor()); + conjunction.addListener(listener, Runnable::run); assertTrue(listener.await(600, TimeUnit.SECONDS)); conjunction = new FutureConjunction(); @@ -127,7 +123,7 @@ public class FutureConjunctionTestCase { FutureBoolean bar = new FutureBoolean(); conjunction.addOperand(bar); listener = new RunnableLatch(); - conjunction.addListener(listener, MoreExecutors.directExecutor()); + conjunction.addListener(listener, Runnable::run); assertFalse(listener.await(100, TimeUnit.MILLISECONDS)); foo.set(true); assertFalse(listener.await(100, TimeUnit.MILLISECONDS)); @@ -140,7 +136,7 @@ public class FutureConjunctionTestCase { bar = new FutureBoolean(); conjunction.addOperand(bar); listener = new RunnableLatch(); - conjunction.addListener(listener, MoreExecutors.directExecutor()); + conjunction.addListener(listener, Runnable::run); assertFalse(listener.await(100, TimeUnit.MILLISECONDS)); bar.set(true); assertFalse(listener.await(100, TimeUnit.MILLISECONDS)); @@ -190,14 +186,14 @@ public class FutureConjunctionTestCase { return foo.isCancelled(); } - private static class FutureBoolean extends AbstractFuture<Boolean> { + private static class FutureBoolean extends CompletableFuture<Boolean> { public boolean set(Boolean val) { - return super.set(val); + return super.complete(val); } } - private static class MyFuture extends AbstractFuture<Boolean> { + private static class MyFuture extends CompletableFuture<Boolean> { final boolean value; final boolean isDone; @@ -236,19 +232,19 @@ public class FutureConjunctionTestCase { return value; } - static ListenableFuture<Boolean> newInstance(boolean value) { + static CompletableFuture<Boolean> newInstance(boolean value) { return new MyFuture(value, false, false, false); } - static ListenableFuture<Boolean> newIsDone(boolean isDone) { + static CompletableFuture<Boolean> newIsDone(boolean isDone) { return new 
MyFuture(false, isDone, false, false); } - static ListenableFuture<Boolean> newCanCancel(boolean canCancel) { + static CompletableFuture<Boolean> newCanCancel(boolean canCancel) { return new MyFuture(false, false, canCancel, false); } - static ListenableFuture<Boolean> newIsCancelled(boolean isCancelled) { + static CompletableFuture<Boolean> newIsCancelled(boolean isCancelled) { return new MyFuture(false, false, false, isCancelled); } } diff --git a/jdisc_core/src/test/java/com/yahoo/jdisc/handler/FutureResponseTestCase.java b/jdisc_core/src/test/java/com/yahoo/jdisc/handler/FutureResponseTestCase.java index 440698257a4..398f288e307 100644 --- a/jdisc_core/src/test/java/com/yahoo/jdisc/handler/FutureResponseTestCase.java +++ b/jdisc_core/src/test/java/com/yahoo/jdisc/handler/FutureResponseTestCase.java @@ -1,7 +1,6 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.jdisc.handler; -import com.google.common.util.concurrent.MoreExecutors; import com.yahoo.jdisc.Response; import com.yahoo.jdisc.test.NonWorkingContentChannel; import org.junit.Test; @@ -73,7 +72,7 @@ public class FutureResponseTestCase { public void requireThatResponseCanBeListenedTo() throws InterruptedException { FutureResponse response = new FutureResponse(); RunnableLatch listener = new RunnableLatch(); - response.addListener(listener, MoreExecutors.directExecutor()); + response.addListener(listener, Runnable::run); assertFalse(listener.await(100, TimeUnit.MILLISECONDS)); response.handleResponse(new Response(Response.Status.OK)); assertTrue(listener.await(600, TimeUnit.SECONDS)); diff --git a/jdisc_core/src/test/java/com/yahoo/jdisc/handler/RequestDispatchTestCase.java b/jdisc_core/src/test/java/com/yahoo/jdisc/handler/RequestDispatchTestCase.java index 3b49d1f349e..6ec78f01733 100644 --- a/jdisc_core/src/test/java/com/yahoo/jdisc/handler/RequestDispatchTestCase.java +++ 
b/jdisc_core/src/test/java/com/yahoo/jdisc/handler/RequestDispatchTestCase.java @@ -1,7 +1,6 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.jdisc.handler; -import com.google.common.util.concurrent.MoreExecutors; import com.yahoo.jdisc.Request; import com.yahoo.jdisc.Response; import com.yahoo.jdisc.application.ContainerBuilder; @@ -17,12 +16,12 @@ import java.util.Arrays; import java.util.List; import java.util.concurrent.TimeUnit; -import static org.junit.Assert.fail; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNull; import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; /** * @author Simon Thoresen Hult @@ -218,7 +217,7 @@ public class RequestDispatchTestCase { protected Request newRequest() { return new Request(driver, URI.create("http://localhost/")); } - }.dispatch().addListener(listener, MoreExecutors.directExecutor()); + }.dispatch().whenComplete((__, ___) -> listener.run()); assertFalse(listener.await(100, TimeUnit.MILLISECONDS)); ContentChannel responseContent = ResponseDispatch.newInstance(Response.Status.OK) .connect(requestHandler.responseHandler); diff --git a/jdisc_core/src/test/java/com/yahoo/jdisc/handler/ResponseDispatchTestCase.java b/jdisc_core/src/test/java/com/yahoo/jdisc/handler/ResponseDispatchTestCase.java index f9a5c22837f..4006ab072cb 100644 --- a/jdisc_core/src/test/java/com/yahoo/jdisc/handler/ResponseDispatchTestCase.java +++ b/jdisc_core/src/test/java/com/yahoo/jdisc/handler/ResponseDispatchTestCase.java @@ -1,7 +1,6 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.jdisc.handler; -import com.google.common.util.concurrent.MoreExecutors; import com.yahoo.jdisc.Response; import org.junit.Test; @@ -14,13 +13,13 @@ import java.util.List; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; -import static org.junit.Assert.fail; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; /** * @author Simon Thoresen Hult @@ -179,7 +178,7 @@ public class ResponseDispatchTestCase { ReadableContentChannel responseContent = new ReadableContentChannel(); ResponseDispatch.newInstance(6, ByteBuffer.allocate(9)) .dispatch(new MyResponseHandler(responseContent)) - .addListener(listener, MoreExecutors.directExecutor()); + .whenComplete((__, ___) -> listener.run()); assertFalse(listener.await(100, TimeUnit.MILLISECONDS)); assertNotNull(responseContent.read()); assertFalse(listener.await(100, TimeUnit.MILLISECONDS)); diff --git a/jdisc_core/src/test/java/com/yahoo/jdisc/handler/ThreadedRequestHandlerTestCase.java b/jdisc_core/src/test/java/com/yahoo/jdisc/handler/ThreadedRequestHandlerTestCase.java index 71f207bbbff..f639877b87b 100644 --- a/jdisc_core/src/test/java/com/yahoo/jdisc/handler/ThreadedRequestHandlerTestCase.java +++ b/jdisc_core/src/test/java/com/yahoo/jdisc/handler/ThreadedRequestHandlerTestCase.java @@ -1,7 +1,6 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.jdisc.handler; -import com.google.common.util.concurrent.ListenableFuture; import com.yahoo.jdisc.Request; import com.yahoo.jdisc.Response; import com.yahoo.jdisc.application.ContainerBuilder; @@ -12,18 +11,19 @@ import org.junit.Test; import java.net.URI; import java.nio.ByteBuffer; import java.util.Arrays; +import java.util.concurrent.CompletableFuture; import java.util.concurrent.CountDownLatch; import java.util.concurrent.Executor; import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; -import static org.junit.Assert.fail; -import static org.junit.Assert.assertTrue; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNull; import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; /** * @author Simon Thoresen Hult @@ -159,8 +159,8 @@ public class ThreadedRequestHandlerTestCase { return driver; } - private static ListenableFuture<Response> dispatchRequest(final CurrentContainer container, final String uri, - final ByteBuffer... content) { + private static CompletableFuture<Response> dispatchRequest(final CurrentContainer container, final String uri, + final ByteBuffer... content) { return new RequestDispatch() { @Override diff --git a/jdisc_core/src/test/java/com/yahoo/jdisc/test/ServerProviderConformanceTestTest.java b/jdisc_core/src/test/java/com/yahoo/jdisc/test/ServerProviderConformanceTestTest.java index 01b1e72d0b6..c9c7ec1db48 100644 --- a/jdisc_core/src/test/java/com/yahoo/jdisc/test/ServerProviderConformanceTestTest.java +++ b/jdisc_core/src/test/java/com/yahoo/jdisc/test/ServerProviderConformanceTestTest.java @@ -1,7 +1,6 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.jdisc.test; -import com.google.common.util.concurrent.SettableFuture; import com.google.inject.Inject; import com.google.inject.Module; import com.google.inject.util.Modules; @@ -20,6 +19,7 @@ import java.net.URI; import java.nio.ByteBuffer; import java.nio.charset.StandardCharsets; import java.util.Collections; +import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; @@ -571,7 +571,7 @@ public class ServerProviderConformanceTestTest extends ServerProviderConformance try { request = new Request(server.container, URI.create("http://localhost/")); } catch (Throwable t) { - responseHandler.response.set(new Response(Response.Status.INTERNAL_SERVER_ERROR, t)); + responseHandler.response.complete(new Response(Response.Status.INTERNAL_SERVER_ERROR, t)); return responseHandler; } try { @@ -581,7 +581,7 @@ public class ServerProviderConformanceTestTest extends ServerProviderConformance } tryClose(out); } catch (Throwable t) { - responseHandler.response.set(new Response(Response.Status.INTERNAL_SERVER_ERROR, t)); + responseHandler.response.complete(new Response(Response.Status.INTERNAL_SERVER_ERROR, t)); // Simulate handling the failure. 
t.getMessage(); return responseHandler; @@ -594,13 +594,13 @@ public class ServerProviderConformanceTestTest extends ServerProviderConformance private static class MyResponseHandler implements ResponseHandler { - final SettableFuture<Response> response = SettableFuture.create(); - final SettableFuture<String> content = SettableFuture.create(); + final CompletableFuture<Response> response = new CompletableFuture<>(); + final CompletableFuture<String> content = new CompletableFuture<>(); final ByteArrayOutputStream out = new ByteArrayOutputStream(); @Override public ContentChannel handleResponse(final Response response) { - this.response.set(response); + this.response.complete(response); return new ContentChannel() { @Override @@ -613,7 +613,7 @@ public class ServerProviderConformanceTestTest extends ServerProviderConformance @Override public void close(final CompletionHandler handler) { - content.set(new String(out.toByteArray(), StandardCharsets.UTF_8)); + content.complete(new String(out.toByteArray(), StandardCharsets.UTF_8)); tryComplete(handler); } }; diff --git a/linguistics-components/abi-spec.json b/linguistics-components/abi-spec.json index 28025d84f25..f1deac67dc2 100644 --- a/linguistics-components/abi-spec.json +++ b/linguistics-components/abi-spec.json @@ -30,6 +30,7 @@ "public com.yahoo.language.sentencepiece.SentencePieceConfig$Builder collapseUnknowns(boolean)", "public com.yahoo.language.sentencepiece.SentencePieceConfig$Builder scoring(com.yahoo.language.sentencepiece.SentencePieceConfig$Scoring$Enum)", "public com.yahoo.language.sentencepiece.SentencePieceConfig$Builder model(com.yahoo.language.sentencepiece.SentencePieceConfig$Model$Builder)", + "public com.yahoo.language.sentencepiece.SentencePieceConfig$Builder model(java.util.function.Consumer)", "public com.yahoo.language.sentencepiece.SentencePieceConfig$Builder model(java.util.List)", "public final boolean dispatchGetConfig(com.yahoo.config.ConfigInstance$Producer)", "public final 
java.lang.String getDefMd5()", diff --git a/linguistics-components/src/main/java/com/yahoo/language/sentencepiece/SentencePieceEmbedder.java b/linguistics-components/src/main/java/com/yahoo/language/sentencepiece/SentencePieceEmbedder.java index 3f4e8ee3462..3afc85300d4 100644 --- a/linguistics-components/src/main/java/com/yahoo/language/sentencepiece/SentencePieceEmbedder.java +++ b/linguistics-components/src/main/java/com/yahoo/language/sentencepiece/SentencePieceEmbedder.java @@ -1,7 +1,7 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.language.sentencepiece; -import com.google.common.annotations.Beta; +import com.yahoo.api.annotations.Beta; import com.google.inject.Inject; import com.yahoo.language.Language; import com.yahoo.language.process.Embedder; diff --git a/linguistics/src/test/java/com/yahoo/language/opennlp/OptimaizeDetectorTestCase.java b/linguistics/src/test/java/com/yahoo/language/opennlp/OptimaizeDetectorTestCase.java index 3fc173dd82e..20b5de3b165 100644 --- a/linguistics/src/test/java/com/yahoo/language/opennlp/OptimaizeDetectorTestCase.java +++ b/linguistics/src/test/java/com/yahoo/language/opennlp/OptimaizeDetectorTestCase.java @@ -3,7 +3,6 @@ package com.yahoo.language.opennlp; import com.yahoo.language.Language; import com.yahoo.language.detect.Detector; -import com.yahoo.language.simple.SimpleDetector; import org.junit.Test; import static org.junit.Assert.assertEquals; diff --git a/logd/src/logd/watcher.cpp b/logd/src/logd/watcher.cpp index bab80dab7bd..23d1580dbf4 100644 --- a/logd/src/logd/watcher.cpp +++ b/logd/src/logd/watcher.cpp @@ -140,6 +140,7 @@ Watcher::watchfile() vespalib::SigCatch catcher; int sleepcount = 0; vespalib::system_time created = vespalib::system_time::min(); + vespalib::system_time lastPrune = vespalib::system_time::min(); again: // XXX should close and/or check _wfd first ? 
@@ -215,6 +216,11 @@ Watcher::watchfile() bool wantrotate = (now > created + _confsubscriber.getRotateAge()) || (sb.st_size > _confsubscriber.getRotateSize()); + if (now > lastPrune + 61s) { + removeOldLogs(filename); + lastPrune = now; + } + if (rotate) { vespalib::duration rotTime = rotTimer.elapsed(); off_t overflow_size = (1.1 * _confsubscriber.getRotateSize()); @@ -240,7 +246,6 @@ Watcher::watchfile() LOG(warning, "logfile spamming %d times, aggressively removing %s", spamfill_counter, newfn); unlink(newfn); } - removeOldLogs(filename); goto again; } } else if (stat(filename, &sb) != 0 diff --git a/messagebus/abi-spec.json b/messagebus/abi-spec.json index 623904bef8d..bff28986119 100644 --- a/messagebus/abi-spec.json +++ b/messagebus/abi-spec.json @@ -413,6 +413,7 @@ "public void <init>()", "public void <init>(com.yahoo.messagebus.MessagebusConfig)", "public com.yahoo.messagebus.MessagebusConfig$Builder routingtable(com.yahoo.messagebus.MessagebusConfig$Routingtable$Builder)", + "public com.yahoo.messagebus.MessagebusConfig$Builder routingtable(java.util.function.Consumer)", "public com.yahoo.messagebus.MessagebusConfig$Builder routingtable(java.util.List)", "public final boolean dispatchGetConfig(com.yahoo.config.ConfigInstance$Producer)", "public final java.lang.String getDefMd5()", @@ -454,8 +455,10 @@ "public void <init>(com.yahoo.messagebus.MessagebusConfig$Routingtable)", "public com.yahoo.messagebus.MessagebusConfig$Routingtable$Builder protocol(java.lang.String)", "public com.yahoo.messagebus.MessagebusConfig$Routingtable$Builder hop(com.yahoo.messagebus.MessagebusConfig$Routingtable$Hop$Builder)", + "public com.yahoo.messagebus.MessagebusConfig$Routingtable$Builder hop(java.util.function.Consumer)", "public com.yahoo.messagebus.MessagebusConfig$Routingtable$Builder hop(java.util.List)", "public com.yahoo.messagebus.MessagebusConfig$Routingtable$Builder route(com.yahoo.messagebus.MessagebusConfig$Routingtable$Route$Builder)", + "public 
com.yahoo.messagebus.MessagebusConfig$Routingtable$Builder route(java.util.function.Consumer)", "public com.yahoo.messagebus.MessagebusConfig$Routingtable$Builder route(java.util.List)", "public com.yahoo.messagebus.MessagebusConfig$Routingtable build()" ], diff --git a/messagebus/src/vespa/messagebus/network/rpcnetwork.cpp b/messagebus/src/vespa/messagebus/network/rpcnetwork.cpp index 15b5bf81670..ed2ce3d638e 100644 --- a/messagebus/src/vespa/messagebus/network/rpcnetwork.cpp +++ b/messagebus/src/vespa/messagebus/network/rpcnetwork.cpp @@ -425,8 +425,7 @@ RPCNetwork::shutdown() { _transport->ShutDown(true); _threadPool->Close(); - _executor->shutdown(); - _executor->sync(); + _executor->shutdown().sync(); } void diff --git a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/core/MetricsConsumers.java b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/core/MetricsConsumers.java index 7c6cae660a7..457e27a5896 100644 --- a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/core/MetricsConsumers.java +++ b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/core/MetricsConsumers.java @@ -4,9 +4,11 @@ package ai.vespa.metricsproxy.core; import ai.vespa.metricsproxy.core.ConsumersConfig.Consumer; import ai.vespa.metricsproxy.metric.model.ConsumerId; +import ai.vespa.metricsproxy.metric.model.MetricId; import java.util.ArrayList; import java.util.Collections; +import java.util.HashMap; import java.util.HashSet; import java.util.LinkedHashMap; import java.util.List; @@ -33,11 +35,20 @@ public class MetricsConsumers { // All consumers for each metric (more useful than the opposite map). 
private final Map<ConfiguredMetric, Set<ConsumerId>> consumersByMetric; + // All consumers for each metric, by metric id + private final Map<MetricId, Map<ConfiguredMetric, Set<ConsumerId>>> consumersByMetricByMetricId; + public MetricsConsumers(ConsumersConfig config) { consumerMetrics = config.consumer().stream().collect( toUnmodifiableLinkedMap(consumer -> ConsumerId.toConsumerId(consumer.name()), consumer -> convert(consumer.metric()))); consumersByMetric = createConsumersByMetric(consumerMetrics); + consumersByMetricByMetricId = new HashMap<>(); + consumersByMetric.forEach((configuredMetric, consumers) -> { + var consumersByMetric = consumersByMetricByMetricId.computeIfAbsent(configuredMetric.id(), id -> new HashMap<>()); + var consumerSet = consumersByMetric.computeIfAbsent(configuredMetric, id -> new HashSet<>()); + consumerSet.addAll(consumers); + }); } /** @@ -52,6 +63,10 @@ public class MetricsConsumers { return consumersByMetric; } + public Map<ConfiguredMetric, Set<ConsumerId>> getConsumersByMetric(MetricId id) { + return consumersByMetricByMetricId.get(id); + } + public Set<ConsumerId> getAllConsumers() { return unmodifiableSet(consumerMetrics.keySet()); } diff --git a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/core/VespaMetrics.java b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/core/VespaMetrics.java index 44eca2f57b4..3629e81582a 100644 --- a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/core/VespaMetrics.java +++ b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/core/VespaMetrics.java @@ -23,7 +23,6 @@ import java.util.List; import java.util.Map; import java.util.Optional; import java.util.Set; -import java.util.stream.Collectors; import static ai.vespa.metricsproxy.metric.dimensions.PublicDimensions.INTERNAL_SERVICE_ID; import static ai.vespa.metricsproxy.metric.model.DimensionId.toDimensionId; @@ -69,15 +68,13 @@ public class VespaMetrics { public List<MetricsPacket.Builder> getMetrics(List<VespaService> services) { 
List<MetricsPacket.Builder> metricsPackets = new ArrayList<>(); - Map<ConfiguredMetric, Set<ConsumerId>> consumersByMetric = metricsConsumers.getConsumersByMetric(); - for (VespaService service : services) { // One metrics packet for system metrics Optional<MetricsPacket.Builder> systemCheck = getSystemMetrics(service); systemCheck.ifPresent(metricsPackets::add); MetricAggregator aggregator = new MetricAggregator(service.getDimensions()); - GetServiceMetricsConsumer metricsConsumer = new GetServiceMetricsConsumer(consumersByMetric, aggregator); + GetServiceMetricsConsumer metricsConsumer = new GetServiceMetricsConsumer(metricsConsumers, aggregator); service.consumeMetrics(metricsConsumer); if (! aggregator.getAggregated().isEmpty()) { @@ -118,58 +115,50 @@ public class VespaMetrics { * In order to include a metric, it must exist in the given map of metric to consumers. * Each returned metric will contain a collection of consumers that it should be routed to. */ - private class GetServiceMetricsConsumer implements MetricsParser.Consumer { + private static class GetServiceMetricsConsumer implements MetricsParser.Consumer { private final MetricAggregator aggregator; - private final Map<ConfiguredMetric, Set<ConsumerId>> consumersByMetric; - GetServiceMetricsConsumer(Map<ConfiguredMetric, Set<ConsumerId>> consumersByMetric, MetricAggregator aggregator) { - this.consumersByMetric = consumersByMetric; + private final MetricsConsumers metricsConsumers; + GetServiceMetricsConsumer(MetricsConsumers metricsConsumers, MetricAggregator aggregator) { + this.metricsConsumers = metricsConsumers; this.aggregator = aggregator; } @Override public void consume(Metric candidate) { - getConfiguredMetrics(candidate.getName(), consumersByMetric.keySet()).forEach( - configuredMetric -> aggregator.aggregate( - metricWithConfigProperties(candidate, configuredMetric, consumersByMetric))); + Map<ConfiguredMetric, Set<ConsumerId>> consumersByMetric = 
metricsConsumers.getConsumersByMetric(candidate.getName()); + if (consumersByMetric != null) { + consumersByMetric.keySet().forEach( + configuredMetric -> aggregator.aggregate( + metricWithConfigProperties(candidate, configuredMetric, consumersByMetric.get(configuredMetric)))); + } } - } - - private Map<DimensionId, String> extractDimensions(Map<DimensionId, String> dimensions, List<Dimension> configuredDimensions) { - if ( ! configuredDimensions.isEmpty()) { - Map<DimensionId, String> dims = new HashMap<>(dimensions); - configuredDimensions.forEach(d -> dims.put(d.key(), d.value())); - dimensions = Collections.unmodifiableMap(dims); + private static Metric metricWithConfigProperties(Metric candidate, + ConfiguredMetric configuredMetric, + Set<ConsumerId> consumers) { + Metric metric = candidate.clone(); + metric.setDimensions(extractDimensions(candidate.getDimensions(), configuredMetric.dimension())); + metric.setConsumers(extractConsumers(consumers)); + + if (configuredMetric.outputname() != null && !configuredMetric.outputname().id.isEmpty()) + metric.setName(configuredMetric.outputname()); + return metric; } - return dimensions; - } - - private Set<ConsumerId> extractConsumers(Set<ConsumerId> configuredConsumers) { - Set<ConsumerId> consumers = Collections.emptySet(); - if (configuredConsumers != null) { - consumers = configuredConsumers; + private static Map<DimensionId, String> extractDimensions(Map<DimensionId, String> dimensions, List<Dimension> configuredDimensions) { + if ( ! 
configuredDimensions.isEmpty()) { + Map<DimensionId, String> dims = new HashMap<>(dimensions); + configuredDimensions.forEach(d -> dims.put(d.key(), d.value())); + dimensions = Collections.unmodifiableMap(dims); + } + return dimensions; } - return consumers; - } - private Metric metricWithConfigProperties(Metric candidate, - ConfiguredMetric configuredMetric, - Map<ConfiguredMetric, Set<ConsumerId>> consumersByMetric) { - Metric metric = candidate.clone(); - metric.setDimensions(extractDimensions(candidate.getDimensions(), configuredMetric.dimension())); - metric.setConsumers(extractConsumers(consumersByMetric.get(configuredMetric))); - - if (configuredMetric.outputname() != null && !configuredMetric.outputname().id.isEmpty()) - metric.setName(configuredMetric.outputname()); - return metric; - } - - /** - * Returns all configured metrics (for any consumer) that have the given id as 'name'. - */ - private static Set<ConfiguredMetric> getConfiguredMetrics(MetricId id, Set<ConfiguredMetric> configuredMetrics) { - return configuredMetrics.stream() - .filter(m -> m.id().equals(id)) - .collect(Collectors.toSet()); + private static Set<ConsumerId> extractConsumers(Set<ConsumerId> configuredConsumers) { + Set<ConsumerId> consumers = Collections.emptySet(); + if (configuredConsumers != null) { + consumers = configuredConsumers; + } + return consumers; + } } private Optional<MetricsPacket.Builder> getSystemMetrics(VespaService service) { diff --git a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/http/application/ApplicationMetricsRetriever.java b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/http/application/ApplicationMetricsRetriever.java index 2ddbc71268c..80197758104 100644 --- a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/http/application/ApplicationMetricsRetriever.java +++ b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/http/application/ApplicationMetricsRetriever.java @@ -6,13 +6,13 @@ import ai.vespa.metricsproxy.metric.model.MetricsPacket; import 
ai.vespa.util.http.hc5.VespaAsyncHttpClientBuilder; import com.google.inject.Inject; import com.yahoo.component.AbstractComponent; -import org.apache.hc.client5.http.HttpHostConnectException; import org.apache.hc.client5.http.config.RequestConfig; import org.apache.hc.client5.http.impl.async.CloseableHttpAsyncClient; import org.apache.hc.core5.reactor.IOReactorConfig; import org.apache.hc.core5.util.Timeout; import java.io.IOException; +import java.net.SocketException; import java.time.Clock; import java.time.Duration; import java.util.HashMap; @@ -71,8 +71,8 @@ public class ApplicationMetricsRetriever extends AbstractComponent implements Ru @Override public void run() { - try { - while (true) { + while (true) { + try { ConsumerId [] consumers; synchronized (pollThread) { consumers = consumerSet.toArray(new ConsumerId[0]); @@ -92,8 +92,12 @@ public class ApplicationMetricsRetriever extends AbstractComponent implements Ru pollThread.wait(timeUntilNextPoll.toMillis()); if (stopped) return; } + } catch (InterruptedException e) { + } catch (Exception e) { + log.log(Level.WARNING, "Got unknown exception:", e); } - } catch (InterruptedException e) {} + } + } @Override @@ -162,9 +166,8 @@ public class ApplicationMetricsRetriever extends AbstractComponent implements Ru if ((result != null) && result) numOk++; } catch (InterruptedException | ExecutionException | TimeoutException e) { Throwable cause = e.getCause(); - if ( e instanceof ExecutionException && (cause != null) && (cause instanceof HttpHostConnectException)) { - // Remove once we have some track time. 
- log.log(Level.WARNING, "Failed retrieving metrics for '" + entry.getKey() + "' : " + cause.getMessage()); + if ( e instanceof ExecutionException && (cause instanceof SocketException)) { + log.log(Level.FINE, "Failed retrieving metrics for '" + entry.getKey() + "' : " + cause.getMessage()); } else { log.log(Level.WARNING, "Failed retrieving metrics for '" + entry.getKey() + "' : ", e); } diff --git a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/metric/MetricsFormatter.java b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/metric/MetricsFormatter.java index 9277d09d02a..64a21a54999 100644 --- a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/metric/MetricsFormatter.java +++ b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/metric/MetricsFormatter.java @@ -13,6 +13,7 @@ import java.util.Locale; * @author Unknown */ public class MetricsFormatter { + private final boolean includeServiceName; private final boolean isSystemMetric; private final DecimalFormat df = new DecimalFormat("0.000", new DecimalFormatSymbols(Locale.ENGLISH)); diff --git a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/service/MetricsParser.java b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/service/MetricsParser.java index 079633b28a1..8157ecb72fd 100644 --- a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/service/MetricsParser.java +++ b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/service/MetricsParser.java @@ -145,6 +145,7 @@ public class MetricsParser { } JsonNode aggregates = metric.get("values"); + String prefix = name + "."; for (Iterator<?> it = aggregates.fieldNames(); it.hasNext(); ) { String aggregator = (String) it.next(); JsonNode aggregatorValue = aggregates.get(aggregator); @@ -155,7 +156,7 @@ public class MetricsParser { if (value == null) { throw new IllegalArgumentException("Value for aggregator '" + aggregator + "' is not a number"); } - String metricName = new StringBuilder().append(name).append(".").append(aggregator).toString(); + String metricName 
= prefix + aggregator; consumer.consume(new Metric(MetricId.toMetricId(metricName), value, timestamp, dim, description)); } } diff --git a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/service/SystemPoller.java b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/service/SystemPoller.java index 49668a59d63..c548d187569 100644 --- a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/service/SystemPoller.java +++ b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/service/SystemPoller.java @@ -163,9 +163,9 @@ public class SystemPoller { List<VespaService> services, Map<VespaService, Long> lastCpuJiffiesMetrics) { JiffiesAndCpus sysJiffies = getJiffies.getTotalSystemJiffies(); JiffiesAndCpus sysJiffiesDiff = sysJiffies.diff(prevTotalJiffies); + log.log(Level.FINE, () -> "Total jiffies: " + sysJiffies.jiffies + " - " + prevTotalJiffies.jiffies + " = " + sysJiffiesDiff.jiffies); for (VespaService s : services) { Metrics metrics = new Metrics(); - log.log(Level.FINE, () -> "Current size of system metrics for service " + s + " is " + metrics.size()); long[] size = getMemoryUsage(s); log.log(Level.FINE, () -> "Updating memory metric for service " + s); @@ -177,12 +177,14 @@ public class SystemPoller { long last = lastCpuJiffiesMetrics.get(s); long diff = procJiffies - last; + log.log(Level.FINE, () -> "Service " + s + " jiffies: " + procJiffies + " - " + last + " = " + diff); if (diff >= 0) { metrics.add(new Metric(CPU, 100 * sysJiffiesDiff.ratioSingleCoreJiffies(diff), timeStamp)); metrics.add(new Metric(CPU_UTIL, 100 * sysJiffiesDiff.ratioJiffies(diff), timeStamp)); } lastCpuJiffiesMetrics.put(s, procJiffies); s.setSystemMetrics(metrics); + log.log(Level.FINE, () -> "Current size of system metrics for service " + s + " is " + metrics.size()); } return sysJiffies; } diff --git a/metrics-proxy/src/test/java/ai/vespa/metricsproxy/http/application/ApplicationMetricsHandlerTest.java 
b/metrics-proxy/src/test/java/ai/vespa/metricsproxy/http/application/ApplicationMetricsHandlerTest.java index 9e7849f635e..4d1fb802afc 100644 --- a/metrics-proxy/src/test/java/ai/vespa/metricsproxy/http/application/ApplicationMetricsHandlerTest.java +++ b/metrics-proxy/src/test/java/ai/vespa/metricsproxy/http/application/ApplicationMetricsHandlerTest.java @@ -46,7 +46,6 @@ import static org.junit.Assert.fail; /** * @author gjoranv */ -@SuppressWarnings("UnstableApiUsage") public class ApplicationMetricsHandlerTest { private static final ObjectMapper jsonMapper = new ObjectMapper(); @@ -79,8 +78,8 @@ public class ApplicationMetricsHandlerTest { public void setup() { setupWireMock(); - ApplicationMetricsRetriever applicationMetricsRetriever = new ApplicationMetricsRetriever( - nodesConfig(MOCK_METRICS_PATH)); + ApplicationMetricsRetriever applicationMetricsRetriever = + new ApplicationMetricsRetriever(nodesConfig(MOCK_METRICS_PATH)); ApplicationMetricsHandler handler = new ApplicationMetricsHandler(Executors.newSingleThreadExecutor(), applicationMetricsRetriever, diff --git a/model-evaluation/src/main/java/ai/vespa/models/evaluation/Model.java b/model-evaluation/src/main/java/ai/vespa/models/evaluation/Model.java index 8af5f7bc499..8e7918bae9b 100644 --- a/model-evaluation/src/main/java/ai/vespa/models/evaluation/Model.java +++ b/model-evaluation/src/main/java/ai/vespa/models/evaluation/Model.java @@ -1,7 +1,7 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package ai.vespa.models.evaluation; -import com.google.common.annotations.Beta; +import com.yahoo.api.annotations.Beta; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import com.yahoo.searchlib.rankingexpression.ExpressionFunction; diff --git a/model-evaluation/src/main/java/ai/vespa/models/evaluation/ModelsEvaluator.java b/model-evaluation/src/main/java/ai/vespa/models/evaluation/ModelsEvaluator.java index 01427ca811a..1554f11195f 100644 --- a/model-evaluation/src/main/java/ai/vespa/models/evaluation/ModelsEvaluator.java +++ b/model-evaluation/src/main/java/ai/vespa/models/evaluation/ModelsEvaluator.java @@ -1,7 +1,7 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package ai.vespa.models.evaluation; -import com.google.common.annotations.Beta; +import com.yahoo.api.annotations.Beta; import com.google.common.collect.ImmutableMap; import com.google.inject.Inject; import com.yahoo.component.AbstractComponent; diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/RealNodeRepository.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/RealNodeRepository.java index 9336451d08d..38e725360a0 100644 --- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/RealNodeRepository.java +++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/RealNodeRepository.java @@ -162,7 +162,7 @@ public class RealNodeRepository implements NodeRepository { return new NodeSpec( node.hostname, - Optional.ofNullable(node.openStackId), + Optional.ofNullable(node.id), Optional.ofNullable(node.wantedDockerImage).map(DockerImage::fromString), Optional.ofNullable(node.currentDockerImage).map(DockerImage::fromString), nodeState, @@ -244,7 +244,7 @@ public class RealNodeRepository implements NodeRepository { private static NodeRepositoryNode 
nodeRepositoryNodeFromAddNode(AddNode addNode) { NodeRepositoryNode node = new NodeRepositoryNode(); - node.openStackId = addNode.id.orElse("fake-" + addNode.hostname); + node.id = addNode.id.orElse("fake-" + addNode.hostname); node.hostname = addNode.hostname; node.parentHostname = addNode.parentHostname.orElse(null); addNode.nodeFlavor.ifPresent(f -> node.flavor = f); @@ -269,7 +269,7 @@ public class RealNodeRepository implements NodeRepository { public static NodeRepositoryNode nodeRepositoryNodeFromNodeAttributes(NodeAttributes nodeAttributes) { NodeRepositoryNode node = new NodeRepositoryNode(); - node.openStackId = nodeAttributes.getHostId().orElse(null); + node.id = nodeAttributes.getHostId().orElse(null); node.currentDockerImage = nodeAttributes.getDockerImage().map(DockerImage::asString).orElse(null); node.currentRestartGeneration = nodeAttributes.getRestartGeneration().orElse(null); node.currentRebootGeneration = nodeAttributes.getRebootGeneration().orElse(null); diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/bindings/NodeRepositoryNode.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/bindings/NodeRepositoryNode.java index 1e51fe279bb..f99fb3d8b76 100644 --- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/bindings/NodeRepositoryNode.java +++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/bindings/NodeRepositoryNode.java @@ -25,8 +25,8 @@ public class NodeRepositoryNode { public Set<String> ipAddresses; @JsonProperty("additionalIpAddresses") public Set<String> additionalIpAddresses; - @JsonProperty("openStackId") - public String openStackId; + @JsonProperty("id") + public String id; @JsonProperty("flavor") public String flavor; @JsonProperty("resources") @@ -99,7 +99,7 @@ public class NodeRepositoryNode { ", hostname='" + hostname + '\'' + ", ipAddresses=" + ipAddresses + ", 
additionalIpAddresses=" + additionalIpAddresses + - ", openStackId='" + openStackId + '\'' + + ", id='" + id + '\'' + ", modelName='" + modelName + '\'' + ", flavor='" + flavor + '\'' + ", resources=" + resources + diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoreCollector.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoreCollector.java index 60435082745..a29f6a89283 100644 --- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoreCollector.java +++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoreCollector.java @@ -84,7 +84,8 @@ public class CoreCollector { List<String> readBacktrace(NodeAgentContext context, ContainerPath coredumpPath, String binPath, boolean allThreads) { String threads = allThreads ? "thread apply all bt" : "bt"; - String[] command = {getGdbPath(context), "-n", "-ex", threads, "-batch", binPath, coredumpPath.pathInContainer()}; + String[] command = {getGdbPath(context), "-n", "-ex", "set print frame-arguments none", + "-ex", threads, "-batch", binPath, coredumpPath.pathInContainer()}; CommandResult result = container.executeCommandInContainer(context, context.users().root(), command); if (result.getExitCode() != 0) diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/sync/SyncFileInfo.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/sync/SyncFileInfo.java index 1854ed67389..a9f2cd219b9 100644 --- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/sync/SyncFileInfo.java +++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/sync/SyncFileInfo.java @@ -5,6 +5,7 @@ import com.yahoo.config.provision.ApplicationId; import java.net.URI; import java.nio.file.Path; +import java.time.Duration; import java.util.HashMap; import java.util.Map; import java.util.Optional; @@ -18,13 +19,15 @@ public 
class SyncFileInfo { private final URI destination; private final Compression uploadCompression; private final Map<String, String> tags; + private final Optional<Duration> minDurationBetweenSync; private SyncFileInfo(Path source, URI destination, Compression uploadCompression, - Map<String, String> tags) { + Map<String, String> tags, Duration minDurationBetweenSyncOrNull) { this.source = source; this.destination = destination; this.uploadCompression = uploadCompression; this.tags = Map.copyOf(tags); + this.minDurationBetweenSync = Optional.ofNullable(minDurationBetweenSyncOrNull); } /** Source path of the file to sync */ @@ -44,14 +47,18 @@ public class SyncFileInfo { public Map<String, String> tags() { return tags; } + public Optional<Duration> minDurationBetweenSync() { return minDurationBetweenSync; } + public static Optional<SyncFileInfo> forLogFile(URI uri, Path logFile, boolean rotatedOnly, ApplicationId owner) { String filename = logFile.getFileName().toString(); Compression compression; String dir = null; + Duration minDurationBetweenSync = null; - if ((!rotatedOnly && filename.equals("vespa.log")) || filename.startsWith("vespa.log-")) { + if (filename.startsWith("vespa.log")) { dir = "logs/vespa/"; compression = Compression.ZSTD; + minDurationBetweenSync = filename.length() == 9 ? rotatedOnly ? Duration.ofHours(1) : Duration.ZERO : null; } else { compression = filename.endsWith(".zst") ? 
Compression.NONE : Compression.ZSTD; if (rotatedOnly && compression != Compression.NONE) @@ -64,7 +71,8 @@ public class SyncFileInfo { if (dir == null) return Optional.empty(); return Optional.of(new SyncFileInfo( - logFile, uri.resolve(dir + logFile.getFileName() + compression.extension), compression, defaultTags(owner))); + logFile, uri.resolve(dir + logFile.getFileName() + compression.extension), compression, defaultTags(owner), + minDurationBetweenSync)); } public static SyncFileInfo forServiceDump(URI destinationDir, Path file, Compression compression, @@ -75,7 +83,7 @@ public class SyncFileInfo { if (assetClassification != null) { tags.put("vespa:AssetClassification", assetClassification); } - return new SyncFileInfo(file, location, compression, tags); + return new SyncFileInfo(file, location, compression, tags, null); } private static Map<String, String> defaultTags(ApplicationId owner) { diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/UserNamespace.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/UserNamespace.java index 776de103a13..40a8ab16923 100644 --- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/UserNamespace.java +++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/UserNamespace.java @@ -14,8 +14,8 @@ public class UserNamespace { * Real value in /proc/sys/fs/overflowuid or overflowgid, hardcode default value*/ private static final int OVERFLOW_ID = 65_534; - private volatile int uidOffset; - private volatile int gidOffset; + private final int uidOffset; + private final int gidOffset; private final int idRangeSize; public UserNamespace(int uidOffset, int gidOffset, int idRangeSize) { @@ -32,12 +32,6 @@ public class UserNamespace { public int idRangeSize() { return idRangeSize; } public int overflowId() { return OVERFLOW_ID; } - // Remove after migration to mapped namespaces is complete, make fields final - public void setOffsets(int idOffset) { 
- this.uidOffset = idOffset; - this.gidOffset = idOffset; - } - @Override public boolean equals(Object o) { if (this == o) return true; diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoreCollectorTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoreCollectorTest.java index 8ab6bce2b8c..b4ff7a0ceae 100644 --- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoreCollectorTest.java +++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoreCollectorTest.java @@ -33,8 +33,8 @@ public class CoreCollectorTest { private final String TEST_BIN_PATH = "/usr/bin/program"; private final List<String> GDB_BACKTRACE = List.of("[New Thread 2703]", "Core was generated by `/usr/bin/program\'.", "Program terminated with signal 11, Segmentation fault.", - "#0 0x00000000004004d8 in main (argv=0x1) at main.c:4", "4\t printf(argv[3]);", - "#0 0x00000000004004d8 in main (argv=0x1) at main.c:4"); + "#0 0x00000000004004d8 in main (argv=...) at main.c:4", "4\t printf(argv[3]);", + "#0 0x00000000004004d8 in main (argv=...) 
at main.c:4"); @Test public void extractsBinaryPathTest() { @@ -92,24 +92,29 @@ public class CoreCollectorTest { @Test public void extractsBacktraceUsingGdb() { - mockExec(new String[]{GDB_PATH_RHEL8, "-n", "-ex", "bt", "-batch", "/usr/bin/program", "/tmp/core.1234"}, + mockExec(new String[]{GDB_PATH_RHEL8, "-n", "-ex", "set print frame-arguments none", + "-ex", "bt", "-batch", "/usr/bin/program", "/tmp/core.1234"}, String.join("\n", GDB_BACKTRACE)); assertEquals(GDB_BACKTRACE, coreCollector.readBacktrace(context, TEST_CORE_PATH, TEST_BIN_PATH, false)); - mockExec(new String[]{GDB_PATH_RHEL8, "-n", "-ex", "bt", "-batch", "/usr/bin/program", "/tmp/core.1234"}, + mockExec(new String[]{GDB_PATH_RHEL8, "-n", "-ex", "set print frame-arguments none", + "-ex", "bt", "-batch", "/usr/bin/program", "/tmp/core.1234"}, "", "Failure"); try { coreCollector.readBacktrace(context, TEST_CORE_PATH, TEST_BIN_PATH, false); fail("Expected not to be able to read backtrace"); } catch (RuntimeException e) { assertEquals("Failed to read backtrace exit status 1, output 'Failure', Command: " + - "[" + GDB_PATH_RHEL8 + ", -n, -ex, bt, -batch, /usr/bin/program, /tmp/core.1234]", e.getMessage()); + "[" + GDB_PATH_RHEL8 + ", -n, -ex, set print frame-arguments none, -ex, bt, -batch, " + + "/usr/bin/program, /tmp/core.1234]", e.getMessage()); } } @Test public void extractsBacktraceFromAllThreadsUsingGdb() { - mockExec(new String[]{GDB_PATH_RHEL8, "-n", "-ex", "thread apply all bt", "-batch", + mockExec(new String[]{GDB_PATH_RHEL8, "-n", + "-ex", "set print frame-arguments none", + "-ex", "thread apply all bt", "-batch", "/usr/bin/program", "/tmp/core.1234"}, String.join("\n", GDB_BACKTRACE)); assertEquals(GDB_BACKTRACE, coreCollector.readBacktrace(context, TEST_CORE_PATH, TEST_BIN_PATH, true)); @@ -120,9 +125,11 @@ public class CoreCollectorTest { mockExec(new String[]{"file", TEST_CORE_PATH.pathInContainer()}, "/tmp/core.1234: ELF 64-bit LSB core file x86-64, version 1 (SYSV), SVR4-style, from " 
+ "'/usr/bin/program'"); - mockExec(new String[]{GDB_PATH_RHEL8, "-n", "-ex", "bt", "-batch", "/usr/bin/program", "/tmp/core.1234"}, + mockExec(new String[]{GDB_PATH_RHEL8, "-n", "-ex", "set print frame-arguments none", + "-ex", "bt", "-batch", "/usr/bin/program", "/tmp/core.1234"}, String.join("\n", GDB_BACKTRACE)); - mockExec(new String[]{GDB_PATH_RHEL8, "-n", "-ex", "thread apply all bt", "-batch", + mockExec(new String[]{GDB_PATH_RHEL8, "-n", "-ex", "set print frame-arguments none", + "-ex", "thread apply all bt", "-batch", "/usr/bin/program", "/tmp/core.1234"}, String.join("\n", GDB_BACKTRACE)); @@ -138,7 +145,7 @@ public class CoreCollectorTest { mockExec(new String[]{"file", TEST_CORE_PATH.pathInContainer()}, "/tmp/core.1234: ELF 64-bit LSB core file x86-64, version 1 (SYSV), SVR4-style, from " + "'/usr/bin/program'"); - mockExec(new String[]{GDB_PATH_RHEL8 + " -n -ex bt -batch /usr/bin/program /tmp/core.1234"}, + mockExec(new String[]{GDB_PATH_RHEL8 + " -n -ex set print frame-arguments none -ex bt -batch /usr/bin/program /tmp/core.1234"}, "", "Failure"); Map<String, Object> expectedData = Map.of("bin_path", TEST_BIN_PATH); diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/sync/SyncFileInfoTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/sync/SyncFileInfoTest.java index 7130ac54430..54701b0f5a7 100644 --- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/sync/SyncFileInfoTest.java +++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/sync/SyncFileInfoTest.java @@ -8,6 +8,7 @@ import org.junit.Test; import java.net.URI; import java.nio.file.FileSystem; import java.nio.file.Path; +import java.time.Duration; import java.util.Optional; import static com.yahoo.vespa.hosted.node.admin.maintenance.sync.SyncFileInfo.Compression.NONE; @@ -57,16 +58,21 @@ public class SyncFileInfoTest { @Test public void vespa_logs() { - assertForLogFile(vespaLogPath1, 
null, null, true); - assertForLogFile(vespaLogPath1, "s3://vespa-data-bucket/vespa/music/main/h432a/logs/vespa/vespa.log.zst", ZSTD, false); + assertForLogFile(vespaLogPath1, "s3://vespa-data-bucket/vespa/music/main/h432a/logs/vespa/vespa.log.zst", ZSTD, Duration.ofHours(1), true); + assertForLogFile(vespaLogPath1, "s3://vespa-data-bucket/vespa/music/main/h432a/logs/vespa/vespa.log.zst", ZSTD, Duration.ZERO, false); assertForLogFile(vespaLogPath2, "s3://vespa-data-bucket/vespa/music/main/h432a/logs/vespa/vespa.log-2021-02-12.zst", ZSTD, true); assertForLogFile(vespaLogPath2, "s3://vespa-data-bucket/vespa/music/main/h432a/logs/vespa/vespa.log-2021-02-12.zst", ZSTD, false); } private static void assertForLogFile(Path srcPath, String destination, SyncFileInfo.Compression compression, boolean rotatedOnly) { + assertForLogFile(srcPath, destination, compression, null, rotatedOnly); + } + + private static void assertForLogFile(Path srcPath, String destination, SyncFileInfo.Compression compression, Duration minDurationBetweenSync, boolean rotatedOnly) { Optional<SyncFileInfo> sfi = SyncFileInfo.forLogFile(nodeArchiveUri, srcPath, rotatedOnly, ApplicationId.defaultId()); assertEquals(destination, sfi.map(SyncFileInfo::destination).map(URI::toString).orElse(null)); assertEquals(compression, sfi.map(SyncFileInfo::uploadCompression).orElse(null)); + assertEquals(minDurationBetweenSync, sfi.flatMap(SyncFileInfo::minDurationBetweenSync).orElse(null)); } } diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/Node.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/Node.java index e7a20ff54b3..4d63863a917 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/Node.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/Node.java @@ -69,13 +69,13 @@ public final class Node implements Nodelike { } /** Creates a node builder in the initial state (provisioned) */ - public static Node.Builder create(String 
openStackId, IP.Config ipConfig, String hostname, Flavor flavor, NodeType type) { - return new Node.Builder(openStackId, hostname, flavor, State.provisioned, type).ipConfig(ipConfig); + public static Node.Builder create(String id, IP.Config ipConfig, String hostname, Flavor flavor, NodeType type) { + return new Node.Builder(id, hostname, flavor, State.provisioned, type).ipConfig(ipConfig); } /** Creates a node builder */ - public static Node.Builder create(String openStackId, String hostname, Flavor flavor, Node.State state, NodeType type) { - return new Node.Builder(openStackId, hostname, flavor, state, type); + public static Node.Builder create(String id, String hostname, Flavor flavor, Node.State state, NodeType type) { + return new Node.Builder(id, hostname, flavor, state, type); } /** DO NOT USE: public for serialization purposes. See {@code create} helper methods. */ @@ -328,9 +328,9 @@ public final class Node implements Nodelike { allocation, history, type, reports, modelName, reservedTo, exclusiveToApplicationId, exclusiveToClusterType, switchHostname, trustStoreItems); } - /** Returns a copy of this with the openStackId set */ - public Node withOpenStackId(String openStackId) { - return new Node(openStackId, ipConfig, hostname, parentHostname, flavor, status, state, + /** Returns a copy of this with given id set */ + public Node withId(String id) { + return new Node(id, ipConfig, hostname, parentHostname, flavor, status, state, allocation, history, type, reports, modelName, reservedTo, exclusiveToApplicationId, exclusiveToClusterType, switchHostname, trustStoreItems); } diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/Application.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/Application.java index 3c2ab8cead2..df5044de05c 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/Application.java +++ 
b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/Application.java @@ -2,10 +2,9 @@ package com.yahoo.vespa.hosted.provision.applications; import com.yahoo.config.provision.ApplicationId; -import com.yahoo.config.provision.ClusterResources; +import com.yahoo.config.provision.Capacity; import com.yahoo.config.provision.ClusterSpec; import java.util.Collection; -import java.util.List; import java.util.Map; import java.util.HashMap; import java.util.Optional; @@ -59,12 +58,12 @@ public class Application { * Returns an application with the given cluster having the min and max resource limits of the given cluster. * If the cluster has a target which is not inside the new limits, the target is removed. */ - public Application withCluster(ClusterSpec.Id id, boolean exclusive, ClusterResources min, ClusterResources max) { + public Application withCluster(ClusterSpec.Id id, boolean exclusive, Capacity requested) { Cluster cluster = clusters.get(id); if (cluster == null) - cluster = new Cluster(id, exclusive, min, max, Optional.empty(), Optional.empty(), List.of(), AutoscalingStatus.empty()); + cluster = Cluster.create(id, exclusive, requested); else - cluster = cluster.withConfiguration(exclusive, min, max); + cluster = cluster.withConfiguration(exclusive, requested); return with(cluster); } diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/AutoscalingStatus.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/AutoscalingStatus.java index f44785cfab3..71a6d661594 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/AutoscalingStatus.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/AutoscalingStatus.java @@ -1,8 +1,6 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.vespa.hosted.provision.applications; -import com.yahoo.vespa.hosted.provision.autoscale.Autoscaler; - import java.util.Objects; /** diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/Cluster.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/Cluster.java index 5478999e4fe..ad20f68ca33 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/Cluster.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/Cluster.java @@ -1,6 +1,7 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.vespa.hosted.provision.applications; +import com.yahoo.config.provision.Capacity; import com.yahoo.config.provision.ClusterResources; import com.yahoo.config.provision.ClusterSpec; import com.yahoo.vespa.hosted.provision.autoscale.Autoscaler; @@ -25,6 +26,7 @@ public class Cluster { private final ClusterSpec.Id id; private final boolean exclusive; private final ClusterResources min, max; + private final boolean required; private final Optional<Suggestion> suggested; private final Optional<ClusterResources> target; @@ -36,6 +38,7 @@ public class Cluster { boolean exclusive, ClusterResources minResources, ClusterResources maxResources, + boolean required, Optional<Suggestion> suggestedResources, Optional<ClusterResources> targetResources, List<ScalingEvent> scalingEvents, @@ -44,6 +47,7 @@ public class Cluster { this.exclusive = exclusive; this.min = Objects.requireNonNull(minResources); this.max = Objects.requireNonNull(maxResources); + this.required = required; this.suggested = Objects.requireNonNull(suggestedResources); Objects.requireNonNull(targetResources); if (targetResources.isPresent() && ! 
targetResources.get().isWithin(minResources, maxResources)) @@ -56,14 +60,20 @@ public class Cluster { public ClusterSpec.Id id() { return id; } + /** Returns whether the nodes allocated to this cluster must be on host exclusively dedicated to this application */ + public boolean exclusive() { return exclusive; } + /** Returns the configured minimal resources in this cluster */ public ClusterResources minResources() { return min; } /** Returns the configured maximal resources in this cluster */ public ClusterResources maxResources() { return max; } - /** Returns whether the nodes allocated to this cluster must be on host exclusively dedicated to this application */ - public boolean exclusive() { return exclusive; } + /** + * Returns whether the resources of this cluster are required to be within the specified min and max. + * Otherwise they may be adjusted by capacity policies. + */ + public boolean required() { return required; } /** * Returns the computed resources (between min and max, inclusive) this cluster should @@ -97,16 +107,18 @@ public class Cluster { /** The latest autoscaling status of this cluster, or unknown (never null) if none */ public AutoscalingStatus autoscalingStatus() { return autoscalingStatus; } - public Cluster withConfiguration(boolean exclusive, ClusterResources min, ClusterResources max) { - return new Cluster(id, exclusive, min, max, suggested, target, scalingEvents, autoscalingStatus); + public Cluster withConfiguration(boolean exclusive, Capacity capacity) { + return new Cluster(id, exclusive, + capacity.minResources(), capacity.maxResources(), capacity.isRequired(), + suggested, target, scalingEvents, autoscalingStatus); } public Cluster withSuggested(Optional<Suggestion> suggested) { - return new Cluster(id, exclusive, min, max, suggested, target, scalingEvents, autoscalingStatus); + return new Cluster(id, exclusive, min, max, required, suggested, target, scalingEvents, autoscalingStatus); } public Cluster 
withTarget(Optional<ClusterResources> target) { - return new Cluster(id, exclusive, min, max, suggested, target, scalingEvents, autoscalingStatus); + return new Cluster(id, exclusive, min, max, required, suggested, target, scalingEvents, autoscalingStatus); } /** Add or update (based on "at" time) a scaling event */ @@ -120,12 +132,12 @@ public class Cluster { scalingEvents.add(scalingEvent); prune(scalingEvents); - return new Cluster(id, exclusive, min, max, suggested, target, scalingEvents, autoscalingStatus); + return new Cluster(id, exclusive, min, max, required, suggested, target, scalingEvents, autoscalingStatus); } public Cluster with(AutoscalingStatus autoscalingStatus) { if (autoscalingStatus.equals(this.autoscalingStatus)) return this; - return new Cluster(id, exclusive, min, max, suggested, target, scalingEvents, autoscalingStatus); + return new Cluster(id, exclusive, min, max, required, suggested, target, scalingEvents, autoscalingStatus); } @Override @@ -156,6 +168,11 @@ public class Cluster { return -1; } + public static Cluster create(ClusterSpec.Id id, boolean exclusive, Capacity requested) { + return new Cluster(id, exclusive, requested.minResources(), requested.maxResources(), requested.isRequired(), + Optional.empty(), Optional.empty(), List.of(), AutoscalingStatus.empty()); + } + public static class Suggestion { private final ClusterResources resources; diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocatableClusterResources.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocatableClusterResources.java index f1e707be7b4..849ea03665b 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocatableClusterResources.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocatableClusterResources.java @@ -8,10 +8,12 @@ import com.yahoo.config.provision.NodeResources; import com.yahoo.vespa.hosted.provision.Node; 
import com.yahoo.vespa.hosted.provision.NodeList; import com.yahoo.vespa.hosted.provision.NodeRepository; +import com.yahoo.vespa.hosted.provision.provisioning.CapacityPolicies; import com.yahoo.vespa.hosted.provision.provisioning.NodeResourceLimits; import java.util.List; import java.util.Optional; +import java.util.stream.Collectors; /** * @author bratseth @@ -54,14 +56,14 @@ public class AllocatableClusterResources { public AllocatableClusterResources(ClusterResources realResources, NodeResources advertisedResources, - NodeResources idealResources, + ClusterResources idealResources, ClusterSpec clusterSpec) { this.nodes = realResources.nodes(); this.groups = realResources.groups(); this.realResources = realResources.nodeResources(); this.advertisedResources = advertisedResources; this.clusterSpec = clusterSpec; - this.fulfilment = fulfilment(realResources.nodeResources(), idealResources); + this.fulfilment = fulfilment(realResources, idealResources); } /** @@ -99,10 +101,10 @@ public class AllocatableClusterResources { */ public double fulfilment() { return fulfilment; } - private static double fulfilment(NodeResources realResources, NodeResources idealResources) { - double vcpuFulfilment = Math.min(1, realResources.vcpu() / idealResources.vcpu()); - double memoryGbFulfilment = Math.min(1, realResources.memoryGb() / idealResources.memoryGb()); - double diskGbFulfilment = Math.min(1, realResources.diskGb() / idealResources.diskGb()); + private static double fulfilment(ClusterResources realResources, ClusterResources idealResources) { + double vcpuFulfilment = Math.min(1, realResources.totalResources().vcpu() / idealResources.totalResources().vcpu()); + double memoryGbFulfilment = Math.min(1, realResources.totalResources().memoryGb() / idealResources.totalResources().memoryGb()); + double diskGbFulfilment = Math.min(1, realResources.totalResources().diskGb() / idealResources.totalResources().diskGb()); return (vcpuFulfilment + memoryGbFulfilment + 
diskGbFulfilment) / 3; } @@ -148,11 +150,13 @@ public class AllocatableClusterResources { advertisedResources = systemLimits.enlargeToLegal(advertisedResources, clusterSpec.type(), exclusive); // Ask for something legal advertisedResources = applicationLimits.cap(advertisedResources); // Overrides other conditions, even if it will then fail var realResources = nodeRepository.resourcesCalculator().requestToReal(advertisedResources, exclusive); // What we'll really get - if ( ! systemLimits.isWithinRealLimits(realResources, clusterSpec.type())) return Optional.empty(); + if ( ! systemLimits.isWithinRealLimits(realResources, clusterSpec.type())) + return Optional.empty(); + if (matchesAny(hosts, advertisedResources)) return Optional.of(new AllocatableClusterResources(wantedResources.with(realResources), advertisedResources, - wantedResources.nodeResources(), + wantedResources, clusterSpec)); else return Optional.empty(); @@ -180,7 +184,7 @@ public class AllocatableClusterResources { if ( ! 
systemLimits.isWithinRealLimits(realResources, clusterSpec.type())) continue; var candidate = new AllocatableClusterResources(wantedResources.with(realResources), advertisedResources, - wantedResources.nodeResources(), + wantedResources, clusterSpec); if (best.isEmpty() || candidate.preferableTo(best.get())) best = Optional.of(candidate); diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocationOptimizer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocationOptimizer.java index 6fd9801164a..30432c1c078 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocationOptimizer.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocationOptimizer.java @@ -65,14 +65,13 @@ public class AllocationOptimizer { nodeResourcesWith(nodesAdjustedForRedundancy, groupsAdjustedForRedundancy, limits, target, current, clusterModel)); - var allocatableResources = AllocatableClusterResources.from(next, current.clusterSpec(), limits, hosts, nodeRepository); - + var allocatableResources = AllocatableClusterResources.from(next, current.clusterSpec(), limits, + hosts, nodeRepository); if (allocatableResources.isEmpty()) continue; if (bestAllocation.isEmpty() || allocatableResources.get().preferableTo(bestAllocation.get())) bestAllocation = allocatableResources; } } - return bestAllocation; } diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaler.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaler.java index dcfb8fb7246..80c192f8353 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaler.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaler.java @@ -212,7 +212,7 @@ public class Autoscaler { @Override public String toString() { return "autoscaling advice: " + - (present ? 
(target.isPresent() ? "Scale to " + target.get() : "Don't scale") : " None"); + (present ? (target.isPresent() ? "Scale to " + target.get() : "Don't scale") : "None"); } } diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModel.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModel.java index 35aafd3e0f4..3c26eef41d9 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModel.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModel.java @@ -24,14 +24,20 @@ public class ClusterModel { private static final Logger log = Logger.getLogger(ClusterModel.class.getName()); - private static final Duration CURRENT_LOAD_DURATION = Duration.ofMinutes(5); + /** Containers typically use more cpu right after generation change, so discard those metrics */ + public static final Duration warmupDuration = Duration.ofSeconds(90); + + private static final Duration currentLoadDuration = Duration.ofMinutes(5); static final double idealQueryCpuLoad = 0.8; static final double idealWriteCpuLoad = 0.95; static final double idealMemoryLoad = 0.65; - static final double idealDiskLoad = 0.6; + static final double idealContainerDiskLoad = 0.95; + static final double idealContentDiskLoad = 0.6; private final Application application; + private final ClusterSpec clusterSpec; + private final Cluster cluster; /** The current nodes of this cluster, or empty if this models a new cluster not yet deployed */ private final NodeList nodes; private final Clock clock; @@ -50,6 +56,8 @@ public class ClusterModel { MetricsDb metricsDb, Clock clock) { this.application = application; + this.clusterSpec = clusterSpec; + this.cluster = cluster; this.nodes = clusterNodes; this.clock = clock; this.scalingDuration = computeScalingDuration(cluster, clusterSpec); @@ -59,12 +67,15 @@ public class ClusterModel { /** For testing */ ClusterModel(Application 
application, + ClusterSpec clusterSpec, Cluster cluster, Clock clock, Duration scalingDuration, ClusterTimeseries clusterTimeseries, ClusterNodesTimeseries nodeTimeseries) { this.application = application; + this.clusterSpec = clusterSpec; + this.cluster = cluster; this.nodes = null; this.clock = clock; @@ -73,6 +84,10 @@ public class ClusterModel { this.nodeTimeseries = nodeTimeseries; } + public Application application() { return application; } + public ClusterSpec clusterSpec() { return clusterSpec; } + public Cluster cluster() { return cluster; } + /** Returns the predicted duration of a rescaling of this cluster */ public Duration scalingDuration() { return scalingDuration; } @@ -95,14 +110,14 @@ public class ClusterModel { return queryFractionOfMax = clusterTimeseries().queryFractionOfMax(scalingDuration(), clock); } - /** Returns average load during the last {@link #CURRENT_LOAD_DURATION} */ - public Load currentLoad() { return nodeTimeseries().averageLoad(clock.instant().minus(CURRENT_LOAD_DURATION)); } + /** Returns average load during the last {@link #currentLoadDuration} */ + public Load currentLoad() { return nodeTimeseries().averageLoad(clock.instant().minus(currentLoadDuration)); } /** Returns average load during the last {@link #scalingDuration()} */ public Load averageLoad() { return nodeTimeseries().averageLoad(clock.instant().minus(scalingDuration())); } public Load idealLoad() { - return new Load(idealCpuLoad(), idealMemoryLoad, idealDiskLoad); + return new Load(idealCpuLoad(), idealMemoryLoad, idealDiskLoad()); } /** Ideal cpu load must take the application traffic fraction into account */ @@ -185,6 +200,12 @@ public class ClusterModel { return duration; } + private double idealDiskLoad() { + // Stateless clusters are not expected to consume more disk over time - + // if they do it is due to logs which will be rotated away right before the disk is full + return clusterSpec.isStateful() ? 
idealContentDiskLoad : idealContainerDiskLoad; + } + /** * Create a cluster model if possible and logs a warning and returns empty otherwise. * This is useful in cases where it's possible to continue without the cluster model, diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterNodesTimeseries.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterNodesTimeseries.java index f5657966e5f..5ad4ef2e263 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterNodesTimeseries.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterNodesTimeseries.java @@ -10,6 +10,8 @@ import java.util.List; import java.util.function.Predicate; import java.util.stream.Collectors; +import static com.yahoo.vespa.hosted.provision.autoscale.ClusterModel.warmupDuration; + /** * A series of metric snapshots for the nodes of a cluster used to compute load * @@ -24,13 +26,18 @@ public class ClusterNodesTimeseries { public ClusterNodesTimeseries(Duration period, Cluster cluster, NodeList clusterNodes, MetricsDb db) { this.clusterNodes = clusterNodes; - var timeseries = db.getNodeTimeseries(period, clusterNodes); - - if (cluster.lastScalingEvent().isPresent()) - timeseries = filter(timeseries, snapshot -> snapshot.generation() < 0 || // Content nodes do not yet send generation - snapshot.generation() >= cluster.lastScalingEvent().get().generation()); - timeseries = filter(timeseries, snapshot -> snapshot.inService() && snapshot.stable()); + // See warmupSeconds*4 into the past to see any generation change in it + // If none can be detected we assume the node is new/was down. 
+ // If either is the case, or there is a generation change, we ignore + // the first warmupWindow metrics + var timeseries = db.getNodeTimeseries(period.plus(warmupDuration.multipliedBy(4)), clusterNodes); + if (cluster.lastScalingEvent().isPresent()) { + long currentGeneration = cluster.lastScalingEvent().get().generation(); + timeseries = keepCurrentGenerationAfterWarmup(timeseries, currentGeneration); + } + timeseries = keep(timeseries, snapshot -> snapshot.inService() && snapshot.stable()); + timeseries = keep(timeseries, snapshot -> ! snapshot.at().isBefore(db.clock().instant().minus(period))); this.timeseries = timeseries; } @@ -62,8 +69,15 @@ return total.divide(count); } - private List<NodeTimeseries> filter(List<NodeTimeseries> timeseries, Predicate<NodeMetricSnapshot> filter) { - return timeseries.stream().map(nodeTimeseries -> nodeTimeseries.filter(filter)).collect(Collectors.toList()); + private static List<NodeTimeseries> keep(List<NodeTimeseries> timeseries, Predicate<NodeMetricSnapshot> filter) { + return timeseries.stream().map(nodeTimeseries -> nodeTimeseries.keep(filter)).collect(Collectors.toList()); + } + + private static List<NodeTimeseries> keepCurrentGenerationAfterWarmup(List<NodeTimeseries> timeseries, + long currentGeneration) { + return timeseries.stream() + .map(nodeTimeseries -> nodeTimeseries.keepCurrentGenerationAfterWarmup(currentGeneration)) + .collect(Collectors.toList()); } public static ClusterNodesTimeseries empty() { diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterTimeseries.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterTimeseries.java index 96896bb1ba0..131873b0137 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterTimeseries.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterTimeseries.java @@ -47,16 +47,19 @@ public 
class ClusterTimeseries { /** * The max query growth rate we can predict from this time-series as a fraction of the average traffic in the window + * + * @return the predicted max growth of the query rate, per minute as a fraction of the current load */ public double maxQueryGrowthRate(Duration window, Clock clock) { if (snapshots.isEmpty()) return 0.1; // Find the period having the highest growth rate, where total growth exceeds 30% increase - double maxGrowthRate = 0; // In query rate per minute + double maxGrowthRate = 0; // In query rate growth per second (to get good resolution) + for (int start = 0; start < snapshots.size(); start++) { if (start > 0) { // Optimization: Skip this point when starting from the previous is better relative to the best rate so far Duration duration = durationBetween(start - 1, start); - if (duration.toMinutes() != 0) { - double growthRate = (queryRateAt(start - 1) - queryRateAt(start)) / duration.toMinutes(); + if (duration.toSeconds() != 0) { + double growthRate = (queryRateAt(start - 1) - queryRateAt(start)) / duration.toSeconds(); if (growthRate >= maxGrowthRate) continue; } @@ -64,8 +67,8 @@ public class ClusterTimeseries { for (int end = start + 1; end < snapshots.size(); end++) { if (queryRateAt(end) >= queryRateAt(start) * 1.3) { Duration duration = durationBetween(start, end); - if (duration.toMinutes() == 0) continue; - double growthRate = (queryRateAt(end) - queryRateAt(start)) / duration.toMinutes(); + if (duration.toSeconds() == 0) continue; + double growthRate = (queryRateAt(end) - queryRateAt(start)) / duration.toSeconds(); if (growthRate > maxGrowthRate) maxGrowthRate = growthRate; } @@ -79,7 +82,7 @@ public class ClusterTimeseries { } OptionalDouble queryRate = queryRate(window, clock); if (queryRate.orElse(0) == 0) return 0.1; // Growth not expressible as a fraction of the current rate - return maxGrowthRate / queryRate.getAsDouble(); + return maxGrowthRate * 60 / queryRate.getAsDouble(); } /** diff --git 
a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/MemoryMetricsDb.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/MemoryMetricsDb.java index d9544b334ea..9eefd4e60b7 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/MemoryMetricsDb.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/MemoryMetricsDb.java @@ -4,8 +4,6 @@ package com.yahoo.vespa.hosted.provision.autoscale; import com.yahoo.collections.Pair; import com.yahoo.config.provision.ApplicationId; import com.yahoo.config.provision.ClusterSpec; -import com.yahoo.vespa.hosted.provision.Node; -import com.yahoo.vespa.hosted.provision.NodeRepository; import java.time.Clock; import java.time.Duration; @@ -15,7 +13,6 @@ import java.util.Collection; import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.Optional; import java.util.Set; import java.util.stream.Collectors; @@ -73,10 +70,10 @@ public class MemoryMetricsDb implements MetricsDb { Instant startTime = clock().instant().minus(period); synchronized (lock) { if (hostnames.isEmpty()) - return nodeTimeseries.values().stream().map(ns -> ns.justAfter(startTime)).collect(Collectors.toList()); + return nodeTimeseries.values().stream().map(ns -> ns.keepAfter(startTime)).collect(Collectors.toList()); else return hostnames.stream() - .map(hostname -> nodeTimeseries.getOrDefault(hostname, new NodeTimeseries(hostname, List.of())).justAfter(startTime)) + .map(hostname -> nodeTimeseries.getOrDefault(hostname, new NodeTimeseries(hostname, List.of())).keepAfter(startTime)) .collect(Collectors.toList()); } } @@ -94,7 +91,7 @@ public class MemoryMetricsDb implements MetricsDb { // 12 hours with 1k nodes and 3 resources and 1 measurement/sec is about 5Gb for (String hostname : nodeTimeseries.keySet()) { var timeseries = nodeTimeseries.get(hostname); - timeseries = 
timeseries.justAfter(clock().instant().minus(Autoscaler.maxScalingWindow())); + timeseries = timeseries.keepAfter(clock().instant().minus(Autoscaler.maxScalingWindow())); if (timeseries.isEmpty()) nodeTimeseries.remove(hostname); else diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/NodeTimeseries.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/NodeTimeseries.java index 864df9a16c4..4a5f8972e11 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/NodeTimeseries.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/NodeTimeseries.java @@ -9,6 +9,8 @@ import java.util.Optional; import java.util.function.Predicate; import java.util.stream.Collectors; +import static com.yahoo.vespa.hosted.provision.autoscale.ClusterModel.warmupDuration; + /** * A list of metric snapshots from a node, sorted by increasing time (newest last). * @@ -48,15 +50,42 @@ public class NodeTimeseries { return new NodeTimeseries(hostname(), list); } - public NodeTimeseries filter(Predicate<NodeMetricSnapshot> filter) { - return new NodeTimeseries(hostname, snapshots.stream().filter(filter).collect(Collectors.toList())); + /** Returns the instant this changed to the given generation, or empty if no *change* to this generation is present */ + private Optional<Instant> generationChange(long targetGeneration) { + if (snapshots.isEmpty()) return Optional.empty(); + if (snapshots.get(0).generation() == targetGeneration) return Optional.of(snapshots.get(0).at()); + for (NodeMetricSnapshot snapshot : snapshots) { + if (snapshot.generation() == targetGeneration) + return Optional.of(snapshot.at()); + } + return Optional.empty(); + } + + public NodeTimeseries keep(Predicate<NodeMetricSnapshot> filter) { + return new NodeTimeseries(hostname, snapshots.stream() + .filter(snapshot -> filter.test(snapshot)) + .collect(Collectors.toList())); } - public NodeTimeseries 
justAfter(Instant oldestTime) { + public NodeTimeseries keepAfter(Instant oldestTime) { return new NodeTimeseries(hostname, snapshots.stream() .filter(snapshot -> snapshot.at().equals(oldestTime) || snapshot.at().isAfter(oldestTime)) .collect(Collectors.toList())); } + public NodeTimeseries keepCurrentGenerationAfterWarmup(long currentGeneration) { + Optional<Instant> generationChange = generationChange(currentGeneration); + return keep(snapshot -> isOnCurrentGenerationAfterWarmup(snapshot, currentGeneration, generationChange)); + } + + private boolean isOnCurrentGenerationAfterWarmup(NodeMetricSnapshot snapshot, + long currentGeneration, + Optional<Instant> generationChange) { + if (snapshot.generation() < 0) return true; // Content nodes do not yet send generation + if (snapshot.generation() < currentGeneration) return false; + if (generationChange.isEmpty()) return true; + return ! snapshot.at().isBefore(generationChange.get().plus(warmupDuration)); + } + } diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainer.java index 84d677abde6..eab9f755db2 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainer.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainer.java @@ -82,10 +82,12 @@ public class AutoscalingMaintainer extends NodeRepositoryMaintainer { applications().put(application.get().with(updatedCluster), lock); if (advice.isPresent() && advice.target().isPresent() && !cluster.get().targetResources().equals(advice.target())) { // 2. 
Also autoscale - logAutoscaling(advice.target().get(), applicationId, clusterNodes); + ClusterResources before = new AllocatableClusterResources(clusterNodes.asList(), nodeRepository()).advertisedResources(); try (MaintenanceDeployment deployment = new MaintenanceDeployment(applicationId, deployer, metric, nodeRepository())) { - if (deployment.isValid()) + if (deployment.isValid()) { deployment.activate(); + logAutoscaling(before, advice.target().get(), applicationId, clusterNodes); + } } } } @@ -121,10 +123,9 @@ public class AutoscalingMaintainer extends NodeRepositoryMaintainer { return cluster.with(event.withCompletion(completionTime)); } - private void logAutoscaling(ClusterResources target, ApplicationId application, NodeList clusterNodes) { - ClusterResources current = new AllocatableClusterResources(clusterNodes.asList(), nodeRepository()).advertisedResources(); - log.info("Autoscaling " + application + " " + clusterNodes.clusterSpec() + ":" + - "\nfrom " + toString(current) + "\nto " + toString(target)); + private void logAutoscaling(ClusterResources from, ClusterResources to, ApplicationId application, NodeList clusterNodes) { + log.info("Autoscaled " + application + " " + clusterNodes.clusterSpec() + ":" + + "\nfrom " + toString(from) + "\nto " + toString(to)); } static String toString(ClusterResources r) { diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/LoadBalancerExpirer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/LoadBalancerExpirer.java index 3b74533772b..fbc3d236421 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/LoadBalancerExpirer.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/LoadBalancerExpirer.java @@ -79,7 +79,7 @@ public class LoadBalancerExpirer extends NodeRepositoryMaintainer { allocatedNodes(lb.id()).isEmpty(), lb -> { try { attempts.add(1); - log.log(Level.INFO, () -> "Removing 
expired inactive load balancer " + lb.id()); + log.log(Level.INFO, () -> "Removing expired inactive " + lb.id()); service.remove(lb.id().application(), lb.id().cluster()); db.removeLoadBalancer(lb.id()); } catch (Exception e){ diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MaintenanceDeployment.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MaintenanceDeployment.java index 0c78a9adade..511d3efe313 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MaintenanceDeployment.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MaintenanceDeployment.java @@ -206,6 +206,9 @@ class MaintenanceDeployment implements Closeable { if (nodeLock.node().status().preferToRetire() == preferToRetire) return false; + // Node is retiring, keep preferToRetire + if (nodeLock.node().allocation().get().membership().retired() && !preferToRetire) return false; + nodeRepository.nodes().write(nodeLock.node().withPreferToRetire(preferToRetire, agent, nodeRepository.clock().instant()), nodeLock); return true; } diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeMover.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeMover.java index 6c103627ad4..57db874fb84 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeMover.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeMover.java @@ -59,7 +59,7 @@ public abstract class NodeMover<MOVE> extends NodeRepositoryMaintainer { protected final MOVE findBestMove(NodesAndHosts<? extends NodeList> allNodes) { HostCapacity capacity = new HostCapacity(allNodes, nodeRepository().resourcesCalculator()); MOVE bestMove = emptyMove; - // Shuffle nodes so we did not get stuck if the chosen move is consistently discarded. 
Node moves happen through + // Shuffle nodes to not get stuck if the chosen move is consistently discarded. Node moves happen through // a soft request to retire (preferToRetire), which node allocation can disregard NodeList activeNodes = allNodes.nodes().nodeType(NodeType.tenant) .state(Node.State.active) diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/SwitchRebalancer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/SwitchRebalancer.java index 7bea671fbac..f01e8ecd301 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/SwitchRebalancer.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/SwitchRebalancer.java @@ -16,6 +16,7 @@ import java.time.Duration; import java.util.HashSet; import java.util.List; import java.util.Set; +import java.util.logging.Logger; /** * Ensure that nodes within a cluster a spread across hosts on exclusive network switches. 
@@ -24,6 +25,8 @@ import java.util.Set; */ public class SwitchRebalancer extends NodeMover<Move> { + private static final Logger LOG = Logger.getLogger(SwitchRebalancer.class.getName()); + private final Metric metric; private final Deployer deployer; @@ -40,7 +43,12 @@ public class SwitchRebalancer extends NodeMover<Move> { NodesAndHosts<NodeList> allNodes = NodesAndHosts.create(nodeRepository().nodes().list()); // Lockless as strong consistency is not needed if (!zoneIsStable(allNodes.nodes())) return 1.0; - findBestMove(allNodes).execute(false, Agent.SwitchRebalancer, deployer, metric, nodeRepository()); + Move bestMove = findBestMove(allNodes); + if (!bestMove.isEmpty()) { + LOG.info("Trying " + bestMove + " (" + bestMove.fromHost().switchHostname().orElse("<none>") + + " -> " + bestMove.toHost().switchHostname().orElse("<none>") + ")"); + } + bestMove.execute(false, Agent.SwitchRebalancer, deployer, metric, nodeRepository()); return 1.0; } diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/ApplicationSerializer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/ApplicationSerializer.java index 9cdb4c69b97..2289ba4a0ea 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/ApplicationSerializer.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/ApplicationSerializer.java @@ -48,6 +48,7 @@ public class ApplicationSerializer { private static final String exclusiveKey = "exclusive"; private static final String minResourcesKey = "min"; private static final String maxResourcesKey = "max"; + private static final String requiredKey = "required"; private static final String suggestedKey = "suggested"; private static final String resourcesKey = "resources"; private static final String targetResourcesKey = "target"; @@ -99,7 +100,6 @@ public class ApplicationSerializer { } private static Status statusFromSlime(Inspector statusObject) { - 
if ( ! statusObject.valid()) return Status.initial(); // TODO: Remove this line after March 2021 return new Status(statusObject.field(currentReadShareKey).asDouble(), statusObject.field(maxReadShareKey).asDouble()); } @@ -118,6 +118,7 @@ public class ApplicationSerializer { clusterObject.setBool(exclusiveKey, cluster.exclusive()); toSlime(cluster.minResources(), clusterObject.setObject(minResourcesKey)); toSlime(cluster.maxResources(), clusterObject.setObject(maxResourcesKey)); + clusterObject.setBool(requiredKey, cluster.required()); cluster.suggestedResources().ifPresent(suggested -> toSlime(suggested, clusterObject.setObject(suggestedKey))); cluster.targetResources().ifPresent(target -> toSlime(target, clusterObject.setObject(targetResourcesKey))); scalingEventsToSlime(cluster.scalingEvents(), clusterObject.setArray(scalingEventsKey)); @@ -130,6 +131,7 @@ public class ApplicationSerializer { clusterObject.field(exclusiveKey).asBool(), clusterResourcesFromSlime(clusterObject.field(minResourcesKey)), clusterResourcesFromSlime(clusterObject.field(maxResourcesKey)), + clusterObject.field(requiredKey).asBool(), optionalSuggestionFromSlime(clusterObject.field(suggestedKey)), optionalClusterResourcesFromSlime(clusterObject.field(targetResourcesKey)), scalingEventsFromSlime(clusterObject.field(scalingEventsKey)), diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDatabaseClient.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDatabaseClient.java index 9888a8df0ed..ba28d8e6b9a 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDatabaseClient.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDatabaseClient.java @@ -66,7 +66,6 @@ public class CuratorDatabaseClient { private static final Path inactiveJobsPath = root.append("inactiveJobs"); private static final Path infrastructureVersionsPath = 
root.append("infrastructureVersions"); private static final Path osVersionsPath = root.append("osVersions"); - private static final Path containerImagesPath = root.append("dockerImages"); private static final Path firmwareCheckPath = root.append("firmwareCheck"); private static final Path archiveUrisPath = root.append("archiveUris"); @@ -83,7 +82,6 @@ public class CuratorDatabaseClient { this.clock = clock; this.provisionIndexCounter = new CuratorCounter(curator, root.append("provisionIndexCounter")); initZK(); - curator.delete(containerImagesPath); // TODO(mpolden): Remove after 2021-11-08 } public List<String> cluster() { diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/NodeResourcesSerializer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/NodeResourcesSerializer.java index 8c421443a65..1c3d3f5c489 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/NodeResourcesSerializer.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/NodeResourcesSerializer.java @@ -20,6 +20,7 @@ public class NodeResourcesSerializer { private static final String storageTypeKey = "storageType"; static void toSlime(NodeResources resources, Cursor resourcesObject) { + if (resources.isUnspecified()) return; resourcesObject.setDouble(vcpuKey, resources.vcpu()); resourcesObject.setDouble(memoryKey, resources.memoryGb()); resourcesObject.setDouble(diskKey, resources.diskGb()); @@ -29,6 +30,7 @@ public class NodeResourcesSerializer { } static NodeResources resourcesFromSlime(Inspector resources) { + if ( ! 
resources.field(vcpuKey).valid()) return NodeResources.unspecified(); return new NodeResources(resources.field(vcpuKey).asDouble(), resources.field(memoryKey).asDouble(), resources.field(diskKey).asDouble(), diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Activator.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Activator.java index 0d32b21016c..8c358301b85 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Activator.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Activator.java @@ -113,7 +113,8 @@ class Activator { var cluster = modified.cluster(clusterEntry.getKey()).get(); var previousResources = oldNodes.cluster(clusterEntry.getKey()).toResources(); var currentResources = clusterEntry.getValue().toResources(); - if ( ! previousResources.justNumbers().equals(currentResources.justNumbers())) { + if ( previousResources.nodeResources().isUnspecified() + || ! 
previousResources.justNumbers().equals(currentResources.justNumbers())) { cluster = cluster.with(ScalingEvent.create(previousResources, currentResources, generation, at)); } if (cluster.targetResources().isPresent() diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/CapacityPolicies.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/CapacityPolicies.java index 839bc21827c..4088d717a67 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/CapacityPolicies.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/CapacityPolicies.java @@ -3,6 +3,7 @@ package com.yahoo.vespa.hosted.provision.provisioning; import com.yahoo.config.provision.ApplicationId; import com.yahoo.config.provision.Capacity; +import com.yahoo.config.provision.ClusterResources; import com.yahoo.config.provision.ClusterSpec; import com.yahoo.config.provision.Environment; import com.yahoo.config.provision.NodeResources; @@ -29,11 +30,22 @@ public class CapacityPolicies { this.sharedHosts = type -> PermanentFlags.SHARED_HOST.bindTo(nodeRepository.flagSource()).value().isEnabled(type.name()); } - public int decideSize(int requested, Capacity capacity, ClusterSpec cluster, ApplicationId application) { - if (application.instance().isTester()) return 1; + public Capacity applyOn(Capacity capacity, ApplicationId application) { + return capacity.withLimits(applyOn(capacity.minResources(), capacity, application), + applyOn(capacity.maxResources(), capacity, application)); + } + + private ClusterResources applyOn(ClusterResources resources, Capacity capacity, ApplicationId application) { + int nodes = decideSize(resources.nodes(), capacity.isRequired(), application.instance().isTester()); + int groups = Math.min(resources.groups(), nodes); // cannot have more groups than nodes + var nodeResources = decideNodeResources(resources.nodeResources(), capacity.isRequired()); + return 
new ClusterResources(nodes, groups, nodeResources); + } - ensureRedundancy(requested, cluster, capacity.canFail()); - if (capacity.isRequired()) return requested; + private int decideSize(int requested, boolean required, boolean isTester) { + if (isTester) return 1; + + if (required) return requested; switch(zone.environment()) { case dev : case test : return 1; case perf : return Math.min(requested, 3); @@ -43,11 +55,9 @@ public class CapacityPolicies { } } - public NodeResources decideNodeResources(NodeResources target, Capacity capacity, ClusterSpec cluster) { - if (target.isUnspecified()) - target = defaultNodeResources(cluster.type()); - - if (capacity.isRequired()) return target; + private NodeResources decideNodeResources(NodeResources target, boolean required) { + if (required) return target; + if (target.isUnspecified()) return target; // Cannot be modified // Dev does not cap the cpu or network of containers since usage is spotty: Allocate just a small amount exclusively if (zone.environment() == Environment.dev && !zone.getCloud().dynamicProvisioning()) @@ -77,28 +87,11 @@ public class CapacityPolicies { } /** - * Whether or not the nodes requested can share physical host with other applications. + * Returns whether the nodes requested can share physical host with other applications. * A security feature which only makes sense for prod. 
*/ public boolean decideExclusivity(Capacity capacity, boolean requestedExclusivity) { return requestedExclusivity && (capacity.isRequired() || zone.environment() == Environment.prod); } - /** - * Throw if the node count is 1 for container and content clusters and we're in a production zone - * - * @throws IllegalArgumentException if only one node is requested and we can fail - */ - private void ensureRedundancy(int nodeCount, ClusterSpec cluster, boolean canFail) { - if (canFail && - nodeCount == 1 && - requiresRedundancy(cluster.type()) && - zone.environment().isProduction()) - throw new IllegalArgumentException("Deployments to prod require at least 2 nodes per cluster for redundancy. Not fulfilled for " + cluster); - } - - private static boolean requiresRedundancy(ClusterSpec.Type clusterType) { - return clusterType.isContent() || clusterType.isContainer(); - } - } diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/GroupPreparer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/GroupPreparer.java index ba46f0a9535..2d93763c631 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/GroupPreparer.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/GroupPreparer.java @@ -71,7 +71,7 @@ public class GroupPreparer { // Try preparing in memory without global unallocated lock. Most of the time there should be no changes and we // can return nodes previously allocated. 
NodeAllocation probeAllocation = prepareAllocation(application, cluster, requestedNodes, surplusActiveNodes, - indices::probeNext, wantedGroups, allNodesAndHosts); + indices::probeNext, wantedGroups, allNodesAndHosts); if (probeAllocation.fulfilledAndNoChanges()) { List<Node> acceptedNodes = probeAllocation.finalNodes(); surplusActiveNodes.removeAll(acceptedNodes); diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeAllocation.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeAllocation.java index 7cc4acc20b0..6c22a26d88a 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeAllocation.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeAllocation.java @@ -99,12 +99,12 @@ class NodeAllocation { * Note that if unallocated nodes are offered before allocated nodes, this will unnecessarily * reject allocated nodes due to index duplicates. * - * @param nodesPrioritized the nodes which are potentially on offer. These may belong to a different application etc. + * @param candidates the nodes which are potentially on offer. These may belong to a different application etc. * @return the subset of offeredNodes which was accepted, with the correct allocation assigned */ - List<Node> offer(List<NodeCandidate> nodesPrioritized) { + List<Node> offer(List<NodeCandidate> candidates) { List<Node> accepted = new ArrayList<>(); - for (NodeCandidate candidate : nodesPrioritized) { + for (NodeCandidate candidate : candidates) { if (candidate.allocation().isPresent()) { Allocation allocation = candidate.allocation().get(); ClusterMembership membership = allocation.membership(); @@ -121,7 +121,7 @@ class NodeAllocation { if ((! 
saturated() && hasCompatibleFlavor(candidate) && requestedNodes.acceptable(candidate)) || acceptToRetire) { candidate = candidate.withNode(); if (candidate.isValid()) - accepted.add(acceptNode(candidate, shouldRetire(candidate, nodesPrioritized), resizeable)); + accepted.add(acceptNode(candidate, shouldRetire(candidate, candidates), resizeable)); } } else if (! saturated() && hasCompatibleFlavor(candidate)) { diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeCandidate.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeCandidate.java index 4f0ae688b1c..6f7eeb75c03 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeCandidate.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeCandidate.java @@ -139,6 +139,10 @@ public abstract class NodeCandidate implements Nodelike, Comparable<NodeCandidat if (this.isInNodeRepoAndReserved() && ! other.isInNodeRepoAndReserved()) return -1; if (other.isInNodeRepoAndReserved() && ! 
this.isInNodeRepoAndReserved()) return 1; + // Choose nodes that are not preferred to retire + if (!this.preferToRetire() && other.preferToRetire()) return -1; + if (!other.preferToRetire() && this.preferToRetire()) return 1; + // Choose inactive nodes if (this.state() == Node.State.inactive && other.state() != Node.State.inactive) return -1; if (other.state() == Node.State.inactive && this.state() != Node.State.inactive) return 1; @@ -238,7 +242,6 @@ public abstract class NodeCandidate implements Nodelike, Comparable<NodeCandidat private double skewWith(NodeResources resources) { if (parent.isEmpty()) return 0; - NodeResources free = freeParentCapacity.justNumbers().subtract(resources.justNumbers()); return Node.skew(parent.get().flavor().resources(), free); } diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java index 0ab04a1a73d..7d15a2b30b1 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java @@ -84,8 +84,8 @@ public class NodeRepositoryProvisioner implements Provisioner { @Override public List<HostSpec> prepare(ApplicationId application, ClusterSpec cluster, Capacity requested, ProvisionLogger logger) { - log.log(Level.FINE, () -> "Received deploy prepare request for " + requested + - " for application " + application + ", cluster " + cluster); + log.log(Level.FINE, "Received deploy prepare request for " + requested + + " for application " + application + ", cluster " + cluster); if (cluster.group().isPresent()) throw new IllegalArgumentException("Node requests cannot specify a group"); @@ -96,17 +96,21 @@ public class NodeRepositoryProvisioner implements Provisioner { NodeResources resources; NodeSpec nodeSpec; 
if (requested.type() == NodeType.tenant) { - ClusterResources target = decideTargetResources(application, cluster, requested); - int nodeCount = capacityPolicies.decideSize(target.nodes(), requested, cluster, application); - groups = Math.min(target.groups(), nodeCount); // cannot have more groups than nodes - resources = capacityPolicies.decideNodeResources(target.nodeResources(), requested, cluster); - boolean exclusive = capacityPolicies.decideExclusivity(requested, cluster.isExclusive()); - nodeSpec = NodeSpec.from(nodeCount, resources, exclusive, requested.canFail()); - logIfDownscaled(target.nodes(), nodeCount, cluster, logger); + var actual = capacityPolicies.applyOn(requested, application); + ClusterResources target = decideTargetResources(application, cluster, actual); + boolean exclusive = capacityPolicies.decideExclusivity(actual, cluster.isExclusive()); + ensureRedundancy(target.nodes(), cluster, actual.canFail(), application); + logIfDownscaled(requested.minResources().nodes(), actual.minResources().nodes(), cluster, logger); + + groups = target.groups(); + resources = target.nodeResources().isUnspecified() ? capacityPolicies.defaultNodeResources(cluster.type()) + : target.nodeResources(); + nodeSpec = NodeSpec.from(target.nodes(), resources, exclusive, actual.canFail()); } else { groups = 1; // type request with multiple groups is not supported - resources = requested.minResources().nodeResources(); + resources = requested.minResources().nodeResources().isUnspecified() ? 
capacityPolicies.defaultNodeResources(cluster.type()) + : requested.minResources().nodeResources(); nodeSpec = NodeSpec.from(requested.type()); } return asSortedHosts(preparer.prepare(application, cluster, nodeSpec, groups), resources); @@ -141,7 +145,7 @@ public class NodeRepositoryProvisioner implements Provisioner { private ClusterResources decideTargetResources(ApplicationId applicationId, ClusterSpec clusterSpec, Capacity requested) { try (Mutex lock = nodeRepository.nodes().lock(applicationId)) { var application = nodeRepository.applications().get(applicationId).orElse(Application.empty(applicationId)) - .withCluster(clusterSpec.id(), clusterSpec.isExclusive(), requested.minResources(), requested.maxResources()); + .withCluster(clusterSpec.id(), clusterSpec.isExclusive(), requested); nodeRepository.applications().put(application, lock); var cluster = application.cluster(clusterSpec.id()).get(); return cluster.targetResources().orElseGet(() -> currentResources(application, clusterSpec, cluster, requested)); @@ -160,12 +164,20 @@ public class NodeRepositoryProvisioner implements Provisioner { boolean firstDeployment = nodes.isEmpty(); AllocatableClusterResources currentResources = firstDeployment // start at min, preserve current resources otherwise - ? new AllocatableClusterResources(requested.minResources(), clusterSpec, nodeRepository) + ? 
new AllocatableClusterResources(initialResourcesFrom(requested, clusterSpec), clusterSpec, nodeRepository) : new AllocatableClusterResources(nodes.asList(), nodeRepository); var clusterModel = new ClusterModel(application, cluster, clusterSpec, nodes, nodeRepository.metricsDb(), nodeRepository.clock()); return within(Limits.of(requested), currentResources, firstDeployment, clusterModel); } + private ClusterResources initialResourcesFrom(Capacity requested, ClusterSpec clusterSpec) { + var initial = requested.minResources(); + if (initial.nodeResources().isUnspecified()) + initial = initial.with(capacityPolicies.defaultNodeResources(clusterSpec.type())); + return initial; + } + + /** Make the minimal adjustments needed to the current resources to stay within the limits */ private ClusterResources within(Limits limits, AllocatableClusterResources current, @@ -186,10 +198,28 @@ public class NodeRepositoryProvisioner implements Provisioner { .advertisedResources(); } - private void logIfDownscaled(int targetNodes, int actualNodes, ClusterSpec cluster, ProvisionLogger logger) { - if (zone.environment().isManuallyDeployed() && actualNodes < targetNodes) - logger.log(Level.INFO, "Requested " + targetNodes + " nodes for " + cluster + - ", downscaling to " + actualNodes + " nodes in " + zone.environment()); + /** + * Throw if the node count is 1 for container and content clusters and we're in a production zone + * + * @throws IllegalArgumentException if only one node is requested and we can fail + */ + private void ensureRedundancy(int nodeCount, ClusterSpec cluster, boolean canFail, ApplicationId application) { + if (! application.instance().isTester() && + canFail && + nodeCount == 1 && + requiresRedundancy(cluster.type()) && + zone.environment().isProduction()) + throw new IllegalArgumentException("Deployments to prod require at least 2 nodes per cluster for redundancy. 
Not fulfilled for " + cluster); + } + + private static boolean requiresRedundancy(ClusterSpec.Type clusterType) { + return clusterType.isContent() || clusterType.isContainer(); + } + + private void logIfDownscaled(int requestedMinNodes, int actualMinNodes, ClusterSpec cluster, ProvisionLogger logger) { + if (zone.environment().isManuallyDeployed() && actualMinNodes < requestedMinNodes) + logger.log(Level.INFO, "Requested " + requestedMinNodes + " nodes for " + cluster + + ", downscaling to " + actualMinNodes + " nodes in " + zone.environment()); } private List<HostSpec> asSortedHosts(List<Node> nodes, NodeResources requestedResources) { diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java index 282b0d96cf4..c98ff31ecb6 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java @@ -10,7 +10,6 @@ import com.yahoo.vespa.hosted.provision.Node; import com.yahoo.vespa.hosted.provision.NodeList; import com.yahoo.vespa.hosted.provision.NodeRepository; import com.yahoo.vespa.hosted.provision.NodesAndHosts; -import com.yahoo.vespa.hosted.provision.node.Nodes; import java.util.ArrayList; import java.util.List; @@ -25,13 +24,11 @@ import java.util.stream.Collectors; */ class Preparer { - private final NodeRepository nodeRepository; private final GroupPreparer groupPreparer; private final Optional<LoadBalancerProvisioner> loadBalancerProvisioner; public Preparer(NodeRepository nodeRepository, Optional<HostProvisioner> hostProvisioner, Optional<LoadBalancerProvisioner> loadBalancerProvisioner) { - this.nodeRepository = nodeRepository; this.loadBalancerProvisioner = loadBalancerProvisioner; this.groupPreparer = new GroupPreparer(nodeRepository, hostProvisioner); } @@ -69,9 +66,10 @@ class Preparer { for 
(int groupIndex = 0; groupIndex < wantedGroups; groupIndex++) { ClusterSpec clusterGroup = cluster.with(Optional.of(ClusterSpec.Group.from(groupIndex))); - GroupPreparer.PrepareResult result = groupPreparer.prepare( - application, clusterGroup, requestedNodes.fraction(wantedGroups), - surplusNodes, indices, wantedGroups, allNodesAndHosts); + GroupPreparer.PrepareResult result = groupPreparer.prepare(application, clusterGroup, + requestedNodes.fraction(wantedGroups), + surplusNodes, indices, wantedGroups, + allNodesAndHosts); allNodesAndHosts = result.allNodesAndHosts; // Might have changed List<Node> accepted = result.prepared; if (requestedNodes.rejectNonActiveParent()) { @@ -98,7 +96,7 @@ class Preparer { * in groups with index number above or equal the group count */ private List<Node> findNodesInRemovableGroups(NodeList appNodes, ClusterSpec requestedCluster, int wantedGroups) { - List<Node> surplusNodes = new ArrayList<>(0); + List<Node> surplusNodes = new ArrayList<>(); for (Node node : appNodes.state(Node.State.active)) { ClusterSpec nodeCluster = node.allocation().get().membership().cluster(); if ( ! 
nodeCluster.id().equals(requestedCluster.id())) continue; diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodePatcher.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodePatcher.java index 673f38229fa..fa6c44e6851 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodePatcher.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodePatcher.java @@ -174,8 +174,8 @@ public class NodePatcher { clock.instant()); case "reports" : return nodeWithPatchedReports(node, value); - case "openStackId" : - return node.withOpenStackId(asString(value)); + case "id" : + return node.withId(asString(value)); case "diskGb": case "minDiskAvailableGb": return node.with(node.flavor().with(node.flavor().resources().withDiskGb(value.asDouble())), Agent.operator, clock.instant()); diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesResponse.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesResponse.java index b7b334aaba4..4852f0f8269 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesResponse.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesResponse.java @@ -121,14 +121,13 @@ class NodesResponse extends SlimeJsonResponse { object.setString("url", nodeParentUrl + node.hostname()); if ( ! 
allFields) return; - object.setString("id", node.hostname()); + object.setString("id", node.id()); object.setString("state", NodeSerializer.toString(node.state())); object.setString("type", NodeSerializer.toString(node.type())); object.setString("hostname", node.hostname()); if (node.parentHostname().isPresent()) { object.setString("parentHostname", node.parentHostname().get()); } - object.setString("openStackId", node.id()); object.setString("flavor", node.flavor().name()); node.reservedTo().ifPresent(reservedTo -> object.setString("reservedTo", reservedTo.value())); node.exclusiveToApplicationId().ifPresent(applicationId -> object.setString("exclusiveTo", applicationId.serializedForm())); diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesV2ApiHandler.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesV2ApiHandler.java index 2253fcb9ac5..67bb69b6191 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesV2ApiHandler.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesV2ApiHandler.java @@ -267,7 +267,7 @@ public class NodesV2ApiHandler extends LoggingRequestHandler { inspector.field("additionalHostnames").traverse((ArrayTraverser) (i, item) -> addressPool.add(new Address(item.asString()))); - Node.Builder builder = Node.create(inspector.field("openStackId").asString(), + Node.Builder builder = Node.create(inspector.field("id").asString(), IP.Config.of(ipAddresses, ipAddressPool, addressPool), inspector.field("hostname").asString(), flavorFromSlime(inspector), diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/RealDataScenarioTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/RealDataScenarioTest.java index 27233791cf1..3dd76c76cac 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/RealDataScenarioTest.java +++ 
b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/RealDataScenarioTest.java @@ -18,12 +18,15 @@ import com.yahoo.config.provision.RegionName; import com.yahoo.config.provision.SystemName; import com.yahoo.config.provision.Zone; import com.yahoo.config.provisioning.FlavorsConfig; +import com.yahoo.jdisc.test.MockMetric; import com.yahoo.transaction.NestedTransaction; import com.yahoo.vespa.config.ConfigPayload; +import com.yahoo.vespa.hosted.provision.maintenance.SwitchRebalancer; import com.yahoo.vespa.hosted.provision.node.Agent; import com.yahoo.vespa.hosted.provision.persistence.DnsNameResolver; import com.yahoo.vespa.hosted.provision.persistence.NodeSerializer; import com.yahoo.vespa.hosted.provision.provisioning.ProvisioningTester; +import com.yahoo.vespa.hosted.provision.testutils.MockDeployer; import com.yahoo.vespa.model.builder.xml.dom.DomConfigPayloadBuilder; import org.junit.Ignore; import org.junit.Test; @@ -35,8 +38,11 @@ import java.io.UncheckedIOException; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; +import java.time.Duration; +import java.util.ArrayList; import java.util.Collection; import java.util.List; +import java.util.Map; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Consumer; import java.util.logging.Logger; @@ -97,6 +103,16 @@ public class RealDataScenarioTest { deploy(tester, app, specs, capacities); tester.nodeRepository().nodes().list().owner(app).cluster(specs[1].id()).forEach(System.out::println); + + // Perform a node move + tester.clock().advance(Duration.ofHours(1)); // Enough time for deployment to not be considered deployed recently + List<MockDeployer.ClusterContext> contexts = new ArrayList<>(); + for (int i = 0; i < specs.length; i++) { + contexts.add(new MockDeployer.ClusterContext(app, specs[i], capacities[i])); + } + MockDeployer deployer = new MockDeployer(tester.provisioner(), tester.clock(), Map.of(app, new 
MockDeployer.ApplicationContext(app, contexts))); + SwitchRebalancer rebalancer = new SwitchRebalancer(tester.nodeRepository(), Duration.ofDays(1), new MockMetric(), deployer); + rebalancer.run(); } private void deploy(ProvisioningTester tester, ApplicationId app, ClusterSpec[] specs, Capacity[] capacities) { diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingIntegrationTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingIntegrationTest.java index 4a0c2012ae4..667dffef2a6 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingIntegrationTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingIntegrationTest.java @@ -2,6 +2,7 @@ package com.yahoo.vespa.hosted.provision.autoscale; import com.yahoo.config.provision.ApplicationId; +import com.yahoo.config.provision.Capacity; import com.yahoo.config.provision.ClusterResources; import com.yahoo.config.provision.ClusterSpec; import com.yahoo.config.provision.HostSpec; @@ -55,7 +56,7 @@ public class AutoscalingIntegrationTest { ClusterResources max = new ClusterResources(2, 1, nodes); Application application = tester.nodeRepository().applications().get(application1).orElse(Application.empty(application1)) - .withCluster(cluster1.id(), false, min, max); + .withCluster(cluster1.id(), false, Capacity.from(min, max)); try (Mutex lock = tester.nodeRepository().nodes().lock(application1)) { tester.nodeRepository().applications().put(application, lock); } diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java index a3c7b7d2d2b..a04a3828f13 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java +++ 
b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java @@ -15,6 +15,7 @@ import com.yahoo.config.provision.NodeType; import com.yahoo.config.provision.RegionName; import com.yahoo.config.provision.SystemName; import com.yahoo.config.provision.Zone; +import com.yahoo.vespa.hosted.provision.Node; import com.yahoo.vespa.hosted.provision.NodeRepository; import com.yahoo.vespa.hosted.provision.Nodelike; import com.yahoo.vespa.hosted.provision.provisioning.CapacityPolicies; @@ -41,6 +42,7 @@ public class AutoscalingTest { new NodeResources(1, 1, 1, 1, NodeResources.DiskSpeed.any)); ClusterResources max = new ClusterResources(20, 1, new NodeResources(100, 1000, 1000, 1, NodeResources.DiskSpeed.any)); + var capacity = Capacity.from(min, max); AutoscalingTester tester = new AutoscalingTester(hostResources); ApplicationId application1 = tester.applicationId("application1"); @@ -50,10 +52,10 @@ public class AutoscalingTest { tester.deploy(application1, cluster1, 5, 1, hostResources); tester.clock().advance(Duration.ofDays(1)); - assertTrue("No measurements -> No change", tester.autoscale(application1, cluster1.id(), min, max).isEmpty()); + assertTrue("No measurements -> No change", tester.autoscale(application1, cluster1, capacity).isEmpty()); tester.addCpuMeasurements(0.25f, 1f, 59, application1); - assertTrue("Too few measurements -> No change", tester.autoscale(application1, cluster1.id(), min, max).isEmpty()); + assertTrue("Too few measurements -> No change", tester.autoscale(application1, cluster1, capacity).isEmpty()); tester.clock().advance(Duration.ofDays(1)); tester.addCpuMeasurements(0.25f, 1f, 120, application1); @@ -61,10 +63,10 @@ public class AutoscalingTest { tester.addQueryRateMeasurements(application1, cluster1.id(), 10, t -> t == 0 ? 
20.0 : 10.0); // Query traffic only ClusterResources scaledResources = tester.assertResources("Scaling up since resource usage is too high", 15, 1, 1.2, 28.6, 28.6, - tester.autoscale(application1, cluster1.id(), min, max).target()); + tester.autoscale(application1, cluster1, capacity)); tester.deploy(application1, cluster1, scaledResources); - assertTrue("Cluster in flux -> No further change", tester.autoscale(application1, cluster1.id(), min, max).isEmpty()); + assertTrue("Cluster in flux -> No further change", tester.autoscale(application1, cluster1, capacity).isEmpty()); tester.deactivateRetired(application1, cluster1, scaledResources); @@ -73,19 +75,19 @@ public class AutoscalingTest { tester.clock().advance(Duration.ofMinutes(-10 * 5)); tester.addQueryRateMeasurements(application1, cluster1.id(), 10, t -> t == 0 ? 20.0 : 10.0); // Query traffic only assertTrue("Load change is large, but insufficient measurements for new config -> No change", - tester.autoscale(application1, cluster1.id(), min, max).isEmpty()); + tester.autoscale(application1, cluster1, capacity).isEmpty()); tester.addCpuMeasurements(0.19f, 1f, 100, application1); tester.clock().advance(Duration.ofMinutes(-10 * 5)); tester.addQueryRateMeasurements(application1, cluster1.id(), 10, t -> t == 0 ? 20.0 : 10.0); // Query traffic only - assertEquals("Load change is small -> No change", Optional.empty(), tester.autoscale(application1, cluster1.id(), min, max).target()); + assertEquals("Load change is small -> No change", Optional.empty(), tester.autoscale(application1, cluster1, capacity).target()); tester.addCpuMeasurements(0.1f, 1f, 120, application1); tester.clock().advance(Duration.ofMinutes(-10 * 5)); tester.addQueryRateMeasurements(application1, cluster1.id(), 10, t -> t == 0 ? 
20.0 : 10.0); // Query traffic only tester.assertResources("Scaling down to minimum since usage has gone down significantly", 7, 1, 1.0, 66.7, 66.7, - tester.autoscale(application1, cluster1.id(), min, max).target()); + tester.autoscale(application1, cluster1, capacity)); var events = tester.nodeRepository().applications().get(application1).get().cluster(cluster1.id()).get().scalingEvents(); } @@ -96,6 +98,7 @@ public class AutoscalingTest { NodeResources resources = new NodeResources(3, 100, 100, 1); ClusterResources min = new ClusterResources( 2, 1, new NodeResources(1, 1, 1, 1)); ClusterResources max = new ClusterResources(20, 1, new NodeResources(100, 1000, 1000, 1)); + var capacity = Capacity.from(min, max); AutoscalingTester tester = new AutoscalingTester(resources.withVcpu(resources.vcpu() * 2)); ApplicationId application1 = tester.applicationId("application1"); @@ -107,8 +110,8 @@ public class AutoscalingTest { tester.clock().advance(Duration.ofMinutes(-10 * 5)); tester.addQueryRateMeasurements(application1, cluster1.id(), 10, t -> t == 0 ? 20.0 : 10.0); // Query traffic only ClusterResources scaledResources = tester.assertResources("Scaling up since cpu usage is too high", - 7, 1, 2.5, 80.0, 80.0, - tester.autoscale(application1, cluster1.id(), min, max).target()); + 7, 1, 2.5, 80.0, 50.5, + tester.autoscale(application1, cluster1, capacity)); tester.deploy(application1, cluster1, scaledResources); tester.deactivateRetired(application1, cluster1, scaledResources); @@ -117,8 +120,8 @@ public class AutoscalingTest { tester.clock().advance(Duration.ofMinutes(-10 * 5)); tester.addQueryRateMeasurements(application1, cluster1.id(), 10, t -> t == 0 ? 
20.0 : 10.0); // Query traffic only tester.assertResources("Scaling down since cpu usage has gone down", - 4, 1, 2.5, 68.6, 68.6, - tester.autoscale(application1, cluster1.id(), min, max).target()); + 4, 1, 2.5, 68.6, 27.4, + tester.autoscale(application1, cluster1, capacity)); } @Test @@ -142,9 +145,10 @@ public class AutoscalingTest { new NodeResources(1, 1, 1, 1, NodeResources.DiskSpeed.any)); ClusterResources max = new ClusterResources(20, 1, new NodeResources(100, 1000, 1000, 1, NodeResources.DiskSpeed.any)); + var capacity = Capacity.from(min, max); ClusterResources scaledResources = tester.assertResources("Scaling up since resource usage is too high", 14, 1, 1.4, 30.8, 30.8, - tester.autoscale(application1, cluster1.id(), min, max).target()); + tester.autoscale(application1, cluster1, capacity)); assertEquals("Disk speed from min/max is used", NodeResources.DiskSpeed.any, scaledResources.nodeResources().diskSpeed()); tester.deploy(application1, cluster1, scaledResources); @@ -164,6 +168,7 @@ public class AutoscalingTest { NodeResources resources = new NodeResources(1, 10, 10, 1); var min = new ClusterResources( 2, 1, resources.with(NodeResources.DiskSpeed.any)); var max = new ClusterResources( 10, 1, resources.with(NodeResources.DiskSpeed.any)); + var capacity = Capacity.from(min, max); tester.deploy(application1, cluster1, Capacity.from(min, max)); // Redeployment without target: Uses current resource numbers with *requested* non-numbers (i.e disk-speed any) @@ -176,7 +181,7 @@ public class AutoscalingTest { // Autoscaling: Uses disk-speed any as well tester.clock().advance(Duration.ofDays(2)); tester.addCpuMeasurements(0.8f, 1f, 120, application1); - Autoscaler.Advice advice = tester.autoscale(application1, cluster1.id(), min, max); + Autoscaler.Advice advice = tester.autoscale(application1, cluster1, capacity); assertEquals(NodeResources.DiskSpeed.any, advice.target().get().nodeResources().diskSpeed()); @@ -187,6 +192,7 @@ public class AutoscalingTest { 
NodeResources hostResources = new NodeResources(6, 100, 100, 1); ClusterResources min = new ClusterResources( 2, 1, new NodeResources(1, 1, 1, 1)); ClusterResources max = new ClusterResources( 6, 1, new NodeResources(2.4, 78, 79, 1)); + var capacity = Capacity.from(min, max); AutoscalingTester tester = new AutoscalingTester(hostResources); ApplicationId application1 = tester.applicationId("application1"); @@ -199,8 +205,8 @@ public class AutoscalingTest { tester.clock().advance(Duration.ofMinutes(-10 * 5)); tester.addQueryRateMeasurements(application1, cluster1.id(), 10, t -> t == 0 ? 20.0 : 10.0); // Query traffic only tester.assertResources("Scaling up to limit since resource usage is too high", - 6, 1, 2.4, 78.0, 79.0, - tester.autoscale(application1, cluster1.id(), min, max).target()); + 6, 1, 2.4, 78.0, 70.0, + tester.autoscale(application1, cluster1, capacity)); } @Test @@ -208,6 +214,7 @@ public class AutoscalingTest { NodeResources resources = new NodeResources(3, 100, 100, 1); ClusterResources min = new ClusterResources( 4, 1, new NodeResources(1.8, 7.4, 8.5, 1)); ClusterResources max = new ClusterResources( 6, 1, new NodeResources(2.4, 78, 79, 1)); + var capacity = Capacity.from(min, max); AutoscalingTester tester = new AutoscalingTester(resources.withVcpu(resources.vcpu() * 2)); ApplicationId application1 = tester.applicationId("application1"); @@ -218,7 +225,7 @@ public class AutoscalingTest { tester.addMeasurements(0.05f, 0.05f, 0.05f, 0, 120, application1); tester.assertResources("Scaling down to limit since resource usage is low", 4, 1, 1.8, 7.7, 10.0, - tester.autoscale(application1, cluster1.id(), min, max).target()); + tester.autoscale(application1, cluster1, capacity)); } @Test @@ -226,6 +233,7 @@ public class AutoscalingTest { NodeResources hostResources = new NodeResources(6, 100, 100, 1); ClusterResources min = new ClusterResources( 2, 1, NodeResources.unspecified()); ClusterResources max = new ClusterResources( 6, 1, 
NodeResources.unspecified()); + var capacity = Capacity.from(min, max); AutoscalingTester tester = new AutoscalingTester(hostResources); ApplicationId application1 = tester.applicationId("application1"); @@ -238,13 +246,14 @@ public class AutoscalingTest { tester.deploy(application1, cluster1, Capacity.from(min, max)); tester.assertResources("Min number of nodes and default resources", 2, 1, defaultResources, - Optional.of(tester.nodeRepository().nodes().list().owner(application1).toResources())); + tester.nodeRepository().nodes().list().owner(application1).toResources()); tester.addMeasurements(0.25f, 0.95f, 0.95f, 0, 120, application1); tester.clock().advance(Duration.ofMinutes(-10 * 5)); tester.addQueryRateMeasurements(application1, cluster1.id(), 10, t -> t == 0 ? 20.0 : 10.0); // Query traffic only tester.assertResources("Scaling up to limit since resource usage is too high", - 4, 1, defaultResources, - tester.autoscale(application1, cluster1.id(), min, max).target()); + 4, 1, + defaultResources.vcpu(), defaultResources.memoryGb(), defaultResources.diskGb(), + tester.autoscale(application1, cluster1, capacity)); } @Test @@ -252,6 +261,7 @@ public class AutoscalingTest { NodeResources hostResources = new NodeResources(30.0, 100, 100, 1); ClusterResources min = new ClusterResources( 2, 2, new NodeResources(1, 1, 1, 1)); ClusterResources max = new ClusterResources(18, 6, new NodeResources(100, 1000, 1000, 1)); + var capacity = Capacity.from(min, max); AutoscalingTester tester = new AutoscalingTester(hostResources); ApplicationId application1 = tester.applicationId("application1"); @@ -264,7 +274,7 @@ public class AutoscalingTest { tester.addQueryRateMeasurements(application1, cluster1.id(), 10, t -> t == 0 ? 
20.0 : 10.0); // Query traffic only tester.assertResources("Scaling up since resource usage is too high", 6, 6, 3.6, 8.0, 10.0, - tester.autoscale(application1, cluster1.id(), min, max).target()); + tester.autoscale(application1, cluster1, capacity)); } @Test @@ -272,6 +282,7 @@ public class AutoscalingTest { NodeResources resources = new NodeResources(3, 100, 100, 1); ClusterResources min = new ClusterResources( 2, 1, new NodeResources(1, 1, 1, 1)); ClusterResources max = min; + var capacity = Capacity.from(min, max); AutoscalingTester tester = new AutoscalingTester(resources); ApplicationId application1 = tester.applicationId("application1"); @@ -281,13 +292,13 @@ public class AutoscalingTest { tester.deploy(application1, cluster1, 5, 1, resources); tester.clock().advance(Duration.ofDays(1)); tester.addCpuMeasurements(0.25f, 1f, 120, application1); - assertTrue(tester.autoscale(application1, cluster1.id(), min, max).isEmpty()); + assertTrue(tester.autoscale(application1, cluster1, capacity).isEmpty()); } @Test public void prefers_remote_disk_when_no_local_match() { - NodeResources resources = new NodeResources(3, 100, 100, 1); - ClusterResources min = new ClusterResources( 2, 1, new NodeResources(3, 100, 50, 1)); + NodeResources resources = new NodeResources(3, 100, 50, 1); + ClusterResources min = new ClusterResources( 2, 1, resources); ClusterResources max = min; // AutoscalingTester hardcodes 3Gb memory overhead: Flavor localFlavor = new Flavor("local", new NodeResources(3, 97, 75, 1, DiskSpeed.fast, StorageType.local)); @@ -305,15 +316,15 @@ public class AutoscalingTest { // deploy tester.deploy(application1, cluster1, 3, 1, min.nodeResources()); - tester.addDiskMeasurements(0.01f, 1f, 120, application1); - tester.clock().advance(Duration.ofMinutes(-10 * 5)); + Duration timeAdded = tester.addDiskMeasurements(0.01f, 1f, 120, application1); + tester.clock().advance(timeAdded.negated()); tester.addQueryRateMeasurements(application1, cluster1.id(), 10, t -> 
10.0); // Query traffic only - Optional<ClusterResources> suggestion = tester.suggest(application1, cluster1.id(), min, max).target(); + Autoscaler.Advice suggestion = tester.suggest(application1, cluster1.id(), min, max); tester.assertResources("Choosing the remote disk flavor as it has less disk", 6, 1, 3.0, 100.0, 10.0, suggestion); assertEquals("Choosing the remote disk flavor as it has less disk", - StorageType.remote, suggestion.get().nodeResources().storageType()); + StorageType.remote, suggestion.target().get().nodeResources().storageType()); } @Test @@ -332,8 +343,8 @@ public class AutoscalingTest { tester.clock().advance(Duration.ofMinutes(-10 * 5)); tester.addQueryRateMeasurements(application1, cluster1.id(), 10, t -> t == 0 ? 20.0 : 10.0); // Query traffic only tester.assertResources("Scaling up since resource usage is too high", - 7, 1, 2.5, 80.0, 80.0, - tester.suggest(application1, cluster1.id(), min, max).target()); + 7, 1, 2.5, 80.0, 50.5, + tester.suggest(application1, cluster1.id(), min, max)); } @Test @@ -341,6 +352,7 @@ public class AutoscalingTest { NodeResources resources = new NodeResources(3, 100, 100, 1); ClusterResources min = new ClusterResources(2, 1, new NodeResources(1, 1, 1, 1)); ClusterResources max = new ClusterResources(5, 1, new NodeResources(100, 1000, 1000, 1)); + var capacity = Capacity.from(min, max); AutoscalingTester tester = new AutoscalingTester(resources.withVcpu(resources.vcpu() * 2)); ApplicationId application1 = tester.applicationId("application1"); @@ -350,7 +362,7 @@ public class AutoscalingTest { tester.deploy(application1, cluster1, 2, 1, resources); tester.addMeasurements(0.5f, 0.6f, 0.7f, 1, false, true, 120, application1); assertTrue("Not scaling up since nodes were measured while cluster was unstable", - tester.autoscale(application1, cluster1.id(), min, max).isEmpty()); + tester.autoscale(application1, cluster1, capacity).isEmpty()); } @Test @@ -358,6 +370,7 @@ public class AutoscalingTest { NodeResources 
resources = new NodeResources(3, 100, 100, 1); ClusterResources min = new ClusterResources(2, 1, new NodeResources(1, 1, 1, 1)); ClusterResources max = new ClusterResources(5, 1, new NodeResources(100, 1000, 1000, 1)); + var capacity = Capacity.from(min, max); AutoscalingTester tester = new AutoscalingTester(resources.withVcpu(resources.vcpu() * 2)); ApplicationId application1 = tester.applicationId("application1"); @@ -367,7 +380,7 @@ public class AutoscalingTest { tester.deploy(application1, cluster1, 2, 1, resources); tester.addMeasurements(0.5f, 0.6f, 0.7f, 1, true, false, 120, application1); assertTrue("Not scaling up since nodes were measured while cluster was unstable", - tester.autoscale(application1, cluster1.id(), min, max).isEmpty()); + tester.autoscale(application1, cluster1, capacity).isEmpty()); } @Test @@ -375,6 +388,7 @@ public class AutoscalingTest { NodeResources resources = new NodeResources(3, 100, 100, 1); ClusterResources min = new ClusterResources( 2, 2, new NodeResources(1, 1, 1, 1)); ClusterResources max = new ClusterResources(20, 20, new NodeResources(100, 1000, 1000, 1)); + var capacity = Capacity.from(min, max); AutoscalingTester tester = new AutoscalingTester(resources.withVcpu(resources.vcpu() * 2)); ApplicationId application1 = tester.applicationId("application1"); @@ -386,8 +400,8 @@ public class AutoscalingTest { tester.clock().advance(Duration.ofMinutes(-10 * 5)); tester.addQueryRateMeasurements(application1, cluster1.id(), 10, t -> t == 0 ? 
20.0 : 10.0); // Query traffic only tester.assertResources("Scaling up since resource usage is too high", - 7, 7, 2.5, 80.0, 80.0, - tester.autoscale(application1, cluster1.id(), min, max).target()); + 7, 7, 2.5, 80.0, 50.5, + tester.autoscale(application1, cluster1, capacity)); } @Test @@ -395,6 +409,7 @@ public class AutoscalingTest { NodeResources resources = new NodeResources(3, 100, 100, 1); ClusterResources min = new ClusterResources( 3, 1, new NodeResources(1, 1, 1, 1)); ClusterResources max = new ClusterResources(21, 7, new NodeResources(100, 1000, 1000, 1)); + var capacity = Capacity.from(min, max); AutoscalingTester tester = new AutoscalingTester(resources.withVcpu(resources.vcpu() * 2)); ApplicationId application1 = tester.applicationId("application1"); @@ -408,8 +423,8 @@ public class AutoscalingTest { t -> t == 0 ? 20.0 : 10.0, t -> 1.0); tester.assertResources("Scaling up since resource usage is too high, changing to 1 group is cheaper", - 8, 1, 2.6, 83.3, 83.3, - tester.autoscale(application1, cluster1.id(), min, max).target()); + 8, 1, 2.6, 83.3, 52.6, + tester.autoscale(application1, cluster1, capacity)); } /** Same as above but mostly write traffic, which favors smaller groups */ @@ -418,6 +433,7 @@ public class AutoscalingTest { NodeResources resources = new NodeResources(3, 100, 100, 1); ClusterResources min = new ClusterResources( 3, 1, new NodeResources(1, 1, 1, 1)); ClusterResources max = new ClusterResources(21, 7, new NodeResources(100, 1000, 1000, 1)); + var capacity = Capacity.from(min, max); AutoscalingTester tester = new AutoscalingTester(resources.withVcpu(resources.vcpu() * 2)); ApplicationId application1 = tester.applicationId("application1"); @@ -431,15 +447,16 @@ public class AutoscalingTest { t -> t == 0 ? 
20.0 : 10.0, t -> 100.0); tester.assertResources("Scaling down since resource usage is too high, changing to 1 group is cheaper", - 4, 1, 2.1, 83.3, 83.3, - tester.autoscale(application1, cluster1.id(), min, max).target()); + 4, 1, 2.1, 83.3, 52.6, + tester.autoscale(application1, cluster1, capacity)); } @Test public void test_autoscaling_group_size() { NodeResources hostResources = new NodeResources(100, 1000, 1000, 100); - ClusterResources min = new ClusterResources( 3, 2, new NodeResources(1, 1, 1, 1)); + ClusterResources min = new ClusterResources( 2, 2, new NodeResources(1, 1, 1, 1)); ClusterResources max = new ClusterResources(30, 30, new NodeResources(100, 100, 1000, 1)); + var capacity = Capacity.from(min, max); AutoscalingTester tester = new AutoscalingTester(hostResources); ApplicationId application1 = tester.applicationId("application1"); @@ -453,7 +470,7 @@ public class AutoscalingTest { tester.addQueryRateMeasurements(application1, cluster1.id(), 10, t -> t == 0 ? 20.0 : 10.0); // Query traffic only tester.assertResources("Increase group size to reduce memory load", 8, 2, 12.4, 96.2, 62.5, - tester.autoscale(application1, cluster1.id(), min, max).target()); + tester.autoscale(application1, cluster1, capacity)); } @Test @@ -461,6 +478,7 @@ public class AutoscalingTest { NodeResources hostResources = new NodeResources(6, 100, 100, 1); ClusterResources min = new ClusterResources( 2, 1, new NodeResources(1, 1, 1, 1)); ClusterResources max = new ClusterResources(20, 1, new NodeResources(100, 1000, 1000, 1)); + var capacity = Capacity.from(min, max); AutoscalingTester tester = new AutoscalingTester(hostResources); ApplicationId application1 = tester.applicationId("application1"); @@ -473,7 +491,7 @@ public class AutoscalingTest { tester.addMemMeasurements(0.02f, 0.95f, 120, application1); tester.assertResources("Scaling down", 6, 1, 2.9, 4.0, 95.0, - tester.autoscale(application1, cluster1.id(), min, max).target()); + tester.autoscale(application1, cluster1, 
capacity)); } @Test @@ -481,6 +499,7 @@ public class AutoscalingTest { NodeResources hostResources = new NodeResources(6, 100, 100, 1); ClusterResources min = new ClusterResources( 2, 1, new NodeResources(1, 1, 1, 1)); ClusterResources max = new ClusterResources(20, 1, new NodeResources(100, 1000, 1000, 1)); + var capacity = Capacity.from(min, max); AutoscalingTester tester = new AutoscalingTester(hostResources); ApplicationId application1 = tester.applicationId("application1"); @@ -492,7 +511,7 @@ public class AutoscalingTest { tester.addMemMeasurements(0.02f, 0.95f, 120, application1); tester.clock().advance(Duration.ofMinutes(-10 * 5)); tester.addQueryRateMeasurements(application1, cluster1.id(), 10, t -> t == 0 ? 20.0 : 10.0); // Query traffic only - assertTrue(tester.autoscale(application1, cluster1.id(), min, max).target().isEmpty()); + assertTrue(tester.autoscale(application1, cluster1, capacity).target().isEmpty()); // Trying the same later causes autoscaling tester.clock().advance(Duration.ofDays(2)); @@ -501,7 +520,7 @@ public class AutoscalingTest { tester.addQueryRateMeasurements(application1, cluster1.id(), 10, t -> t == 0 ? 
20.0 : 10.0); // Query traffic only tester.assertResources("Scaling down", 6, 1, 1.4, 4.0, 95.0, - tester.autoscale(application1, cluster1.id(), min, max).target()); + tester.autoscale(application1, cluster1, capacity)); } @Test @@ -509,9 +528,12 @@ public class AutoscalingTest { NodeResources hostResources = new NodeResources(60, 100, 1000, 10); ClusterResources min = new ClusterResources(2, 1, new NodeResources( 2, 20, 200, 1)); ClusterResources max = new ClusterResources(4, 1, new NodeResources(60, 100, 1000, 1)); + var capacity = Capacity.from(min, max); { // No memory tax - AutoscalingTester tester = new AutoscalingTester(hostResources, new OnlySubtractingWhenForecastingCalculator(0)); + AutoscalingTester tester = new AutoscalingTester(new Zone(Environment.prod, RegionName.from("us-east")), + hostResources, + new OnlySubtractingWhenForecastingCalculator(0)); ApplicationId application1 = tester.applicationId("app1"); ClusterSpec cluster1 = tester.clusterSpec(ClusterSpec.Type.content, "cluster1"); @@ -522,11 +544,13 @@ public class AutoscalingTest { tester.addQueryRateMeasurements(application1, cluster1.id(), 10, t -> t == 0 ? 20.0 : 10.0); // Query traffic only tester.assertResources("Scaling up", 4, 1, 6.7, 20.5, 200, - tester.autoscale(application1, cluster1.id(), min, max).target()); + tester.autoscale(application1, cluster1, capacity)); } { // 15 Gb memory tax - AutoscalingTester tester = new AutoscalingTester(hostResources, new OnlySubtractingWhenForecastingCalculator(15)); + AutoscalingTester tester = new AutoscalingTester(new Zone(Environment.prod, RegionName.from("us-east")), + hostResources, + new OnlySubtractingWhenForecastingCalculator(15)); ApplicationId application1 = tester.applicationId("app1"); ClusterSpec cluster1 = tester.clusterSpec(ClusterSpec.Type.content, "cluster1"); @@ -537,7 +561,7 @@ public class AutoscalingTest { tester.addQueryRateMeasurements(application1, cluster1.id(), 10, t -> t == 0 ? 
20.0 : 10.0); // Query traffic only tester.assertResources("Scaling up", 4, 1, 6.7, 35.5, 200, - tester.autoscale(application1, cluster1.id(), min, max).target()); + tester.autoscale(application1, cluster1, capacity)); } } @@ -545,6 +569,7 @@ public class AutoscalingTest { public void test_autoscaling_with_dynamic_provisioning() { ClusterResources min = new ClusterResources( 2, 1, new NodeResources(1, 1, 1, 1)); ClusterResources max = new ClusterResources(20, 1, new NodeResources(100, 1000, 1000, 1)); + var capacity = Capacity.from(min, max); List<Flavor> flavors = new ArrayList<>(); flavors.add(new Flavor("aws-xlarge", new NodeResources(3, 200, 100, 1, NodeResources.DiskSpeed.fast, NodeResources.StorageType.remote))); flavors.add(new Flavor("aws-large", new NodeResources(3, 150, 100, 1, NodeResources.DiskSpeed.fast, NodeResources.StorageType.remote))); @@ -567,7 +592,7 @@ public class AutoscalingTest { tester.addMemMeasurements(0.9f, 0.6f, 120, application1); ClusterResources scaledResources = tester.assertResources("Scaling up since resource usage is too high.", 8, 1, 3, 83, 34.3, - tester.autoscale(application1, cluster1.id(), min, max).target()); + tester.autoscale(application1, cluster1, capacity)); tester.deploy(application1, cluster1, scaledResources); tester.deactivateRetired(application1, cluster1, scaledResources); @@ -578,7 +603,7 @@ public class AutoscalingTest { tester.addQueryRateMeasurements(application1, cluster1.id(), 10, t -> t == 0 ? 
20.0 : 10.0); // Query traffic only tester.assertResources("Scaling down since resource usage has gone down", 5, 1, 3, 83, 36.0, - tester.autoscale(application1, cluster1.id(), min, max).target()); + tester.autoscale(application1, cluster1, capacity)); } @Test @@ -586,6 +611,7 @@ public class AutoscalingTest { NodeResources resources = new NodeResources(3, 100, 100, 1); ClusterResources min = new ClusterResources( 1, 1, resources); ClusterResources max = new ClusterResources(10, 1, resources); + var capacity = Capacity.from(min, max); AutoscalingTester tester = new AutoscalingTester(resources.withVcpu(resources.vcpu() * 2)); ApplicationId application1 = tester.applicationId("application1"); @@ -599,18 +625,17 @@ public class AutoscalingTest { // (no read share stored) tester.assertResources("Advice to scale up since we set aside for bcp by default", 7, 1, 3, 100, 100, - tester.autoscale(application1, cluster1.id(), min, max).target()); + tester.autoscale(application1, cluster1, capacity)); tester.storeReadShare(0.25, 0.5, application1); tester.assertResources("Half of global share is the same as the default assumption used above", 7, 1, 3, 100, 100, - tester.autoscale(application1, cluster1.id(), min, max).target()); + tester.autoscale(application1, cluster1, capacity)); tester.storeReadShare(0.5, 0.5, application1); tester.assertResources("Advice to scale down since we don't need room for bcp", 4, 1, 3, 100, 100, - tester.autoscale(application1, cluster1.id(), min, max).target()); - + tester.autoscale(application1, cluster1, capacity)); } @Test @@ -620,42 +645,43 @@ public class AutoscalingTest { NodeResources maxResources = new NodeResources(10, 100, 100, 1); ClusterResources min = new ClusterResources(5, 1, minResources); ClusterResources max = new ClusterResources(5, 1, maxResources); + var capacity = Capacity.from(min, max); AutoscalingTester tester = new AutoscalingTester(maxResources.withVcpu(maxResources.vcpu() * 2)); ApplicationId application1 = 
tester.applicationId("application1"); ClusterSpec cluster1 = tester.clusterSpec(ClusterSpec.Type.container, "cluster1"); tester.deploy(application1, cluster1, 5, 1, midResources); - tester.addQueryRateMeasurements(application1, cluster1.id(), 100, t -> t == 0 ? 20.0 : 10.0); - tester.clock().advance(Duration.ofMinutes(-100 * 5)); - tester.addCpuMeasurements(0.25f, 1f, 100, application1); + Duration timeAdded = tester.addQueryRateMeasurements(application1, cluster1.id(), 100, t -> t == 0 ? 20.0 : 10.0); + tester.clock().advance(timeAdded.negated()); + tester.addCpuMeasurements(0.25f, 1f, 200, application1); // (no query rate data) tester.assertResources("Scale up since we assume we need 2x cpu for growth when no data scaling time data", 5, 1, 6.3, 100, 100, - tester.autoscale(application1, cluster1.id(), min, max).target()); + tester.autoscale(application1, cluster1, capacity)); tester.setScalingDuration(application1, cluster1.id(), Duration.ofMinutes(5)); - tester.addQueryRateMeasurements(application1, cluster1.id(), - 100, - t -> 10.0 + (t < 50 ? t : 100 - t)); - tester.clock().advance(Duration.ofMinutes(-100 * 5)); - tester.addCpuMeasurements(0.25f, 1f, 100, application1); + timeAdded = tester.addQueryRateMeasurements(application1, cluster1.id(), + 100, + t -> 10.0 + (t < 50 ? t : 100 - t)); + tester.clock().advance(timeAdded.negated()); + tester.addCpuMeasurements(0.25f, 1f, 200, application1); tester.assertResources("Scale down since observed growth is slower than scaling time", 5, 1, 3.4, 100, 100, - tester.autoscale(application1, cluster1.id(), min, max).target()); + tester.autoscale(application1, cluster1, capacity)); tester.clearQueryRateMeasurements(application1, cluster1.id()); tester.setScalingDuration(application1, cluster1.id(), Duration.ofMinutes(60)); - tester.addQueryRateMeasurements(application1, cluster1.id(), - 100, - t -> 10.0 + (t < 50 ? 
t * t * t : 125000 - (t - 49) * (t - 49) * (t - 49))); - tester.clock().advance(Duration.ofMinutes(-100 * 5)); - tester.addCpuMeasurements(0.25f, 1f, 100, application1); + timeAdded = tester.addQueryRateMeasurements(application1, cluster1.id(), + 100, + t -> 10.0 + (t < 50 ? t * t * t : 125000 - (t - 49) * (t - 49) * (t - 49))); + tester.clock().advance(timeAdded.negated()); + tester.addCpuMeasurements(0.25f, 1f, 200, application1); tester.assertResources("Scale up since observed growth is faster than scaling time", 5, 1, 10.0, 100, 100, - tester.autoscale(application1, cluster1.id(), min, max).target()); + tester.autoscale(application1, cluster1, capacity)); } @Test @@ -665,6 +691,7 @@ public class AutoscalingTest { NodeResources maxResources = new NodeResources(10, 100, 100, 1); ClusterResources min = new ClusterResources(5, 1, minResources); ClusterResources max = new ClusterResources(5, 1, maxResources); + var capacity = Capacity.from(min, max); AutoscalingTester tester = new AutoscalingTester(maxResources.withVcpu(maxResources.vcpu() * 2)); ApplicationId application1 = tester.applicationId("application1"); @@ -681,54 +708,96 @@ public class AutoscalingTest { tester.addLoadMeasurements(application1, cluster1.id(), 100, t -> t == 0 ? 20.0 : 10.0, t -> 10.0); tester.assertResources("Query and write load is equal -> scale up somewhat", 5, 1, 7.3, 100, 100, - tester.autoscale(application1, cluster1.id(), min, max).target()); + tester.autoscale(application1, cluster1, capacity)); tester.addCpuMeasurements(0.4f, 1f, 100, application1); tester.clock().advance(Duration.ofMinutes(-100 * 5)); tester.addLoadMeasurements(application1, cluster1.id(), 100, t -> t == 0 ? 
80.0 : 40.0, t -> 10.0); tester.assertResources("Query load is 4x write load -> scale up more", 5, 1, 9.5, 100, 100, - tester.autoscale(application1, cluster1.id(), min, max).target()); + tester.autoscale(application1, cluster1, capacity)); tester.addCpuMeasurements(0.3f, 1f, 100, application1); tester.clock().advance(Duration.ofMinutes(-100 * 5)); tester.addLoadMeasurements(application1, cluster1.id(), 100, t -> t == 0 ? 20.0 : 10.0, t -> 100.0); tester.assertResources("Write load is 10x query load -> scale down", 5, 1, 2.9, 100, 100, - tester.autoscale(application1, cluster1.id(), min, max).target()); + tester.autoscale(application1, cluster1, capacity)); tester.addCpuMeasurements(0.4f, 1f, 100, application1); tester.clock().advance(Duration.ofMinutes(-100 * 5)); tester.addLoadMeasurements(application1, cluster1.id(), 100, t -> t == 0 ? 20.0 : 10.0, t-> 0.0); tester.assertResources("Query only -> largest possible", 5, 1, 10.0, 100, 100, - tester.autoscale(application1, cluster1.id(), min, max).target()); + tester.autoscale(application1, cluster1, capacity)); tester.addCpuMeasurements(0.4f, 1f, 100, application1); tester.clock().advance(Duration.ofMinutes(-100 * 5)); tester.addLoadMeasurements(application1, cluster1.id(), 100, t -> 0.0, t -> 10.0); tester.assertResources("Write only -> smallest possible", 5, 1, 2.1, 100, 100, - tester.autoscale(application1, cluster1.id(), min, max).target()); + tester.autoscale(application1, cluster1, capacity)); } @Test - public void test_cd_autoscaling_test() { + public void test_autoscaling_in_dev() { NodeResources resources = new NodeResources(1, 4, 50, 1); - ClusterResources min = new ClusterResources( 2, 1, resources); + ClusterResources min = new ClusterResources( 1, 1, resources); ClusterResources max = new ClusterResources(3, 1, resources); - AutoscalingTester tester = new AutoscalingTester(resources.withVcpu(resources.vcpu() * 2)); + Capacity capacity = Capacity.from(min, max, false, true); + + AutoscalingTester tester 
= new AutoscalingTester(Environment.dev, resources.withVcpu(resources.vcpu() * 2)); + ApplicationId application1 = tester.applicationId("application1"); + ClusterSpec cluster1 = tester.clusterSpec(ClusterSpec.Type.container, "cluster1"); + + tester.deploy(application1, cluster1, capacity); + tester.addQueryRateMeasurements(application1, cluster1.id(), + 500, t -> 100.0); + tester.addCpuMeasurements(1.0f, 1f, 10, application1); + assertTrue("Not attempting to scale up because policies dictate we'll only get one node", + tester.autoscale(application1, cluster1, capacity).target().isEmpty()); + } + + /** Same setup as test_autoscaling_in_dev(), just with required = true */ + @Test + public void test_autoscaling_in_dev_with_required_resources() { + NodeResources resources = new NodeResources(1, 4, 50, 1); + ClusterResources min = new ClusterResources( 1, 1, resources); + ClusterResources max = new ClusterResources(3, 1, resources); + Capacity capacity = Capacity.from(min, max, true, true); + + AutoscalingTester tester = new AutoscalingTester(Environment.dev, resources.withVcpu(resources.vcpu() * 2)); ApplicationId application1 = tester.applicationId("application1"); ClusterSpec cluster1 = tester.clusterSpec(ClusterSpec.Type.container, "cluster1"); - tester.deploy(application1, cluster1, 2, 1, resources); + tester.deploy(application1, cluster1, capacity); tester.addQueryRateMeasurements(application1, cluster1.id(), - 500, t -> 0.0); - tester.addCpuMeasurements(0.5f, 1f, 10, application1); + 500, t -> 100.0); + tester.addCpuMeasurements(1.0f, 1f, 10, application1); + tester.assertResources("We scale up even in dev because resources are required", + 3, 1, 1.0, 4, 50, + tester.autoscale(application1, cluster1, capacity)); + } + + @Test + public void test_autoscaling_in_dev_with_required_unspecified_resources() { + NodeResources resources = NodeResources.unspecified(); + ClusterResources min = new ClusterResources( 1, 1, resources); + ClusterResources max = new 
ClusterResources(3, 1, resources); + Capacity capacity = Capacity.from(min, max, true, true); - tester.assertResources("Advice to scale up since observed growth is much faster than scaling time", - 3, 1, 1, 4, 50, - tester.autoscale(application1, cluster1.id(), min, max).target()); + AutoscalingTester tester = new AutoscalingTester(Environment.dev, + new NodeResources(10, 16, 100, 2)); + ApplicationId application1 = tester.applicationId("application1"); + ClusterSpec cluster1 = tester.clusterSpec(ClusterSpec.Type.container, "cluster1"); + + tester.deploy(application1, cluster1, capacity); + tester.addQueryRateMeasurements(application1, cluster1.id(), + 500, t -> 100.0); + tester.addCpuMeasurements(1.0f, 1f, 10, application1); + tester.assertResources("We scale up even in dev because resources are required", + 3, 1, 1.5, 8, 50, + tester.autoscale(application1, cluster1, capacity)); } /** diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTester.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTester.java index c45b6caf14c..8586704a426 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTester.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTester.java @@ -24,10 +24,12 @@ import com.yahoo.vespa.hosted.provision.applications.Cluster; import com.yahoo.vespa.hosted.provision.applications.ScalingEvent; import com.yahoo.vespa.hosted.provision.node.Agent; import com.yahoo.vespa.hosted.provision.node.IP; +import com.yahoo.vespa.hosted.provision.provisioning.CapacityPolicies; import com.yahoo.vespa.hosted.provision.provisioning.HostResourcesCalculator; import com.yahoo.vespa.hosted.provision.provisioning.ProvisioningTester; import java.time.Duration; +import java.time.Instant; import java.util.List; import java.util.Map; import java.util.Optional; @@ -45,14 +47,23 @@ class AutoscalingTester { 
private final ProvisioningTester provisioningTester; private final Autoscaler autoscaler; private final MockHostResourcesCalculator hostResourcesCalculator; + private final CapacityPolicies capacityPolicies; /** Creates an autoscaling tester with a single host type ready */ public AutoscalingTester(NodeResources hostResources) { - this(hostResources, null); + this(Environment.prod, hostResources); } - public AutoscalingTester(NodeResources hostResources, HostResourcesCalculator resourcesCalculator) { - this(new Zone(Environment.prod, RegionName.from("us-east")), List.of(new Flavor("hostFlavor", hostResources)), resourcesCalculator); + public AutoscalingTester(Environment environment, NodeResources hostResources) { + this(new Zone(environment, RegionName.from("us-east")), hostResources, null); + } + + public AutoscalingTester(Zone zone, NodeResources hostResources) { + this(zone, hostResources, null); + } + + public AutoscalingTester(Zone zone, NodeResources hostResources, HostResourcesCalculator resourcesCalculator) { + this(zone, List.of(new Flavor("hostFlavor", hostResources)), resourcesCalculator); provisioningTester.makeReadyNodes(20, "hostFlavor", NodeType.host, 8); provisioningTester.activateTenantHosts(); } @@ -71,6 +82,7 @@ class AutoscalingTester { hostResourcesCalculator = new MockHostResourcesCalculator(zone); autoscaler = new Autoscaler(nodeRepository()); + capacityPolicies = new CapacityPolicies(provisioningTester.nodeRepository()); } public ProvisioningTester provisioning() { return provisioningTester; } @@ -134,11 +146,11 @@ class AutoscalingTester { NodeList nodes = nodeRepository().nodes().list(Node.State.active).owner(applicationId); float oneExtraNodeFactor = (float)(nodes.size() - 1.0) / (nodes.size()); for (int i = 0; i < count; i++) { - clock().advance(Duration.ofMinutes(5)); + clock().advance(Duration.ofSeconds(150)); for (Node node : nodes) { Load load = new Load(value, ClusterModel.idealMemoryLoad * otherResourcesLoad, - 
ClusterModel.idealDiskLoad * otherResourcesLoad).multiply(oneExtraNodeFactor); + ClusterModel.idealContentDiskLoad * otherResourcesLoad).multiply(oneExtraNodeFactor); nodeMetricsDb().addNodeMetrics(List.of(new Pair<>(node.hostname(), new NodeMetricSnapshot(clock().instant(), load, @@ -159,16 +171,18 @@ class AutoscalingTester { * @param otherResourcesLoad the load factor relative to ideal to use for other resources * @param count the number of measurements * @param applicationId the application we're adding measurements for all nodes of + * @return the duration added to the current time by this */ - public void addDiskMeasurements(float value, float otherResourcesLoad, - int count, ApplicationId applicationId) { + public Duration addDiskMeasurements(float value, float otherResourcesLoad, + int count, ApplicationId applicationId) { NodeList nodes = nodeRepository().nodes().list(Node.State.active).owner(applicationId); float oneExtraNodeFactor = (float)(nodes.size() - 1.0) / (nodes.size()); + Instant initialTime = clock().instant(); for (int i = 0; i < count; i++) { - clock().advance(Duration.ofMinutes(5)); + clock().advance(Duration.ofSeconds(150)); for (Node node : nodes) { Load load = new Load(ClusterModel.idealQueryCpuLoad * otherResourcesLoad, - ClusterModel.idealDiskLoad * otherResourcesLoad, + ClusterModel.idealContentDiskLoad * otherResourcesLoad, value).multiply(oneExtraNodeFactor); nodeMetricsDb().addNodeMetrics(List.of(new Pair<>(node.hostname(), new NodeMetricSnapshot(clock().instant(), @@ -179,6 +193,7 @@ class AutoscalingTester { 0.0)))); } } + return Duration.between(initialTime, clock().instant()); } /** @@ -200,10 +215,10 @@ class AutoscalingTester { for (Node node : nodes) { float cpu = (float) 0.2 * otherResourcesLoad * oneExtraNodeFactor; float memory = value * oneExtraNodeFactor; - float disk = (float) ClusterModel.idealDiskLoad * otherResourcesLoad * oneExtraNodeFactor; + float disk = (float) ClusterModel.idealContentDiskLoad * 
otherResourcesLoad * oneExtraNodeFactor; Load load = new Load(0.2 * otherResourcesLoad, value, - ClusterModel.idealDiskLoad * otherResourcesLoad).multiply(oneExtraNodeFactor); + ClusterModel.idealContentDiskLoad * otherResourcesLoad).multiply(oneExtraNodeFactor); nodeMetricsDb().addNodeMetrics(List.of(new Pair<>(node.hostname(), new NodeMetricSnapshot(clock().instant(), load, @@ -251,6 +266,7 @@ class AutoscalingTester { cluster.exclusive(), cluster.minResources(), cluster.maxResources(), + cluster.required(), cluster.suggestedResources(), cluster.targetResources(), List.of(), // Remove scaling events @@ -278,10 +294,11 @@ class AutoscalingTester { } /** Creates the given number of measurements, spaced 5 minutes between, using the given function */ - public void addQueryRateMeasurements(ApplicationId application, - ClusterSpec.Id cluster, - int measurements, - IntFunction<Double> queryRate) { + public Duration addQueryRateMeasurements(ApplicationId application, + ClusterSpec.Id cluster, + int measurements, + IntFunction<Double> queryRate) { + Instant initialTime = clock().instant(); for (int i = 0; i < measurements; i++) { nodeMetricsDb().addClusterMetrics(application, Map.of(cluster, new ClusterMetricSnapshot(clock().instant(), @@ -289,27 +306,28 @@ class AutoscalingTester { 0.0))); clock().advance(Duration.ofMinutes(5)); } + return Duration.between(initialTime, clock().instant()); } public void clearQueryRateMeasurements(ApplicationId application, ClusterSpec.Id cluster) { ((MemoryMetricsDb)nodeMetricsDb()).clearClusterMetrics(application, cluster); } - public Autoscaler.Advice autoscale(ApplicationId applicationId, ClusterSpec.Id clusterId, - ClusterResources min, ClusterResources max) { + public Autoscaler.Advice autoscale(ApplicationId applicationId, ClusterSpec cluster, Capacity capacity) { + capacity = capacityPolicies.applyOn(capacity, applicationId); Application application = 
nodeRepository().applications().get(applicationId).orElse(Application.empty(applicationId)) - .withCluster(clusterId, false, min, max); + .withCluster(cluster.id(), false, capacity); try (Mutex lock = nodeRepository().nodes().lock(applicationId)) { nodeRepository().applications().put(application, lock); } - return autoscaler.autoscale(application, application.clusters().get(clusterId), + return autoscaler.autoscale(application, application.clusters().get(cluster.id()), nodeRepository().nodes().list(Node.State.active).owner(applicationId)); } public Autoscaler.Advice suggest(ApplicationId applicationId, ClusterSpec.Id clusterId, ClusterResources min, ClusterResources max) { Application application = nodeRepository().applications().get(applicationId).orElse(Application.empty(applicationId)) - .withCluster(clusterId, false, min, max); + .withCluster(clusterId, false, Capacity.from(min, max)); try (Mutex lock = nodeRepository().nodes().lock(applicationId)) { nodeRepository().applications().put(application, lock); } @@ -317,28 +335,37 @@ class AutoscalingTester { nodeRepository().nodes().list(Node.State.active).owner(applicationId)); } - public ClusterResources assertResources(String message, - int nodeCount, int groupCount, - NodeResources expectedResources, - Optional<ClusterResources> resources) { - return assertResources(message, nodeCount, groupCount, - expectedResources.vcpu(), expectedResources.memoryGb(), expectedResources.diskGb(), - resources); + public void assertResources(String message, + int nodeCount, int groupCount, + NodeResources expectedResources, + ClusterResources resources) { + assertResources(message, nodeCount, groupCount, + expectedResources.vcpu(), expectedResources.memoryGb(), expectedResources.diskGb(), + resources); } public ClusterResources assertResources(String message, int nodeCount, int groupCount, double approxCpu, double approxMemory, double approxDisk, - Optional<ClusterResources> resources) { + Autoscaler.Advice advice) { + 
assertTrue("Resources are present: " + message + " (" + advice + ": " + advice.reason() + ")", + advice.target().isPresent()); + var resources = advice.target().get(); + assertResources(message, nodeCount, groupCount, approxCpu, approxMemory, approxDisk, resources); + return resources; + } + + public void assertResources(String message, + int nodeCount, int groupCount, + double approxCpu, double approxMemory, double approxDisk, + ClusterResources resources) { double delta = 0.0000000001; - assertTrue("Resources are present: " + message, resources.isPresent()); - NodeResources nodeResources = resources.get().nodeResources(); - assertEquals("Node count in " + resources.get() + ": " + message, nodeCount, resources.get().nodes()); - assertEquals("Group count in " + resources.get() + ": " + message, groupCount, resources.get().groups()); - assertEquals("Cpu in " + resources.get() + ": " + message, approxCpu, Math.round(nodeResources.vcpu() * 10) / 10.0, delta); - assertEquals("Memory in " + resources.get() + ": " + message, approxMemory, Math.round(nodeResources.memoryGb() * 10) / 10.0, delta); - assertEquals("Disk in: " + resources.get() + ": " + message, approxDisk, Math.round(nodeResources.diskGb() * 10) / 10.0, delta); - return resources.get(); + NodeResources nodeResources = resources.nodeResources(); + assertEquals("Node count in " + resources + ": " + message, nodeCount, resources.nodes()); + assertEquals("Group count in " + resources+ ": " + message, groupCount, resources.groups()); + assertEquals("Cpu in " + resources + ": " + message, approxCpu, Math.round(nodeResources.vcpu() * 10) / 10.0, delta); + assertEquals("Memory in " + resources + ": " + message, approxMemory, Math.round(nodeResources.memoryGb() * 10) / 10.0, delta); + assertEquals("Disk in: " + resources + ": " + message, approxDisk, Math.round(nodeResources.diskGb() * 10) / 10.0, delta); } public ManualClock clock() { diff --git 
a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModelTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModelTest.java index 0e37d953d2d..516a7a92d04 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModelTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModelTest.java @@ -2,12 +2,12 @@ package com.yahoo.vespa.hosted.provision.autoscale; import com.yahoo.config.provision.ApplicationId; +import com.yahoo.config.provision.Capacity; import com.yahoo.config.provision.ClusterResources; import com.yahoo.config.provision.ClusterSpec; import com.yahoo.config.provision.NodeResources; import com.yahoo.test.ManualClock; import com.yahoo.vespa.hosted.provision.applications.Application; -import com.yahoo.vespa.hosted.provision.applications.AutoscalingStatus; import com.yahoo.vespa.hosted.provision.applications.Cluster; import com.yahoo.vespa.hosted.provision.applications.Status; import org.junit.Test; @@ -15,7 +15,6 @@ import org.junit.Test; import java.time.Duration; import java.util.ArrayList; import java.util.List; -import java.util.Optional; import java.util.function.IntFunction; import static org.junit.Assert.assertEquals; @@ -31,19 +30,20 @@ public class ClusterModelTest { public void test_traffic_headroom() { ManualClock clock = new ManualClock(); Application application = Application.empty(ApplicationId.from("t1", "a1", "i1")); + ClusterSpec clusterSpec = clusterSpec(); Cluster cluster = cluster(new NodeResources(1, 10, 100, 1)); application = application.with(cluster); // No current traffic share: Ideal load is low but capped var model1 = new ClusterModel(application.with(new Status(0.0, 1.0)), - cluster, clock, Duration.ofMinutes(10), + clusterSpec, cluster, clock, Duration.ofMinutes(10), timeseries(cluster,100, t -> t == 0 ? 
10000.0 : 0.0, t -> 0.0, clock), ClusterNodesTimeseries.empty()); assertEquals(0.131, model1.idealLoad().cpu(), delta); // Almost no current traffic share: Ideal load is low but capped var model2 = new ClusterModel(application.with(new Status(0.0001, 1.0)), - cluster, clock, Duration.ofMinutes(10), + clusterSpec, cluster, clock, Duration.ofMinutes(10), timeseries(cluster,100, t -> t == 0 ? 10000.0 : 0.0, t -> 0.0, clock), ClusterNodesTimeseries.empty()); assertEquals(0.131, model2.idealLoad().cpu(), delta); @@ -54,33 +54,36 @@ public class ClusterModelTest { ManualClock clock = new ManualClock(); Application application = Application.empty(ApplicationId.from("t1", "a1", "i1")); + ClusterSpec clusterSpec = clusterSpec(); Cluster cluster = cluster(new NodeResources(1, 10, 100, 1)); application = application.with(cluster); // No current traffic: Ideal load is low but capped var model1 = new ClusterModel(application, - cluster, clock, Duration.ofMinutes(10), + clusterSpec, cluster, clock, Duration.ofMinutes(10), timeseries(cluster,100, t -> t == 0 ? 10000.0 : 0.0, t -> 0.0, clock), ClusterNodesTimeseries.empty()); assertEquals(0.275, model1.idealLoad().cpu(), delta); // Almost no current traffic: Ideal load is low but capped var model2 = new ClusterModel(application.with(new Status(0.0001, 1.0)), - cluster, clock, Duration.ofMinutes(10), + clusterSpec, cluster, clock, Duration.ofMinutes(10), timeseries(cluster,100, t -> t == 0 ? 
10000.0 : 0.0001, t -> 0.0, clock), ClusterNodesTimeseries.empty()); assertEquals(0.040, model2.idealLoad().cpu(), delta); } + private ClusterSpec clusterSpec() { + return ClusterSpec.specification(ClusterSpec.Type.content, ClusterSpec.Id.from("test")) + .group(ClusterSpec.Group.from(0)) + .vespaVersion("7.1.1") + .build(); + } + private Cluster cluster(NodeResources resources) { - return new Cluster(ClusterSpec.Id.from("test"), - false, - new ClusterResources(5, 1, resources), - new ClusterResources(5, 1, resources), - Optional.empty(), - Optional.empty(), - List.of(), - AutoscalingStatus.empty()); + return Cluster.create(ClusterSpec.Id.from("test"), + false, + Capacity.from(new ClusterResources(5, 1, resources))); } /** Creates the given number of measurements, spaced 5 minutes between, using the given function */ diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/MetricsV2MetricsFetcherTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/MetricsV2MetricsFetcherTest.java index d1913a10b49..41851f6888e 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/MetricsV2MetricsFetcherTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/MetricsV2MetricsFetcherTest.java @@ -21,6 +21,9 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; +/** + * @author bratseth + */ public class MetricsV2MetricsFetcherTest { private static final double delta = 0.00000001; diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainerTest.java index 4bda7b137a0..d9037181f59 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainerTest.java +++ 
b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainerTest.java @@ -5,8 +5,13 @@ import com.yahoo.config.provision.ApplicationId; import com.yahoo.config.provision.Capacity; import com.yahoo.config.provision.ClusterResources; import com.yahoo.config.provision.ClusterSpec; +import com.yahoo.config.provision.Environment; import com.yahoo.config.provision.NodeResources; +import com.yahoo.config.provision.RegionName; +import com.yahoo.config.provision.SystemName; +import com.yahoo.config.provision.Zone; import com.yahoo.test.ManualClock; +import com.yahoo.vespa.hosted.provision.Node; import com.yahoo.vespa.hosted.provision.applications.Cluster; import com.yahoo.vespa.hosted.provision.applications.ScalingEvent; import com.yahoo.vespa.hosted.provision.testutils.MockDeployer; @@ -84,11 +89,9 @@ public class AutoscalingMaintainerTest { tester.deploy(app1, cluster1, app1Capacity); // Measure overload - tester.clock().advance(Duration.ofSeconds(1)); tester.addMeasurements(0.9f, 0.9f, 0.9f, 0, 500, app1); // Causes autoscaling - tester.clock().advance(Duration.ofSeconds(1)); tester.clock().advance(Duration.ofMinutes(10)); Instant firstMaintenanceTime = tester.clock().instant(); tester.maintainer().maintain(); @@ -103,16 +106,12 @@ public class AutoscalingMaintainerTest { assertEquals(firstMaintenanceTime.toEpochMilli(), events.get(1).at().toEpochMilli()); // Measure overload still, since change is not applied, but metrics are discarded - tester.clock().advance(Duration.ofSeconds(1)); tester.addMeasurements(0.9f, 0.9f, 0.9f, 0, 500, app1); - tester.clock().advance(Duration.ofSeconds(1)); tester.maintainer().maintain(); assertEquals(firstMaintenanceTime.toEpochMilli(), tester.deployer().lastDeployTime(app1).get().toEpochMilli()); // Measure underload, but no autoscaling since we still haven't measured we're on the new config generation - tester.clock().advance(Duration.ofSeconds(1)); tester.addMeasurements(0.1f, 0.1f, 0.1f, 0, 500, 
app1); - tester.clock().advance(Duration.ofSeconds(1)); tester.maintainer().maintain(); assertEquals(firstMaintenanceTime.toEpochMilli(), tester.deployer().lastDeployTime(app1).get().toEpochMilli()); @@ -121,8 +120,9 @@ public class AutoscalingMaintainerTest { tester.clock().advance(Duration.ofMinutes(5)); tester.addMeasurements(0.1f, 0.1f, 0.1f, 1, 1, app1); tester.maintainer().maintain(); + assertEquals(firstMaintenanceTime.toEpochMilli(), tester.deployer().lastDeployTime(app1).get().toEpochMilli()); // - measure underload - tester.clock().advance(Duration.ofHours(1)); + tester.clock().advance(Duration.ofDays(4)); // Exit cooling period tester.addMeasurements(0.1f, 0.1f, 0.1f, 1, 500, app1); Instant lastMaintenanceTime = tester.clock().instant(); tester.maintainer().maintain(); @@ -191,7 +191,6 @@ public class AutoscalingMaintainerTest { var tester = new AutoscalingMaintainerTester(new MockDeployer.ApplicationContext(app1, cluster1, app1Capacity)); ManualClock clock = tester.clock(); - // deploy tester.deploy(app1, cluster1, app1Capacity); autoscale(false, Duration.ofMinutes( 1), Duration.ofMinutes( 5), clock, app1, cluster1, tester); @@ -199,6 +198,77 @@ public class AutoscalingMaintainerTest { autoscale( true, Duration.ofMinutes(40), Duration.ofMinutes(20), clock, app1, cluster1, tester); } + @Test + public void test_autoscaling_ignores_high_cpu_right_after_generation_change() { + ApplicationId app1 = AutoscalingMaintainerTester.makeApplicationId("app1"); + ClusterSpec cluster1 = AutoscalingMaintainerTester.containerClusterSpec(); + NodeResources resources = new NodeResources(4, 4, 10, 1); + ClusterResources min = new ClusterResources(2, 1, resources); + ClusterResources max = new ClusterResources(20, 1, resources); + var capacity = Capacity.from(min, max); + var tester = new AutoscalingMaintainerTester(new MockDeployer.ApplicationContext(app1, cluster1, capacity)); + + tester.deploy(app1, cluster1, capacity); + // fast completion + tester.addMeasurements(1.0f, 
0.3f, 0.3f, 0, 1, app1); + tester.addMeasurements(1.0f, 0.3f, 0.3f, 0, 1, app1); + tester.maintainer().maintain(); + assertEquals("Scale up: " + tester.cluster(app1, cluster1).autoscalingStatus(), + 1, + tester.cluster(app1, cluster1).lastScalingEvent().get().generation()); + + // fast completion, with initially overloaded cpu + tester.addMeasurements(3.0f, 0.3f, 0.3f, 1, 1, app1); + tester.addMeasurements(0.2f, 0.3f, 0.3f, 1, 1, app1); + tester.maintainer().maintain(); + assertEquals("No autoscaling since we ignore the (first) data point in the warup period", + 1, + tester.cluster(app1, cluster1).lastScalingEvent().get().generation()); + } + + @Test + public void test_cd_autoscaling_test() { + ApplicationId app1 = AutoscalingMaintainerTester.makeApplicationId("app1"); + ClusterSpec cluster1 = AutoscalingMaintainerTester.containerClusterSpec(); + NodeResources resources = new NodeResources(1, 4, 50, 1); + ClusterResources min = new ClusterResources( 2, 1, resources); + ClusterResources max = new ClusterResources(3, 1, resources); + var capacity = Capacity.from(min, max); + var tester = new AutoscalingMaintainerTester(new Zone(SystemName.cd, Environment.prod, RegionName.from("us-east3")), + new MockDeployer.ApplicationContext(app1, cluster1, capacity)); + ManualClock clock = tester.clock(); + + tester.deploy(app1, cluster1, capacity); + assertEquals(2, + tester.nodeRepository().nodes().list(Node.State.active) + .owner(app1) + .cluster(cluster1.id()) + .size()); + + autoscale(false, Duration.ofMinutes( 1), Duration.ofMinutes( 5), clock, app1, cluster1, tester); + assertEquals(3, + tester.nodeRepository().nodes().list(Node.State.active) + .owner(app1) + .cluster(cluster1.id()) + .size()); + } + + @Test + public void test_cd_test_not_specifying_node_resources() { + ApplicationId app1 = AutoscalingMaintainerTester.makeApplicationId("app1"); + ClusterSpec cluster1 = AutoscalingMaintainerTester.containerClusterSpec(); + ClusterResources resources = new ClusterResources( 
2, 1, NodeResources.unspecified()); + var capacity = Capacity.from(resources); + var tester = new AutoscalingMaintainerTester(new Zone(SystemName.cd, Environment.prod, RegionName.from("us-east3")), + new MockDeployer.ApplicationContext(app1, cluster1, capacity)); + tester.deploy(app1, cluster1, capacity); // Deploy should succeed and allocate the nodes + assertEquals(2, + tester.nodeRepository().nodes().list(Node.State.active) + .owner(app1) + .cluster(cluster1.id()) + .size()); + } + private void autoscale(boolean down, Duration completionTime, Duration expectedWindow, ManualClock clock, ApplicationId application, ClusterSpec cluster, AutoscalingMaintainerTester tester) { @@ -209,7 +279,7 @@ public class AutoscalingMaintainerTest { clock.advance(completionTime); float load = down ? 0.1f : 1.0f; - tester.addMeasurements(load, load, load, generation, 200, application); + tester.addMeasurements(load, load, load, generation, 1, application); tester.maintainer().maintain(); assertEvent("Measured completion of the last scaling event, but no new autoscaling yet", generation, Optional.of(clock.instant()), diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainerTester.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainerTester.java index e36bd5e70bc..e1a1a2af5fb 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainerTester.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainerTester.java @@ -42,9 +42,11 @@ public class AutoscalingMaintainerTester { private final MockDeployer deployer; public AutoscalingMaintainerTester(MockDeployer.ApplicationContext ... 
appContexts) { - provisioningTester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east3"))) - .flavorsConfig(flavorsConfig()) - .build(); + this(new Zone(Environment.prod, RegionName.from("us-east3")), appContexts); + } + + public AutoscalingMaintainerTester(Zone zone, MockDeployer.ApplicationContext ... appContexts) { + provisioningTester = new ProvisioningTester.Builder().zone(zone).flavorsConfig(flavorsConfig()).build(); provisioningTester.clock().setInstant(Instant.ofEpochMilli(0)); Map<ApplicationId, MockDeployer.ApplicationContext> apps = Arrays.stream(appContexts) .collect(Collectors.toMap(c -> c.id(), c -> c)); @@ -69,8 +71,9 @@ public class AutoscalingMaintainerTester { return provisioningTester.deploy(application, cluster, capacity); } - public void addMeasurements(float cpu, float mem, float disk, long generation, int count, ApplicationId applicationId) { + public Duration addMeasurements(float cpu, float mem, float disk, long generation, int count, ApplicationId applicationId) { NodeList nodes = nodeRepository().nodes().list(Node.State.active).owner(applicationId); + Instant startTime = clock().instant(); for (int i = 0; i < count; i++) { for (Node node : nodes) nodeRepository().metricsDb().addNodeMetrics(List.of(new Pair<>(node.hostname(), @@ -80,7 +83,9 @@ public class AutoscalingMaintainerTester { true, true, 0.0)))); + clock().advance(Duration.ofSeconds(150)); } + return Duration.between(startTime, clock().instant()); } /** Creates the given number of measurements, spaced 5 minutes between, using the given function */ @@ -102,7 +107,7 @@ public class AutoscalingMaintainerTester { private FlavorsConfig flavorsConfig() { FlavorConfigBuilder b = new FlavorConfigBuilder(); - b.addFlavor("flt", 30, 30, 40, 3, Flavor.Type.BARE_METAL); + b.addFlavor("flt", 30, 30, 50, 3, Flavor.Type.BARE_METAL); b.addFlavor("cpu", 40, 20, 40, 3, Flavor.Type.BARE_METAL); b.addFlavor("mem", 20, 40, 40, 3, Flavor.Type.BARE_METAL); 
return b.build(); diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/DynamicProvisioningMaintainerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/DynamicProvisioningMaintainerTest.java index 316655e11fb..7ce26354739 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/DynamicProvisioningMaintainerTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/DynamicProvisioningMaintainerTest.java @@ -458,7 +458,7 @@ public class DynamicProvisioningMaintainerTest { // Provision config servers for (int i = 0; i < provisionedHosts.size(); i++) { - tester.makeReadyChildren(1, i + 1, NodeResources.unspecified(), hostType.childNodeType(), + tester.makeReadyChildren(1, i + 1, new NodeResources(1.5, 8, 50, 0.3), hostType.childNodeType(), provisionedHosts.get(i).hostname(), (nodeIndex) -> "cfg" + nodeIndex); } tester.prepareAndActivateInfraApplication(configSrvApp, hostType.childNodeType()); diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/MetricsReporterTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/MetricsReporterTest.java index f01f98064a0..8c649243d61 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/MetricsReporterTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/MetricsReporterTest.java @@ -183,7 +183,7 @@ public class MetricsReporterTest { // Allow 4 containers Set<String> ipAddressPool = Set.of("::2", "::3", "::4", "::5"); - Node dockerHost = Node.create("openStackId1", new IP.Config(Set.of("::1"), ipAddressPool), "dockerHost", + Node dockerHost = Node.create("node-id-1", new IP.Config(Set.of("::1"), ipAddressPool), "dockerHost", nodeFlavors.getFlavorOrThrow("host"), NodeType.host).build(); nodeRepository.nodes().addNodes(List.of(dockerHost), Agent.system); 
nodeRepository.nodes().deallocateRecursively("dockerHost", Agent.system, getClass().getSimpleName()); diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ScalingSuggestionsMaintainerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ScalingSuggestionsMaintainerTest.java index 03b41412896..b51f4403756 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ScalingSuggestionsMaintainerTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ScalingSuggestionsMaintainerTest.java @@ -13,6 +13,7 @@ import com.yahoo.config.provision.NodeType; import com.yahoo.config.provision.RegionName; import com.yahoo.config.provision.Zone; import com.yahoo.config.provisioning.FlavorsConfig; +import com.yahoo.test.ManualClock; import com.yahoo.vespa.hosted.provision.Node; import com.yahoo.vespa.hosted.provision.NodeList; import com.yahoo.vespa.hosted.provision.NodeRepository; @@ -24,6 +25,7 @@ import com.yahoo.vespa.hosted.provision.provisioning.ProvisioningTester; import org.junit.Test; import java.time.Duration; +import java.time.Instant; import java.util.List; import java.util.Optional; @@ -41,14 +43,13 @@ public class ScalingSuggestionsMaintainerTest { @Test public void testScalingSuggestionsMaintainer() { - ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east3"))).flavorsConfig(flavorsConfig()).build(); - + ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east3"))) + .flavorsConfig(flavorsConfig()) + .build(); ApplicationId app1 = ProvisioningTester.applicationId("app1"); - ClusterSpec cluster1 = ProvisioningTester.containerClusterSpec(); - ApplicationId app2 = ProvisioningTester.applicationId("app2"); + ClusterSpec cluster1 = ProvisioningTester.containerClusterSpec(); ClusterSpec cluster2 = 
ProvisioningTester.contentClusterSpec(); - tester.makeReadyNodes(20, "flt", NodeType.host, 8); tester.activateTenantHosts(); @@ -60,7 +61,8 @@ public class ScalingSuggestionsMaintainerTest { false, true)); tester.clock().advance(Duration.ofHours(13)); - addMeasurements(0.90f, 0.90f, 0.90f, 0, 500, app1, tester.nodeRepository()); + Duration timeAdded = addMeasurements(0.90f, 0.90f, 0.90f, 0, 500, app1, tester.nodeRepository()); + tester.clock().advance(timeAdded.negated()); addMeasurements(0.99f, 0.99f, 0.99f, 0, 500, app2, tester.nodeRepository()); ScalingSuggestionsMaintainer maintainer = new ScalingSuggestionsMaintainer(tester.nodeRepository(), @@ -68,7 +70,7 @@ public class ScalingSuggestionsMaintainerTest { new TestMetric()); maintainer.maintain(); - assertEquals("11 nodes with [vcpu: 6.5, memory: 5.5 Gb, disk 15.0 Gb, bandwidth: 0.1 Gbps]", + assertEquals("12 nodes with [vcpu: 6.0, memory: 5.5 Gb, disk 10.0 Gb, bandwidth: 0.1 Gbps]", suggestionOf(app1, cluster1, tester).get().resources().toString()); assertEquals("8 nodes with [vcpu: 11.0, memory: 4.4 Gb, disk 11.8 Gb, bandwidth: 0.1 Gbps]", suggestionOf(app2, cluster2, tester).get().resources().toString()); @@ -78,7 +80,7 @@ public class ScalingSuggestionsMaintainerTest { addMeasurements(0.10f, 0.10f, 0.10f, 0, 500, app1, tester.nodeRepository()); maintainer.maintain(); assertEquals("Suggestion stays at the peak value observed", - "11 nodes with [vcpu: 6.5, memory: 5.5 Gb, disk 15.0 Gb, bandwidth: 0.1 Gbps]", + "12 nodes with [vcpu: 6.0, memory: 5.5 Gb, disk 10.0 Gb, bandwidth: 0.1 Gbps]", suggestionOf(app1, cluster1, tester).get().resources().toString()); // Utilization is still way down and a week has passed tester.clock().advance(Duration.ofDays(7)); @@ -114,10 +116,11 @@ public class ScalingSuggestionsMaintainerTest { .shouldSuggestResources(currentResources); } - public void addMeasurements(float cpu, float memory, float disk, int generation, int count, - ApplicationId applicationId, - NodeRepository 
nodeRepository) { + public Duration addMeasurements(float cpu, float memory, float disk, int generation, int count, + ApplicationId applicationId, + NodeRepository nodeRepository) { NodeList nodes = nodeRepository.nodes().list(Node.State.active).owner(applicationId); + Instant startTime = nodeRepository.clock().instant(); for (int i = 0; i < count; i++) { for (Node node : nodes) nodeRepository.metricsDb().addNodeMetrics(List.of(new Pair<>(node.hostname(), @@ -127,7 +130,9 @@ public class ScalingSuggestionsMaintainerTest { true, true, 0.0)))); + ((ManualClock)nodeRepository.clock()).advance(Duration.ofSeconds(150)); } + return Duration.between(startTime, nodeRepository.clock().instant()); } private FlavorsConfig flavorsConfig() { diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/SwitchRebalancerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/SwitchRebalancerTest.java index f2f5869ece3..6be07f6f702 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/SwitchRebalancerTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/SwitchRebalancerTest.java @@ -30,6 +30,7 @@ import java.util.stream.Stream; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertSame; /** * @author mpolden @@ -92,11 +93,7 @@ public class SwitchRebalancerTest { rebalancedClusters.add(cluster); // Retired node becomes inactive and makes zone stable - try (var lock = tester.provisioner().lock(app)) { - NestedTransaction removeTransaction = new NestedTransaction(); - tester.nodeRepository().nodes().deactivate(retired.asList(), new ApplicationTransaction(lock, removeTransaction)); - removeTransaction.commit(); - } + deactivate(tester, retired); } assertEquals("Rebalanced all clusters", clusters, rebalancedClusters); @@ -134,21 +131,78 @@ public class SwitchRebalancerTest { // Rebalance 
tester.clock().advance(SwitchRebalancer.waitTimeAfterPreviousDeployment); rebalancer.maintain(); - NodeList activeNodes = tester.nodeRepository().nodes().list().owner(app).cluster(spec.id()).state(Node.State.active); + NodeList activeNodes = nodesIn(spec.id(), tester).state(Node.State.active); NodeList retired = activeNodes.retired(); assertEquals("Node is retired", 1, retired.size()); assertFalse("Retired node was not on exclusive switch", nodesOnExclusiveSwitch.contains(retired.first().get())); tester.assertSwitches(Set.of(switch0, switch1, switch2), app, spec.id()); // Retired node becomes inactive and makes zone stable + deactivate(tester, retired); + + // Next iteration does nothing + tester.clock().advance(SwitchRebalancer.waitTimeAfterPreviousDeployment); + assertNoMoves(rebalancer, tester); + } + + @Test + public void rebalancing_does_not_reuse_inactive_nodes() { + ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build(); + ClusterSpec spec = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("c1")).vespaVersion("1").build(); + Capacity capacity = Capacity.from(new ClusterResources(4, 1, new NodeResources(4, 8, 50, 1))); + MockDeployer deployer = deployer(tester, capacity, spec); + SwitchRebalancer rebalancer = new SwitchRebalancer(tester.nodeRepository(), Duration.ofDays(1), new TestMetric(), deployer); + + // Provision initial hosts on two switches + NodeResources hostResources = new NodeResources(8, 16, 500, 10); + String switch0 = "switch0"; + String switch1 = "switch1"; + provisionHosts(2, switch0, hostResources, tester); + provisionHosts(2, switch1, hostResources, tester); + + // Deploy application + deployer.deployFromLocalActive(app).get().activate(); + assertEquals("Nodes on " + switch0, 2, tester.activeNodesOn(switch0, app, spec.id()).size()); + assertEquals("Nodes on " + switch1, 2, tester.activeNodesOn(switch1, app, spec.id()).size()); + + // Two new hosts 
becomes available on a new switches + String switch2 = "switch2"; + String switch3 = "switch3"; + provisionHost(switch2, hostResources, tester); + provisionHost(switch3, hostResources, tester); + + // Rebalance retires one node and allocates another + tester.clock().advance(SwitchRebalancer.waitTimeAfterPreviousDeployment); + rebalancer.maintain(); + tester.assertSwitches(Set.of(switch0, switch1, switch2), app, spec.id()); + NodeList retired = nodesIn(spec.id(), tester).state(Node.State.active).retired(); + assertEquals("Node is retired", 1, retired.size()); + deactivate(tester, retired); + + // Next rebalance does not reuse inactive node + tester.clock().advance(SwitchRebalancer.waitTimeAfterPreviousDeployment); + rebalancer.maintain(); + assertSame("Inactive node is not re-activated", + Node.State.inactive, + nodesIn(spec.id(), tester).node(retired.first().get().hostname()).get().state()); + tester.assertSwitches(Set.of(switch0, switch1, switch2, switch3), app, spec.id()); + retired = nodesIn(spec.id(), tester).state(Node.State.active).retired(); + deactivate(tester, retired); + + // Next iteration does nothing + tester.clock().advance(SwitchRebalancer.waitTimeAfterPreviousDeployment); + assertNoMoves(rebalancer, tester); + } + + private NodeList nodesIn(ClusterSpec.Id cluster, ProvisioningTester tester) { + return tester.nodeRepository().nodes().list().owner(app).cluster(cluster); + } + + private void deactivate(ProvisioningTester tester, NodeList retired) { try (var lock = tester.provisioner().lock(app)) { NestedTransaction removeTransaction = new NestedTransaction(); tester.nodeRepository().nodes().deactivate(retired.asList(), new ApplicationTransaction(lock, removeTransaction)); removeTransaction.commit(); } - - // Next iteration does nothing - tester.clock().advance(SwitchRebalancer.waitTimeAfterPreviousDeployment); - assertNoMoves(rebalancer, tester); } private void provisionHost(String switchHostname, NodeResources hostResources, ProvisioningTester tester) 
{ diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/ApplicationSerializerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/ApplicationSerializerTest.java index 7266da9ff46..e34f63d8062 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/ApplicationSerializerTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/ApplicationSerializerTest.java @@ -2,6 +2,7 @@ package com.yahoo.vespa.hosted.provision.persistence; import com.yahoo.config.provision.ApplicationId; +import com.yahoo.config.provision.Capacity; import com.yahoo.config.provision.ClusterResources; import com.yahoo.config.provision.ClusterSpec; import com.yahoo.config.provision.NodeResources; @@ -33,6 +34,7 @@ public class ApplicationSerializerTest { false, new ClusterResources( 8, 4, new NodeResources(1, 2, 3, 4)), new ClusterResources(12, 6, new NodeResources(3, 6, 21, 24)), + true, Optional.empty(), Optional.empty(), List.of(), @@ -42,6 +44,7 @@ public class ApplicationSerializerTest { true, new ClusterResources( 8, 4, minResources), new ClusterResources(14, 7, new NodeResources(3, 6, 21, 24)), + false, Optional.of(new Cluster.Suggestion(new ClusterResources(20, 10, new NodeResources(0.5, 4, 14, 16)), Instant.ofEpochMilli(1234L))), @@ -72,6 +75,7 @@ public class ApplicationSerializerTest { assertEquals(originalCluster.exclusive(), serializedCluster.exclusive()); assertEquals(originalCluster.minResources(), serializedCluster.minResources()); assertEquals(originalCluster.maxResources(), serializedCluster.maxResources()); + assertEquals(originalCluster.required(), serializedCluster.required()); assertEquals(originalCluster.suggestedResources(), serializedCluster.suggestedResources()); assertEquals(originalCluster.targetResources(), serializedCluster.targetResources()); assertEquals(originalCluster.scalingEvents(), serializedCluster.scalingEvents()); diff --git 
a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java index 20546cc5bd9..95f25612dd7 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java @@ -523,7 +523,7 @@ public class ProvisioningTest { ApplicationId application = ProvisioningTester.applicationId(); tester.makeReadyHosts(10, defaultResources).activateTenantHosts(); - prepare(application, 1, 2, 3, 3, defaultResources, tester); + prepare(application, 1, 1, 1, 1, defaultResources, tester); } @Test @@ -1015,10 +1015,10 @@ public class ProvisioningTest { allHosts.addAll(content1); Function<Integer, Capacity> capacity = count -> Capacity.from(new ClusterResources(count, 1, NodeResources.unspecified()), required, true); - int expectedContainer0Size = tester.capacityPolicies().decideSize(container0Size, capacity.apply(container0Size), containerCluster0, application); - int expectedContainer1Size = tester.capacityPolicies().decideSize(container1Size, capacity.apply(container1Size), containerCluster1, application); - int expectedContent0Size = tester.capacityPolicies().decideSize(content0Size, capacity.apply(content0Size), contentCluster0, application); - int expectedContent1Size = tester.capacityPolicies().decideSize(content1Size, capacity.apply(content1Size), contentCluster1, application); + int expectedContainer0Size = tester.decideSize(capacity.apply(container0Size), application); + int expectedContainer1Size = tester.decideSize(capacity.apply(container1Size), application); + int expectedContent0Size = tester.decideSize(capacity.apply(content0Size), application); + int expectedContent1Size = tester.decideSize(capacity.apply(content1Size), application); assertEquals("Hosts in each group cluster is disjunct and the 
total number of unretired nodes is correct", expectedContainer0Size + expectedContainer1Size + expectedContent0Size + expectedContent1Size, diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java index 6d525762ecc..c478840780f 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java @@ -152,6 +152,10 @@ public class ProvisioningTester { public NodeList getNodes(ApplicationId id, Node.State ... inState) { return nodeRepository.nodes().list(inState).owner(id); } public InMemoryFlagSource flagSource() { return (InMemoryFlagSource) nodeRepository.flagSource(); } + public int decideSize(Capacity capacity, ApplicationId application) { + return capacityPolicies.applyOn(capacity, application).minResources().nodes(); + } + public Node patchNode(Node node, UnaryOperator<Node> patcher) { return patchNodes(List.of(node), patcher).get(0); } @@ -489,6 +493,7 @@ public class ProvisioningTester { public List<Node> makeReadyNodes(int n, Flavor flavor, Optional<TenantName> reservedTo, NodeType type, int ipAddressPoolSize, boolean dualStack) { List<Node> nodes = makeProvisionedNodes(n, flavor, reservedTo, type, ipAddressPoolSize, dualStack); nodes = nodeRepository.nodes().deallocate(nodes, Agent.system, getClass().getSimpleName()); + nodes.forEach(node -> { if (node.resources().isUnspecified()) throw new IllegalArgumentException(); }); return nodeRepository.nodes().setReady(nodes, Agent.system, getClass().getSimpleName()); } diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/NodesV2ApiTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/NodesV2ApiTest.java index cb0bb7c1ac3..7c69d1f5c28 100644 --- 
a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/NodesV2ApiTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/NodesV2ApiTest.java @@ -198,7 +198,7 @@ public class NodesV2ApiTest { Utf8.toBytes("{\"currentVespaVersion\": \"6.43.0\",\"currentDockerImage\": \"docker-registry.domain.tld:8080/dist/vespa:6.45.0\"}"), Request.Method.PATCH), "{\"message\":\"Updated host4.yahoo.com\"}"); assertResponse(new Request("http://localhost:8080/nodes/v2/node/host4.yahoo.com", - Utf8.toBytes("{\"openStackId\": \"patched-openstackid\"}"), Request.Method.PATCH), + Utf8.toBytes("{\"id\": \"patched-id\"}"), Request.Method.PATCH), "{\"message\":\"Updated host4.yahoo.com\"}"); assertResponse(new Request("http://localhost:8080/nodes/v2/node/dockerhost1.yahoo.com", Utf8.toBytes("{\"modelName\": \"foo\"}"), Request.Method.PATCH), @@ -383,7 +383,7 @@ public class NodesV2ApiTest { @Test public void post_controller_node() throws Exception { - String data = "[{\"hostname\":\"controller1.yahoo.com\", \"openStackId\":\"fake-controller1.yahoo.com\"," + + String data = "[{\"hostname\":\"controller1.yahoo.com\", \"id\":\"fake-controller1.yahoo.com\"," + createIpAddresses("127.0.0.1") + "\"flavor\":\"default\"" + ", \"type\":\"controller\"}]"; @@ -873,14 +873,14 @@ public class NodesV2ApiTest { String host = "parent2.yahoo.com"; // Test adding with overrides tester.assertResponse(new Request("http://localhost:8080/nodes/v2/node", - ("[{\"hostname\":\"" + host + "\"," + createIpAddresses("::1") + "\"openStackId\":\"osid-123\"," + + ("[{\"hostname\":\"" + host + "\"," + createIpAddresses("::1") + "\"id\":\"osid-123\"," + "\"flavor\":\"large-variant\",\"resources\":{\"diskGb\":1234,\"memoryGb\":4321}}]").getBytes(StandardCharsets.UTF_8), Request.Method.POST), 400, "{\"error-code\":\"BAD_REQUEST\",\"message\":\"Can only override disk GB for configured flavor\"}"); assertResponse(new Request("http://localhost:8080/nodes/v2/node", - 
("[{\"hostname\":\"" + host + "\"," + createIpAddresses("::1") + "\"openStackId\":\"osid-123\"," + + ("[{\"hostname\":\"" + host + "\"," + createIpAddresses("::1") + "\"id\":\"osid-123\"," + "\"flavor\":\"large-variant\",\"type\":\"host\",\"resources\":{\"diskGb\":1234}}]"). getBytes(StandardCharsets.UTF_8), Request.Method.POST), @@ -892,7 +892,7 @@ public class NodesV2ApiTest { String tenant = "node-1-3.yahoo.com"; String resources = "\"resources\":{\"vcpu\":64.0,\"memoryGb\":128.0,\"diskGb\":1234.0,\"bandwidthGbps\":15.0,\"diskSpeed\":\"slow\",\"storageType\":\"remote\"}"; assertResponse(new Request("http://localhost:8080/nodes/v2/node", - ("[{\"hostname\":\"" + tenant + "\"," + createIpAddresses("::2") + "\"openStackId\":\"osid-124\"," + + ("[{\"hostname\":\"" + tenant + "\"," + createIpAddresses("::2") + "\"id\":\"osid-124\"," + "\"type\":\"tenant\"," + resources + "}]"). getBytes(StandardCharsets.UTF_8), Request.Method.POST), @@ -920,14 +920,14 @@ public class NodesV2ApiTest { String resources = "\"resources\":{\"vcpu\":5.0,\"memoryGb\":4321.0,\"diskGb\":1234.0,\"bandwidthGbps\":0.3,\"diskSpeed\":\"slow\",\"storageType\":\"local\"}"; // Test adding new node with resources tester.assertResponse(new Request("http://localhost:8080/nodes/v2/node", - ("[{\"hostname\":\"" + hostname + "\"," + createIpAddresses("::1") + "\"openStackId\":\"osid-123\"," + + ("[{\"hostname\":\"" + hostname + "\"," + createIpAddresses("::1") + "\"id\":\"osid-123\"," + resources.replace("\"memoryGb\":4321.0,", "") + "}]").getBytes(StandardCharsets.UTF_8), Request.Method.POST), 400, "{\"error-code\":\"BAD_REQUEST\",\"message\":\"Required field 'memoryGb' is missing\"}"); assertResponse(new Request("http://localhost:8080/nodes/v2/node", - ("[{\"hostname\":\"" + hostname + "\"," + createIpAddresses("::1") + "\"openStackId\":\"osid-123\"," + resources + "}]") + ("[{\"hostname\":\"" + hostname + "\"," + createIpAddresses("::1") + "\"id\":\"osid-123\"," + resources + "}]") 
.getBytes(StandardCharsets.UTF_8), Request.Method.POST), "{\"message\":\"Added 1 nodes to the provisioned state\"}"); @@ -1029,13 +1029,13 @@ public class NodesV2ApiTest { private static String asDockerNodeJson(String hostname, NodeType nodeType, String parentHostname, String... ipAddress) { return "{\"hostname\":\"" + hostname + "\", \"parentHostname\":\"" + parentHostname + "\"," + createIpAddresses(ipAddress) + - "\"openStackId\":\"" + hostname + "\",\"flavor\":\"d-1-1-100\"" + + "\"id\":\"" + hostname + "\",\"flavor\":\"d-1-1-100\"" + (nodeType != NodeType.tenant ? ",\"type\":\"" + nodeType + "\"" : "") + "}"; } private static String asNodeJson(String hostname, String flavor, String... ipAddress) { - return "{\"hostname\":\"" + hostname + "\", \"openStackId\":\"" + hostname + "\"," + + return "{\"hostname\":\"" + hostname + "\", \"id\":\"" + hostname + "\"," + createIpAddresses(ipAddress) + "\"flavor\":\"" + flavor + "\"}"; } @@ -1048,7 +1048,7 @@ public class NodesV2ApiTest { private static String asNodeJson(String hostname, NodeType nodeType, String flavor, Optional<TenantName> reservedTo, Optional<ApplicationId> exclusiveTo, Optional<String> switchHostname, List<String> additionalHostnames, String... 
ipAddress) { - return "{\"hostname\":\"" + hostname + "\", \"openStackId\":\"" + hostname + "\"," + + return "{\"hostname\":\"" + hostname + "\", \"id\":\"" + hostname + "\"," + createIpAddresses(ipAddress) + "\"flavor\":\"" + flavor + "\"" + (reservedTo.map(tenantName -> ", \"reservedTo\":\"" + tenantName.value() + "\"").orElse("")) + diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/application1.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/application1.json index 689b6f3816b..fcdcdf1a8ca 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/application1.json +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/application1.json @@ -72,7 +72,7 @@ "idealMemory": 0.65, "currentMemory": 0.0, "disk" : 0.0, - "idealDisk": 0.6, + "idealDisk": 0.95, "currentDisk": 0.0 }, "scalingEvents" : [ diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/cfg1.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/cfg1.json index fb2b32fac20..0cf4c0ea005 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/cfg1.json +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/cfg1.json @@ -1,10 +1,9 @@ { "url": "http://localhost:8080/nodes/v2/node/cfg1.yahoo.com", - "id": "cfg1.yahoo.com", + "id": "cfg1", "state": "ready", "type": "config", "hostname": "cfg1.yahoo.com", - "openStackId": "cfg1", "flavor": "default", "cpuCores": 2.0, "resources":{"vcpu":2.0,"memoryGb":16.0,"diskGb":400.0,"bandwidthGbps":10.0,"diskSpeed":"fast","storageType":"remote"}, diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/cfg2.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/cfg2.json index eccde886d7e..730fd65ea66 100644 --- 
a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/cfg2.json +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/cfg2.json @@ -1,10 +1,9 @@ { "url": "http://localhost:8080/nodes/v2/node/cfg2.yahoo.com", - "id": "cfg2.yahoo.com", + "id": "cfg2", "state": "ready", "type": "config", "hostname": "cfg2.yahoo.com", - "openStackId": "cfg2", "flavor": "default", "cpuCores": 2.0, "resources":{"vcpu":2.0,"memoryGb":16.0,"diskGb":400.0,"bandwidthGbps":10.0,"diskSpeed":"fast","storageType":"remote"}, diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/controller1.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/controller1.json index 1631e46fc32..a41f41e657a 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/controller1.json +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/controller1.json @@ -1,10 +1,9 @@ { "url": "http://localhost:8080/nodes/v2/node/controller1.yahoo.com", - "id": "controller1.yahoo.com", + "id": "fake-controller1.yahoo.com", "state": "provisioned", "type": "controller", "hostname": "controller1.yahoo.com", - "openStackId": "fake-controller1.yahoo.com", "flavor": "default", "cpuCores": 2.0, "resources":{"vcpu":2.0,"memoryGb":16.0,"diskGb":400.0,"bandwidthGbps":10.0,"diskSpeed":"fast","storageType":"remote"}, diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-container1.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-container1.json index 33b7a91fc61..c9784c7e610 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-container1.json +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-container1.json @@ -1,11 +1,10 @@ { "url": 
"http://localhost:8080/nodes/v2/node/test-node-pool-102-2", - "id": "test-node-pool-102-2", + "id": "fake-test-node-pool-102-2", "state": "active", "type": "tenant", "hostname": "test-node-pool-102-2", "parentHostname": "dockerhost3.yahoo.com", - "openStackId": "fake-test-node-pool-102-2", "flavor": "[vcpu: 1.0, memory: 4.0 Gb, disk 100.0 Gb, bandwidth: 1.0 Gbps, storage type: remote]", "resources":{"vcpu":1.0,"memoryGb":4.0,"diskGb":100.0,"bandwidthGbps":1.0,"diskSpeed":"fast","storageType":"remote"}, "realResources":{"vcpu":1.0,"memoryGb":4.0,"diskGb":100.0,"bandwidthGbps":1.0,"diskSpeed":"fast","storageType":"remote"}, diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node1-os-upgrade-complete.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node1-os-upgrade-complete.json index ec46755b5ac..5f55fac0af2 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node1-os-upgrade-complete.json +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node1-os-upgrade-complete.json @@ -1,10 +1,9 @@ { "url": "http://localhost:8080/nodes/v2/node/dockerhost1.yahoo.com", - "id": "dockerhost1.yahoo.com", + "id": "dockerhost1", "state": "active", "type": "host", "hostname": "dockerhost1.yahoo.com", - "openStackId": "dockerhost1", "flavor": "large", "cpuCores": 4.0, "resources":{"vcpu":4.0,"memoryGb":32.0,"diskGb":1600.0,"bandwidthGbps":20.0,"diskSpeed":"fast","storageType":"remote"}, diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node1-reports-2.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node1-reports-2.json index c44b172633d..a1edf545a30 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node1-reports-2.json +++ 
b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node1-reports-2.json @@ -1,10 +1,9 @@ { "url": "http://localhost:8080/nodes/v2/node/dockerhost1.yahoo.com", - "id": "dockerhost1.yahoo.com", + "id": "dockerhost1", "state": "active", "type": "host", "hostname": "dockerhost1.yahoo.com", - "openStackId": "dockerhost1", "flavor": "large", "cpuCores": 4.0, "resources":{"vcpu":4.0,"memoryGb":32.0,"diskGb":1600.0,"bandwidthGbps":20.0,"diskSpeed":"fast","storageType":"remote"}, diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node1-reports-3.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node1-reports-3.json index df745729288..00cbe0d8ab6 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node1-reports-3.json +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node1-reports-3.json @@ -1,10 +1,9 @@ { "url": "http://localhost:8080/nodes/v2/node/dockerhost1.yahoo.com", - "id": "dockerhost1.yahoo.com", + "id": "dockerhost1", "state": "active", "type": "host", "hostname": "dockerhost1.yahoo.com", - "openStackId": "dockerhost1", "flavor": "large", "cpuCores": 4.0, "resources":{"vcpu":4.0,"memoryGb":32.0,"diskGb":1600.0,"bandwidthGbps":20.0,"diskSpeed":"fast","storageType":"remote"}, diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node1-reports-4.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node1-reports-4.json index f6ff292c830..3a13d53e148 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node1-reports-4.json +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node1-reports-4.json @@ -1,10 +1,9 @@ { "url": 
"http://localhost:8080/nodes/v2/node/dockerhost1.yahoo.com", - "id": "dockerhost1.yahoo.com", + "id": "dockerhost1", "state": "active", "type": "host", "hostname": "dockerhost1.yahoo.com", - "openStackId": "dockerhost1", "flavor": "large", "cpuCores": 4.0, "resources":{"vcpu":4.0,"memoryGb":32.0,"diskGb":1600.0,"bandwidthGbps":20.0,"diskSpeed":"fast","storageType":"remote"}, diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node1-reports.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node1-reports.json index 335a95f47f5..b1b5d22e361 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node1-reports.json +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node1-reports.json @@ -1,10 +1,9 @@ { "url": "http://localhost:8080/nodes/v2/node/dockerhost1.yahoo.com", - "id": "dockerhost1.yahoo.com", + "id": "dockerhost1", "state": "active", "type": "host", "hostname": "dockerhost1.yahoo.com", - "openStackId": "dockerhost1", "flavor": "large", "cpuCores": 4.0, "resources":{"vcpu":4.0,"memoryGb":32.0,"diskGb":1600.0,"bandwidthGbps":20.0,"diskSpeed":"fast","storageType":"remote"}, diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node1.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node1.json index 4b7eecdfc26..5cf7b85916b 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node1.json +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node1.json @@ -1,10 +1,9 @@ { "url": "http://localhost:8080/nodes/v2/node/dockerhost1.yahoo.com", - "id": "dockerhost1.yahoo.com", + "id": "dockerhost1", "state": "active", "type": "host", "hostname": "dockerhost1.yahoo.com", - "openStackId": "dockerhost1", "flavor": 
"large", "cpuCores": 4.0, "resources":{"vcpu":4.0,"memoryGb":32.0,"diskGb":1600.0,"bandwidthGbps":20.0,"diskSpeed":"fast","storageType":"remote"}, diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node2.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node2.json index e0a7448c111..c7fe1b13465 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node2.json +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node2.json @@ -1,10 +1,9 @@ { "url": "http://localhost:8080/nodes/v2/node/dockerhost2.yahoo.com", - "id": "dockerhost2.yahoo.com", + "id": "dockerhost2", "state": "active", "type": "host", "hostname": "dockerhost2.yahoo.com", - "openStackId": "dockerhost2", "flavor": "large", "cpuCores": 4.0, "resources":{"vcpu":4.0,"memoryGb":32.0,"diskGb":1600.0,"bandwidthGbps":20.0,"diskSpeed":"fast","storageType":"remote"}, diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node3.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node3.json index 21f3a54311f..ff57e544ae3 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node3.json +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node3.json @@ -1,10 +1,9 @@ { "url": "http://localhost:8080/nodes/v2/node/dockerhost3.yahoo.com", - "id": "dockerhost3.yahoo.com", + "id": "dockerhost3", "state": "active", "type": "host", "hostname": "dockerhost3.yahoo.com", - "openStackId": "dockerhost3", "flavor": "large", "cpuCores": 4.0, "resources":{"vcpu":4.0,"memoryGb":32.0,"diskGb":1600.0,"bandwidthGbps":20.0,"diskSpeed":"fast","storageType":"remote"}, diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node4.json 
b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node4.json index 789b0580357..7fd0db18228 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node4.json +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node4.json @@ -1,10 +1,9 @@ { "url": "http://localhost:8080/nodes/v2/node/dockerhost4.yahoo.com", - "id": "dockerhost4.yahoo.com", + "id": "dockerhost4", "state": "active", "type": "host", "hostname": "dockerhost4.yahoo.com", - "openStackId": "dockerhost4", "flavor": "large", "cpuCores": 4.0, "resources":{"vcpu":4.0,"memoryGb":32.0,"diskGb":1600.0,"bandwidthGbps":20.0,"diskSpeed":"fast","storageType":"remote"}, diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node5.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node5.json index 35ec6fc0273..3ffee1433c3 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node5.json +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node5.json @@ -1,10 +1,9 @@ { "url": "http://localhost:8080/nodes/v2/node/dockerhost5.yahoo.com", - "id": "dockerhost5.yahoo.com", + "id": "dockerhost5", "state": "active", "type": "host", "hostname": "dockerhost5.yahoo.com", - "openStackId": "dockerhost5", "flavor": "large", "cpuCores": 4.0, "resources":{"vcpu":4.0,"memoryGb":32.0,"diskGb":1600.0,"bandwidthGbps":20.0,"diskSpeed":"fast","storageType":"remote"}, diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/dockerhost1-with-firmware-data.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/dockerhost1-with-firmware-data.json index 498780a2eba..1b2c2934f24 100644 --- 
a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/dockerhost1-with-firmware-data.json +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/dockerhost1-with-firmware-data.json @@ -1,10 +1,9 @@ { "url": "http://localhost:8080/nodes/v2/node/dockerhost1.yahoo.com", - "id": "dockerhost1.yahoo.com", + "id": "dockerhost1", "state": "active", "type": "host", "hostname": "dockerhost1.yahoo.com", - "openStackId": "dockerhost1", "flavor": "large", "cpuCores": 4.0, "resources":{"vcpu":4.0,"memoryGb":32.0,"diskGb":1600.0,"bandwidthGbps":20.0,"diskSpeed":"fast","storageType":"remote"}, diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/dockerhost6.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/dockerhost6.json index 1ba16743e25..e7f59dbb9d6 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/dockerhost6.json +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/dockerhost6.json @@ -1,10 +1,9 @@ { "url": "http://localhost:8080/nodes/v2/node/dockerhost6.yahoo.com", - "id": "dockerhost6.yahoo.com", + "id": "dockerhost6", "state": "deprovisioned", "type": "host", "hostname": "dockerhost6.yahoo.com", - "openStackId": "dockerhost6", "flavor": "large", "cpuCores": 4.0, "resources":{"vcpu":4.0,"memoryGb":32.0,"diskGb":1600.0,"bandwidthGbps":20.0,"diskSpeed":"fast","storageType":"remote"}, diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node1.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node1.json index a35e8aa638b..13e0ecf1c65 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node1.json +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node1.json @@ -1,10 +1,9 @@ { "url": 
"http://localhost:8080/nodes/v2/node/host1.yahoo.com", - "id": "host1.yahoo.com", + "id": "node1", "state": "active", "type": "tenant", "hostname": "host1.yahoo.com", - "openStackId": "node1", "flavor": "[vcpu: 2.0, memory: 8.0 Gb, disk 50.0 Gb, bandwidth: 1.0 Gbps, storage type: local]", "resources":{"vcpu":2.0,"memoryGb":8.0,"diskGb":50.0,"bandwidthGbps":1.0,"diskSpeed":"fast","storageType":"local"}, "realResources":{"vcpu":2.0,"memoryGb":8.0,"diskGb":50.0,"bandwidthGbps":1.0,"diskSpeed":"fast","storageType":"local"}, diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node10.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node10.json index 5d98c75f346..fc93ea7254f 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node10.json +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node10.json @@ -1,11 +1,10 @@ { "url": "http://localhost:8080/nodes/v2/node/host10.yahoo.com", - "id": "host10.yahoo.com", + "id": "node10", "state": "active", "type": "tenant", "hostname": "host10.yahoo.com", "parentHostname": "parent1.yahoo.com", - "openStackId": "node10", "flavor": "[vcpu: 2.0, memory: 8.0 Gb, disk 50.0 Gb, bandwidth: 1.0 Gbps, storage type: local]", "resources":{"vcpu":2.0,"memoryGb":8.0,"diskGb":50.0,"bandwidthGbps":1.0,"diskSpeed":"fast","storageType":"local"}, "realResources":{"vcpu":2.0,"memoryGb":8.0,"diskGb":50.0,"bandwidthGbps":1.0,"diskSpeed":"fast","storageType":"local"}, diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node11.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node11.json index 4cfb5ac8e78..a728d3a9828 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node11.json +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node11.json @@ 
-5,7 +5,6 @@ "type": "tenant", "hostname": "host11.yahoo.com", "parentHostname": "parent.host.yahoo.com", - "openStackId": "host11.yahoo.com", "flavor": "[vcpu: 1.0, memory: 1.0 Gb, disk 100.0 Gb, bandwidth: 0.3 Gbps]", "resources":{"vcpu":1.0,"memoryGb":1.0,"diskGb":100.0,"bandwidthGbps":0.3,"diskSpeed":"fast","storageType":"any"}, "realResources":{"vcpu":1.0,"memoryGb":1.0,"diskGb":100.0,"bandwidthGbps":0.3,"diskSpeed":"fast","storageType":"any"}, diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node13.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node13.json index 477a87124e7..2686982b73f 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node13.json +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node13.json @@ -1,10 +1,9 @@ { "url": "http://localhost:8080/nodes/v2/node/host13.yahoo.com", - "id": "host13.yahoo.com", + "id": "node13", "state": "active", "type": "tenant", "hostname": "host13.yahoo.com", - "openStackId": "node13", "flavor": "[vcpu: 10.0, memory: 48.0 Gb, disk 500.0 Gb, bandwidth: 1.0 Gbps, storage type: local]", "resources":{"vcpu":10.0,"memoryGb":48.0,"diskGb":500.0,"bandwidthGbps":1.0,"diskSpeed":"fast","storageType":"local"}, "realResources":{"vcpu":10.0,"memoryGb":48.0,"diskGb":500.0,"bandwidthGbps":1.0,"diskSpeed":"fast","storageType":"local"}, diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node14.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node14.json index fd533b6372c..eb1b95efc99 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node14.json +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node14.json @@ -1,10 +1,9 @@ { "url": "http://localhost:8080/nodes/v2/node/host14.yahoo.com", - "id": 
"host14.yahoo.com", + "id": "node14", "state": "active", "type": "tenant", "hostname": "host14.yahoo.com", - "openStackId": "node14", "flavor": "[vcpu: 10.0, memory: 48.0 Gb, disk 500.0 Gb, bandwidth: 1.0 Gbps, storage type: local]", "resources":{"vcpu":10.0,"memoryGb":48.0,"diskGb":500.0,"bandwidthGbps":1.0, "diskSpeed":"fast","storageType":"local"}, "realResources":{"vcpu":10.0,"memoryGb":48.0,"diskGb":500.0,"bandwidthGbps":1.0, "diskSpeed":"fast","storageType":"local"}, diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node2.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node2.json index e288aaccc28..b1523843d4c 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node2.json +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node2.json @@ -1,10 +1,9 @@ { "url": "http://localhost:8080/nodes/v2/node/host2.yahoo.com", - "id": "host2.yahoo.com", + "id": "node2", "state": "active", "type": "tenant", "hostname": "host2.yahoo.com", - "openStackId": "node2", "flavor": "[vcpu: 2.0, memory: 8.0 Gb, disk 50.0 Gb, bandwidth: 1.0 Gbps, storage type: local]", "resources":{"vcpu":2.0,"memoryGb":8.0,"diskGb":50.0,"bandwidthGbps":1.0,"diskSpeed":"fast","storageType":"local"}, "realResources":{"vcpu":2.0,"memoryGb":8.0,"diskGb":50.0,"bandwidthGbps":1.0,"diskSpeed":"fast","storageType":"local"}, diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node3.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node3.json index ac3e9ef96b0..68490b3ed34 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node3.json +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node3.json @@ -1,10 +1,9 @@ { "url": "http://localhost:8080/nodes/v2/node/host3.yahoo.com", - "id": 
"host3.yahoo.com", + "id": "node3", "state": "ready", "type": "tenant", "hostname": "host3.yahoo.com", - "openStackId": "node3", "flavor": "[vcpu: 0.5, memory: 48.0 Gb, disk 500.0 Gb, bandwidth: 1.0 Gbps, storage type: local]", "resources":{"vcpu":0.5,"memoryGb":48.0,"diskGb":500.0,"bandwidthGbps":1.0,"diskSpeed":"fast","storageType":"local"}, "realResources":{"vcpu":0.5,"memoryGb":48.0,"diskGb":500.0,"bandwidthGbps":1.0,"diskSpeed":"fast","storageType":"local"}, diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node4-after-changes.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node4-after-changes.json index 7770227f5f8..88305c3d92d 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node4-after-changes.json +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node4-after-changes.json @@ -1,11 +1,10 @@ { "url": "http://localhost:8080/nodes/v2/node/host4.yahoo.com", - "id": "host4.yahoo.com", + "id": "patched-id", "state": "active", "type": "tenant", "hostname": "host4.yahoo.com", "parentHostname": "parent.yahoo.com", - "openStackId": "patched-openstackid", "flavor": "d-2-8-100", "cpuCores": 2.0, "resources":{"vcpu":2.0,"memoryGb":8.0,"diskGb":100.0,"bandwidthGbps":1.0,"diskSpeed":"fast","storageType":"remote"}, diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node4-with-hostnames.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node4-with-hostnames.json index 165e2480340..05976d95edc 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node4-with-hostnames.json +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node4-with-hostnames.json @@ -1,11 +1,10 @@ { "url": "http://localhost:8080/nodes/v2/node/host4.yahoo.com", - "id": "host4.yahoo.com", + 
"id": "node4", "state": "active", "type": "tenant", "hostname": "host4.yahoo.com", "parentHostname": "dockerhost1.yahoo.com", - "openStackId": "node4", "flavor": "[vcpu: 1.0, memory: 4.0 Gb, disk 100.0 Gb, bandwidth: 1.0 Gbps, storage type: local]", "resources":{"vcpu":1.0,"memoryGb":4.0,"diskGb":100.0,"bandwidthGbps":1.0,"diskSpeed":"fast","storageType":"local"}, "realResources":{"vcpu":1.0,"memoryGb":4.0,"diskGb":100.0,"bandwidthGbps":1.0,"diskSpeed":"fast","storageType":"local"}, diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node4.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node4.json index ceb9d88763f..80deeb501e1 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node4.json +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node4.json @@ -1,11 +1,10 @@ { "url": "http://localhost:8080/nodes/v2/node/host4.yahoo.com", - "id": "host4.yahoo.com", + "id": "node4", "state": "active", "type": "tenant", "hostname": "host4.yahoo.com", "parentHostname": "dockerhost1.yahoo.com", - "openStackId": "node4", "flavor": "[vcpu: 1.0, memory: 4.0 Gb, disk 100.0 Gb, bandwidth: 1.0 Gbps, storage type: local]", "resources":{"vcpu":1.0,"memoryGb":4.0,"diskGb":100.0,"bandwidthGbps":1.0,"diskSpeed":"fast","storageType":"local"}, "realResources":{"vcpu":1.0,"memoryGb":4.0,"diskGb":100.0,"bandwidthGbps":1.0,"diskSpeed":"fast","storageType":"local"}, diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node5-after-changes.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node5-after-changes.json index 62ecb313588..a4abb878ed7 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node5-after-changes.json +++ 
b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node5-after-changes.json @@ -1,11 +1,10 @@ { "url": "http://localhost:8080/nodes/v2/node/host5.yahoo.com", - "id": "host5.yahoo.com", + "id": "node5", "state": "failed", "type": "tenant", "hostname": "host5.yahoo.com", "parentHostname": "dockerhost2.yahoo.com", - "openStackId": "node5", "flavor": "[vcpu: 1.0, memory: 8.0 Gb, disk 100.0 Gb, bandwidth: 1.0 Gbps, disk speed: slow, storage type: remote]", "resources":{"vcpu":1.0,"memoryGb":8.0,"diskGb":100.0,"bandwidthGbps":1.0,"diskSpeed":"slow","storageType":"remote"}, "realResources":{"vcpu":1.0,"memoryGb":8.0,"diskGb":100.0,"bandwidthGbps":1.0,"diskSpeed":"slow","storageType":"remote"}, diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node5.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node5.json index 55c16c23a0a..fc5b2ac15c9 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node5.json +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node5.json @@ -1,11 +1,10 @@ { "url": "http://localhost:8080/nodes/v2/node/host5.yahoo.com", - "id": "host5.yahoo.com", + "id": "node5", "state": "failed", "type": "tenant", "hostname": "host5.yahoo.com", "parentHostname": "dockerhost2.yahoo.com", - "openStackId": "node5", "flavor": "[vcpu: 1.0, memory: 8.0 Gb, disk 100.0 Gb, bandwidth: 1.0 Gbps, disk speed: slow, storage type: remote]", "resources":{"vcpu":1.0,"memoryGb":8.0,"diskGb":100.0,"bandwidthGbps":1.0,"diskSpeed":"slow","storageType":"remote"}, "realResources":{"vcpu":1.0,"memoryGb":8.0,"diskGb":100.0,"bandwidthGbps":1.0,"diskSpeed":"slow","storageType":"remote"}, diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node55.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node55.json index 
2ac665bf742..a7a6a06ae0f 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node55.json +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node55.json @@ -1,10 +1,9 @@ { "url": "http://localhost:8080/nodes/v2/node/host55.yahoo.com", - "id": "host55.yahoo.com", + "id": "node55", "state": "parked", "type": "tenant", "hostname": "host55.yahoo.com", - "openStackId": "node55", "flavor": "[vcpu: 2.0, memory: 8.0 Gb, disk 50.0 Gb, bandwidth: 1.0 Gbps, storage type: local]", "resources":{"vcpu":2.0,"memoryGb":8.0,"diskGb":50.0,"bandwidthGbps":1.0,"diskSpeed":"fast","storageType":"local"}, "realResources":{"vcpu":2.0,"memoryGb":8.0,"diskGb":50.0,"bandwidthGbps":1.0,"diskSpeed":"fast","storageType":"local"}, diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node6.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node6.json index 1e64c8a8edd..561629b63c1 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node6.json +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node6.json @@ -1,10 +1,9 @@ { "url": "http://localhost:8080/nodes/v2/node/host6.yahoo.com", - "id": "host6.yahoo.com", + "id": "node6", "state": "active", "type": "tenant", "hostname": "host6.yahoo.com", - "openStackId": "node6", "flavor": "[vcpu: 2.0, memory: 8.0 Gb, disk 50.0 Gb, bandwidth: 1.0 Gbps, storage type: local]", "resources":{"vcpu":2.0,"memoryGb":8.0,"diskGb":50.0,"bandwidthGbps":1.0,"diskSpeed":"fast","storageType":"local"}, "realResources":{"vcpu":2.0,"memoryGb":8.0,"diskGb":50.0,"bandwidthGbps":1.0,"diskSpeed":"fast","storageType":"local"}, diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node7.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node7.json index 
051325612d6..7358538ea33 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node7.json +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node7.json @@ -1,10 +1,9 @@ { "url": "http://localhost:8080/nodes/v2/node/host7.yahoo.com", - "id": "host7.yahoo.com", + "id": "node7", "state": "provisioned", "type": "tenant", "hostname": "host7.yahoo.com", - "openStackId": "node7", "flavor": "[vcpu: 2.0, memory: 8.0 Gb, disk 50.0 Gb, bandwidth: 1.0 Gbps, storage type: local]", "resources":{"vcpu":2.0,"memoryGb":8.0,"diskGb":50.0,"bandwidthGbps":1.0,"diskSpeed":"fast","storageType":"local"}, "realResources":{"vcpu":2.0,"memoryGb":8.0,"diskGb":50.0,"bandwidthGbps":1.0,"diskSpeed":"fast","storageType":"local"}, diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node8.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node8.json index 11eb5149fbc..b27e6afc175 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node8.json +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node8.json @@ -4,7 +4,6 @@ "state": "provisioned", "type": "tenant", "hostname": "host8.yahoo.com", - "openStackId": "host8.yahoo.com", "flavor": "default", "cpuCores": 2.0, "resources":{"vcpu":2.0,"memoryGb":16.0,"diskGb":400.0,"bandwidthGbps":10.0,"diskSpeed":"fast","storageType": "remote"}, diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node9.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node9.json index a1eb16195d2..eb8b3ced02c 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node9.json +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node9.json @@ -4,7 +4,6 @@ "state": "provisioned", "type": "host", 
"hostname": "host9.yahoo.com", - "openStackId": "host9.yahoo.com", "flavor": "large-variant", "cpuCores": 64.0, "resources":{"vcpu":64.0,"memoryGb":128.0,"diskGb":2000.0,"bandwidthGbps":15.0,"diskSpeed":"fast","storageType":"remote"}, diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/parent2.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/parent2.json index acf481cd9c9..983e294909e 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/parent2.json +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/parent2.json @@ -4,7 +4,6 @@ "state": "provisioned", "type": "host", "hostname": "parent2.yahoo.com", - "openStackId": "parent2.yahoo.com", "flavor": "large-variant", "reservedTo": "myTenant", "exclusiveTo": "tenant1:app1:instance1", diff --git a/parent/pom.xml b/parent/pom.xml index 67f0d25a50d..4b65bf82916 100644 --- a/parent/pom.xml +++ b/parent/pom.xml @@ -812,6 +812,11 @@ </dependency> <dependency> <groupId>org.junit.jupiter</groupId> + <artifactId>junit-jupiter-api</artifactId> + <version>${junit.version}</version> + </dependency> + <dependency> + <groupId>org.junit.jupiter</groupId> <artifactId>junit-jupiter-engine</artifactId> <version>${junit.version}</version> </dependency> @@ -874,10 +879,10 @@ <aircompressor.version>0.21</aircompressor.version> <airline.version>0.7</airline.version> <antlr.version>3.5.2</antlr.version> - <antlr4.version>4.9.3</antlr4.version> + <antlr4.version>4.5</antlr4.version> <apache.httpclient.version>4.5.13</apache.httpclient.version> <apache.httpcore.version>4.4.13</apache.httpcore.version> - <apache.httpclient5.version>5.1.1</apache.httpclient5.version> + <apache.httpclient5.version>5.1.2</apache.httpclient5.version> <asm.version>9.2</asm.version> <!-- Athenz dependencies. 
Make sure these dependencies match those in Vespa's internal repositories --> @@ -897,7 +902,7 @@ <commons.math3.version>3.6.1</commons.math3.version> <gson.version>2.8.9</gson.version> <jna.version>5.9.0</jna.version> - <junit.version>5.7.0</junit.version> + <junit.version>5.8.1</junit.version> <maven-assembly-plugin.version>3.3.0</maven-assembly-plugin.version> <maven-bundle-plugin.version>5.1.2</maven-bundle-plugin.version> <maven-compiler-plugin.version>3.8.1</maven-compiler-plugin.version> @@ -918,7 +923,7 @@ <protobuf.version>3.11.4</protobuf.version> <spifly.version>1.3.3</spifly.version> <surefire.version>2.22.2</surefire.version> - <zookeeper.client.version>3.6.3</zookeeper.client.version> + <zookeeper.client.version>3.7.0</zookeeper.client.version> <doclint>all</doclint> <test.hide>true</test.hide> diff --git a/persistence/src/tests/dummyimpl/dummyimpltest.cpp b/persistence/src/tests/dummyimpl/dummyimpltest.cpp index e2b07a03e2b..4ff851e1735 100644 --- a/persistence/src/tests/dummyimpl/dummyimpltest.cpp +++ b/persistence/src/tests/dummyimpl/dummyimpltest.cpp @@ -15,7 +15,7 @@ struct DummyPersistenceFactory : public ConformanceTest::PersistenceFactory { using Repo = document::DocumentTypeRepo; std::unique_ptr<PersistenceProvider> - getPersistenceImplementation(const std::shared_ptr<const Repo>& repo, const Repo::DocumenttypesConfig&) override { + getPersistenceImplementation(const std::shared_ptr<const Repo>& repo, const DocumenttypesConfig&) override { return std::make_unique<dummy::DummyPersistence>(repo); } diff --git a/persistence/src/tests/spi/clusterstatetest.cpp b/persistence/src/tests/spi/clusterstatetest.cpp index 8a303c1a1ac..ac67903244f 100644 --- a/persistence/src/tests/spi/clusterstatetest.cpp +++ b/persistence/src/tests/spi/clusterstatetest.cpp @@ -233,4 +233,31 @@ TEST(ClusterStateTest, can_infer_own_node_retired_state) EXPECT_TRUE(!node_marked_as_retired_in_state("distributor:3 storage:3 .1.s:r", d, 0)); } +namespace { + +bool 
+node_marked_as_maintenance_in_state(const std::string& stateStr, + const lib::Distribution& d, + uint16_t node, + bool maintenance_in_all_spaces) +{ + lib::ClusterState s(stateStr); + ClusterState state(s, node, d, maintenance_in_all_spaces); + return state.nodeMaintenance(); +} + +} + +// We want to track the maintenance state for the _node_, not just the _bucket space_. +TEST(ClusterStateTest, node_maintenance_state_is_set_independent_of_bucket_space_state_string) +{ + lib::Distribution d(lib::Distribution::getDefaultDistributionConfig(3, 3)); + + // Note: it doesn't actually matter what the cluster state string itself says here + EXPECT_FALSE(node_marked_as_maintenance_in_state("distributor:3 storage:3", d, 0, false)); + EXPECT_TRUE(node_marked_as_maintenance_in_state("distributor:3 storage:3", d, 0, true)); + EXPECT_TRUE(node_marked_as_maintenance_in_state("distributor:3 storage:3 .0.s:d", d, 0, true)); + EXPECT_FALSE(node_marked_as_maintenance_in_state("distributor:3 storage:3 .0.s:m", d, 0, false)); +} + } diff --git a/persistence/src/vespa/persistence/conformancetest/conformancetest.h b/persistence/src/vespa/persistence/conformancetest/conformancetest.h index 5c3f20b1900..e0ef1652163 100644 --- a/persistence/src/vespa/persistence/conformancetest/conformancetest.h +++ b/persistence/src/vespa/persistence/conformancetest/conformancetest.h @@ -8,6 +8,7 @@ */ #pragma once +#include <vespa/document/config/documenttypes_config_fwd.h> #include <vespa/persistence/spi/persistenceprovider.h> #include <gtest/gtest.h> @@ -19,8 +20,6 @@ class TestDocMan; } -namespace document::internal { class InternalDocumenttypesType; } - namespace storage::spi { class ConformanceTest : public ::testing::Test { @@ -29,7 +28,6 @@ public: using PersistenceProviderUP = std::unique_ptr<PersistenceProvider>; struct PersistenceFactory { typedef std::unique_ptr<PersistenceFactory> UP; - using DocumenttypesConfig = const document::internal::InternalDocumenttypesType; virtual 
~PersistenceFactory() = default; virtual PersistenceProviderUP getPersistenceImplementation( diff --git a/persistence/src/vespa/persistence/spi/clusterstate.cpp b/persistence/src/vespa/persistence/spi/clusterstate.cpp index 4bc538996ca..f82e6165fb8 100644 --- a/persistence/src/vespa/persistence/spi/clusterstate.cpp +++ b/persistence/src/vespa/persistence/spi/clusterstate.cpp @@ -14,10 +14,12 @@ namespace storage::spi { ClusterState::ClusterState(const lib::ClusterState& state, uint16_t nodeIndex, - const lib::Distribution& distribution) + const lib::Distribution& distribution, + bool maintenanceInAllSpaces) : _state(std::make_unique<lib::ClusterState>(state)), _distribution(std::make_unique<lib::Distribution>(distribution.serialize())), - _nodeIndex(nodeIndex) + _nodeIndex(nodeIndex), + _maintenanceInAllSpaces(maintenanceInAllSpaces) { } @@ -33,14 +35,11 @@ void ClusterState::deserialize(vespalib::nbostream& i) { _distribution = std::make_unique<lib::Distribution>(distribution); } -ClusterState::ClusterState(vespalib::nbostream& i) { - deserialize(i); -} - ClusterState::ClusterState(const ClusterState& other) { vespalib::nbostream o; other.serialize(o); deserialize(o); + _maintenanceInAllSpaces = other._maintenanceInAllSpaces; } ClusterState::~ClusterState() = default; @@ -68,28 +67,32 @@ ClusterState::shouldBeReady(const Bucket& b) const { return Trinary::False; } -bool ClusterState::clusterUp() const { +bool ClusterState::clusterUp() const noexcept { return _state && _state->getClusterState() == lib::State::UP; } -bool ClusterState::nodeHasStateOneOf(const char* states) const { +bool ClusterState::nodeHasStateOneOf(const char* states) const noexcept { return _state && _state->getNodeState(lib::Node(lib::NodeType::STORAGE, _nodeIndex)). 
getState().oneOf(states); } -bool ClusterState::nodeUp() const { +bool ClusterState::nodeUp() const noexcept { return nodeHasStateOneOf("uir"); } -bool ClusterState::nodeInitializing() const { +bool ClusterState::nodeInitializing() const noexcept { return nodeHasStateOneOf("i"); } -bool ClusterState::nodeRetired() const { +bool ClusterState::nodeRetired() const noexcept { return nodeHasStateOneOf("r"); } +bool ClusterState::nodeMaintenance() const noexcept { + return _maintenanceInAllSpaces; +} + void ClusterState::serialize(vespalib::nbostream& o) const { assert(_distribution); assert(_state); diff --git a/persistence/src/vespa/persistence/spi/clusterstate.h b/persistence/src/vespa/persistence/spi/clusterstate.h index 8e48758e243..bde7e5bdbf4 100644 --- a/persistence/src/vespa/persistence/spi/clusterstate.h +++ b/persistence/src/vespa/persistence/spi/clusterstate.h @@ -23,9 +23,9 @@ public: ClusterState(const lib::ClusterState& state, uint16_t nodeIndex, - const lib::Distribution& distribution); + const lib::Distribution& distribution, + bool maintenanceInAllSpaces = false); - ClusterState(vespalib::nbostream& i); ClusterState(const ClusterState& other); ClusterState& operator=(const ClusterState& other) = delete; ~ClusterState(); @@ -45,23 +45,32 @@ public: * compared to the complete list of nodes, and deigns the system to be * unusable. */ - bool clusterUp() const; + [[nodiscard]] bool clusterUp() const noexcept; /** * Returns false if this node has been set in a state where it should not * receive external load. + * + * TODO rename to indicate bucket space affinity. */ - bool nodeUp() const; + [[nodiscard]] bool nodeUp() const noexcept; /** * Returns true iff this node is marked as Initializing in the cluster state. + * + * TODO remove, init no longer used internally. */ - bool nodeInitializing() const; + [[nodiscard]] bool nodeInitializing() const noexcept; /** * Returns true iff this node is marked as Retired in the cluster state. 
*/ - bool nodeRetired() const; + [[nodiscard]] bool nodeRetired() const noexcept; + + /** + * Returns true iff this node is marked as Maintenance in all bucket space cluster states. + */ + [[nodiscard]] bool nodeMaintenance() const noexcept; /** * Returns a serialized form of this object. @@ -72,9 +81,10 @@ private: std::unique_ptr<lib::ClusterState> _state; std::unique_ptr<lib::Distribution> _distribution; uint16_t _nodeIndex; + bool _maintenanceInAllSpaces; void deserialize(vespalib::nbostream&); - bool nodeHasStateOneOf(const char* states) const; + bool nodeHasStateOneOf(const char* states) const noexcept; }; } diff --git a/persistence/src/vespa/persistence/spi/result.cpp b/persistence/src/vespa/persistence/spi/result.cpp index 2e24c9c2f91..e458d58fe69 100644 --- a/persistence/src/vespa/persistence/spi/result.cpp +++ b/persistence/src/vespa/persistence/spi/result.cpp @@ -8,7 +8,9 @@ namespace storage::spi { Result::Result(const Result &) = default; +Result::Result(Result&&) noexcept = default; Result & Result::operator = (const Result &) = default; +Result& Result::operator=(Result&&) noexcept = default; Result::~Result() = default; vespalib::string diff --git a/persistence/src/vespa/persistence/spi/result.h b/persistence/src/vespa/persistence/spi/result.h index 70bd37590a1..c734a885b12 100644 --- a/persistence/src/vespa/persistence/spi/result.h +++ b/persistence/src/vespa/persistence/spi/result.h @@ -36,7 +36,9 @@ public: {} Result(const Result &); + Result(Result&&) noexcept; Result & operator = (const Result &); + Result& operator=(Result&&) noexcept; virtual ~Result(); @@ -133,6 +133,7 @@ <module>vespa-athenz</module> <module>vespa-documentgen-plugin</module> <module>vespa-feed-client</module> + <module>vespa-feed-client-api</module> <module>vespa-feed-client-cli</module> <module>vespa-hadoop</module> <module>vespa-http-client</module> diff --git a/predicate-search-core/abi-spec.json b/predicate-search-core/abi-spec.json new file mode 100644 index 
00000000000..19dbb8a5a76 --- /dev/null +++ b/predicate-search-core/abi-spec.json @@ -0,0 +1,309 @@ +{ + "com.yahoo.document.predicate.BinaryFormat": { + "superClass": "java.lang.Object", + "interfaces": [], + "attributes": [ + "public" + ], + "methods": [ + "public void <init>()", + "public static byte[] encode(com.yahoo.document.predicate.Predicate)", + "public static com.yahoo.document.predicate.Predicate decode(byte[])" + ], + "fields": [] + }, + "com.yahoo.document.predicate.BooleanPredicate": { + "superClass": "com.yahoo.document.predicate.PredicateValue", + "interfaces": [], + "attributes": [ + "public" + ], + "methods": [ + "public void <init>(boolean)", + "public boolean getValue()", + "public com.yahoo.document.predicate.BooleanPredicate setValue(boolean)", + "public com.yahoo.document.predicate.BooleanPredicate clone()", + "public int hashCode()", + "public boolean equals(java.lang.Object)", + "protected void appendTo(java.lang.StringBuilder)", + "public bridge synthetic com.yahoo.document.predicate.Predicate clone()", + "public bridge synthetic java.lang.Object clone()" + ], + "fields": [] + }, + "com.yahoo.document.predicate.Conjunction": { + "superClass": "com.yahoo.document.predicate.PredicateOperator", + "interfaces": [], + "attributes": [ + "public" + ], + "methods": [ + "public varargs void <init>(com.yahoo.document.predicate.Predicate[])", + "public void <init>(java.util.List)", + "public com.yahoo.document.predicate.Conjunction addOperand(com.yahoo.document.predicate.Predicate)", + "public com.yahoo.document.predicate.Conjunction addOperands(java.util.Collection)", + "public com.yahoo.document.predicate.Conjunction setOperands(java.util.Collection)", + "public java.util.List getOperands()", + "public com.yahoo.document.predicate.Conjunction clone()", + "public int hashCode()", + "public boolean equals(java.lang.Object)", + "protected void appendTo(java.lang.StringBuilder)", + "public bridge synthetic com.yahoo.document.predicate.Predicate 
clone()", + "public bridge synthetic java.lang.Object clone()" + ], + "fields": [] + }, + "com.yahoo.document.predicate.Disjunction": { + "superClass": "com.yahoo.document.predicate.PredicateOperator", + "interfaces": [], + "attributes": [ + "public" + ], + "methods": [ + "public varargs void <init>(com.yahoo.document.predicate.Predicate[])", + "public void <init>(java.util.List)", + "public com.yahoo.document.predicate.Disjunction addOperand(com.yahoo.document.predicate.Predicate)", + "public com.yahoo.document.predicate.Disjunction addOperands(java.util.Collection)", + "public com.yahoo.document.predicate.Disjunction setOperands(java.util.Collection)", + "public java.util.List getOperands()", + "public com.yahoo.document.predicate.Disjunction clone()", + "public int hashCode()", + "public boolean equals(java.lang.Object)", + "protected void appendTo(java.lang.StringBuilder)", + "public bridge synthetic com.yahoo.document.predicate.Predicate clone()", + "public bridge synthetic java.lang.Object clone()" + ], + "fields": [] + }, + "com.yahoo.document.predicate.FeatureConjunction": { + "superClass": "com.yahoo.document.predicate.PredicateOperator", + "interfaces": [], + "attributes": [ + "public" + ], + "methods": [ + "public void <init>(java.util.List)", + "public static boolean isValidFeatureConjunctionOperand(com.yahoo.document.predicate.Predicate)", + "public java.util.List getOperands()", + "protected void appendTo(java.lang.StringBuilder)", + "public com.yahoo.document.predicate.FeatureConjunction clone()", + "public int hashCode()", + "public boolean equals(java.lang.Object)", + "public bridge synthetic com.yahoo.document.predicate.Predicate clone()", + "public bridge synthetic java.lang.Object clone()" + ], + "fields": [] + }, + "com.yahoo.document.predicate.FeatureRange": { + "superClass": "com.yahoo.document.predicate.PredicateValue", + "interfaces": [], + "attributes": [ + "public" + ], + "methods": [ + "public void <init>(java.lang.String)", + "public 
void <init>(java.lang.String, java.lang.Long, java.lang.Long)", + "public com.yahoo.document.predicate.FeatureRange setKey(java.lang.String)", + "public java.lang.String getKey()", + "public com.yahoo.document.predicate.FeatureRange setFromInclusive(java.lang.Long)", + "public java.lang.Long getFromInclusive()", + "public com.yahoo.document.predicate.FeatureRange setToInclusive(java.lang.Long)", + "public java.lang.Long getToInclusive()", + "public void addPartition(com.yahoo.document.predicate.RangePartition)", + "public java.util.List getEdgePartitions()", + "public java.util.List getPartitions()", + "public void clearPartitions()", + "public com.yahoo.document.predicate.FeatureRange clone()", + "public int hashCode()", + "public boolean equals(java.lang.Object)", + "protected void appendTo(java.lang.StringBuilder)", + "public static com.yahoo.document.predicate.FeatureRange buildFromMixedIn(java.lang.String, java.util.List, int)", + "public bridge synthetic com.yahoo.document.predicate.Predicate clone()", + "public bridge synthetic java.lang.Object clone()" + ], + "fields": [] + }, + "com.yahoo.document.predicate.FeatureSet": { + "superClass": "com.yahoo.document.predicate.PredicateValue", + "interfaces": [], + "attributes": [ + "public" + ], + "methods": [ + "public varargs void <init>(java.lang.String, java.lang.String[])", + "public void <init>(java.lang.String, java.util.Collection)", + "public com.yahoo.document.predicate.FeatureSet setKey(java.lang.String)", + "public java.lang.String getKey()", + "public com.yahoo.document.predicate.FeatureSet addValue(java.lang.String)", + "public com.yahoo.document.predicate.FeatureSet addValues(java.util.Collection)", + "public com.yahoo.document.predicate.FeatureSet setValues(java.util.Collection)", + "public java.util.Set getValues()", + "public com.yahoo.document.predicate.FeatureSet clone()", + "public int hashCode()", + "public boolean equals(java.lang.Object)", + "protected void 
appendTo(java.lang.StringBuilder)", + "protected void appendNegatedTo(java.lang.StringBuilder)", + "public bridge synthetic com.yahoo.document.predicate.Predicate clone()", + "public bridge synthetic java.lang.Object clone()" + ], + "fields": [] + }, + "com.yahoo.document.predicate.Negation": { + "superClass": "com.yahoo.document.predicate.PredicateOperator", + "interfaces": [], + "attributes": [ + "public" + ], + "methods": [ + "public void <init>(com.yahoo.document.predicate.Predicate)", + "public com.yahoo.document.predicate.Negation setOperand(com.yahoo.document.predicate.Predicate)", + "public com.yahoo.document.predicate.Predicate getOperand()", + "public java.util.List getOperands()", + "public com.yahoo.document.predicate.Negation clone()", + "public int hashCode()", + "public boolean equals(java.lang.Object)", + "protected void appendTo(java.lang.StringBuilder)", + "public bridge synthetic com.yahoo.document.predicate.Predicate clone()", + "public bridge synthetic java.lang.Object clone()" + ], + "fields": [] + }, + "com.yahoo.document.predicate.Predicate": { + "superClass": "java.lang.Object", + "interfaces": [ + "java.lang.Cloneable" + ], + "attributes": [ + "public", + "abstract" + ], + "methods": [ + "public void <init>()", + "public com.yahoo.document.predicate.Predicate clone()", + "public final java.lang.String toString()", + "protected abstract void appendTo(java.lang.StringBuilder)", + "protected static void appendQuotedTo(java.lang.String, java.lang.StringBuilder)", + "public static java.lang.String asciiEncode(java.lang.String)", + "public static java.lang.String asciiDecode(java.lang.String)", + "public static com.yahoo.document.predicate.Predicate fromBinary(byte[])", + "public static com.yahoo.document.predicate.Predicate fromString(java.lang.String)", + "public bridge synthetic java.lang.Object clone()" + ], + "fields": [] + }, + "com.yahoo.document.predicate.PredicateHash": { + "superClass": "java.lang.Object", + "interfaces": [], + 
"attributes": [ + "public" + ], + "methods": [ + "public void <init>()", + "public static long hash64(java.lang.String)" + ], + "fields": [] + }, + "com.yahoo.document.predicate.PredicateOperator": { + "superClass": "com.yahoo.document.predicate.Predicate", + "interfaces": [], + "attributes": [ + "public", + "abstract" + ], + "methods": [ + "public void <init>()", + "public abstract java.util.List getOperands()" + ], + "fields": [] + }, + "com.yahoo.document.predicate.Predicates$FeatureBuilder": { + "superClass": "java.lang.Object", + "interfaces": [], + "attributes": [ + "public" + ], + "methods": [ + "public void <init>(java.lang.String)", + "public com.yahoo.document.predicate.FeatureRange lessThan(long)", + "public com.yahoo.document.predicate.FeatureRange lessThanOrEqualTo(long)", + "public com.yahoo.document.predicate.FeatureRange greaterThan(long)", + "public com.yahoo.document.predicate.FeatureRange greaterThanOrEqualTo(long)", + "public com.yahoo.document.predicate.FeatureRange inRange(long, long)", + "public com.yahoo.document.predicate.Negation notInRange(long, long)", + "public varargs com.yahoo.document.predicate.FeatureSet inSet(java.lang.String[])", + "public varargs com.yahoo.document.predicate.Negation notInSet(java.lang.String[])" + ], + "fields": [] + }, + "com.yahoo.document.predicate.Predicates": { + "superClass": "java.lang.Object", + "interfaces": [], + "attributes": [ + "public" + ], + "methods": [ + "public void <init>()", + "public static varargs com.yahoo.document.predicate.Conjunction and(com.yahoo.document.predicate.Predicate[])", + "public static varargs com.yahoo.document.predicate.Disjunction or(com.yahoo.document.predicate.Predicate[])", + "public static com.yahoo.document.predicate.Negation not(com.yahoo.document.predicate.Predicate)", + "public static com.yahoo.document.predicate.BooleanPredicate value(boolean)", + "public static com.yahoo.document.predicate.Predicates$FeatureBuilder feature(java.lang.String)" + ], + "fields": [] 
+ }, + "com.yahoo.document.predicate.RangeEdgePartition": { + "superClass": "com.yahoo.document.predicate.RangePartition", + "interfaces": [], + "attributes": [ + "public" + ], + "methods": [ + "public void <init>(java.lang.String, long, int, int)", + "public long getValue()", + "public int getLowerBound()", + "public int getUpperBound()", + "public com.yahoo.document.predicate.RangeEdgePartition clone()", + "public int hashCode()", + "public boolean equals(java.lang.Object)", + "protected void appendTo(java.lang.StringBuilder)", + "public long encodeBounds()", + "public bridge synthetic com.yahoo.document.predicate.RangePartition clone()", + "public bridge synthetic com.yahoo.document.predicate.Predicate clone()", + "public bridge synthetic java.lang.Object clone()" + ], + "fields": [] + }, + "com.yahoo.document.predicate.RangePartition": { + "superClass": "com.yahoo.document.predicate.PredicateValue", + "interfaces": [], + "attributes": [ + "public" + ], + "methods": [ + "public void <init>(java.lang.String)", + "public void <init>(java.lang.String, long, long, boolean)", + "public java.lang.String getLabel()", + "public com.yahoo.document.predicate.RangePartition clone()", + "public int hashCode()", + "public boolean equals(java.lang.Object)", + "protected void appendTo(java.lang.StringBuilder)", + "public bridge synthetic com.yahoo.document.predicate.Predicate clone()", + "public bridge synthetic java.lang.Object clone()" + ], + "fields": [] + }, + "com.yahoo.document.predicate.SimplePredicates": { + "superClass": "java.lang.Object", + "interfaces": [], + "attributes": [ + "public" + ], + "methods": [ + "public void <init>()", + "public static com.yahoo.document.predicate.Predicate newPredicate()", + "public static com.yahoo.document.predicate.Predicate newString(java.lang.String)", + "public static varargs java.util.List newStrings(java.lang.String[])" + ], + "fields": [] + } +}
\ No newline at end of file diff --git a/predicate-search-core/pom.xml b/predicate-search-core/pom.xml index 08354fd5826..c30cf4633a8 100644 --- a/predicate-search-core/pom.xml +++ b/predicate-search-core/pom.xml @@ -42,6 +42,10 @@ <build> <plugins> <plugin> + <groupId>com.yahoo.vespa</groupId> + <artifactId>abi-check-plugin</artifactId> + </plugin> + <plugin> <groupId>org.antlr</groupId> <artifactId>antlr3-maven-plugin</artifactId> <executions> diff --git a/predicate-search/abi-spec.json b/predicate-search/abi-spec.json new file mode 100644 index 00000000000..c110ffe7a43 --- /dev/null +++ b/predicate-search/abi-spec.json @@ -0,0 +1,159 @@ +{ + "com.yahoo.search.predicate.Config$Builder": { + "superClass": "java.lang.Object", + "interfaces": [], + "attributes": [ + "public" + ], + "methods": [ + "public void <init>()", + "public com.yahoo.search.predicate.Config$Builder setArity(int)", + "public com.yahoo.search.predicate.Config$Builder setLowerBound(long)", + "public com.yahoo.search.predicate.Config$Builder setUpperBound(long)", + "public com.yahoo.search.predicate.Config$Builder setUseConjunctionAlgorithm(boolean)", + "public com.yahoo.search.predicate.Config build()" + ], + "fields": [] + }, + "com.yahoo.search.predicate.Config": { + "superClass": "java.lang.Object", + "interfaces": [], + "attributes": [ + "public" + ], + "methods": [ + "public void writeToOutputStream(java.io.DataOutputStream)", + "public static com.yahoo.search.predicate.Config fromInputStream(java.io.DataInputStream)" + ], + "fields": [ + "public final int arity", + "public final long lowerBound", + "public final long upperBound", + "public final boolean useConjunctionAlgorithm" + ] + }, + "com.yahoo.search.predicate.Hit": { + "superClass": "java.lang.Object", + "interfaces": [ + "java.lang.Comparable" + ], + "attributes": [ + "public" + ], + "methods": [ + "public void <init>(int)", + "public void <init>(int, long)", + "public java.lang.String toString()", + "public boolean 
equals(java.lang.Object)", + "public int hashCode()", + "public int getDocId()", + "public long getSubquery()", + "public int compareTo(com.yahoo.search.predicate.Hit)", + "public bridge synthetic int compareTo(java.lang.Object)" + ], + "fields": [] + }, + "com.yahoo.search.predicate.PredicateIndex$Searcher": { + "superClass": "java.lang.Object", + "interfaces": [], + "attributes": [ + "public" + ], + "methods": [ + "public java.util.stream.Stream search(com.yahoo.search.predicate.PredicateQuery)" + ], + "fields": [] + }, + "com.yahoo.search.predicate.PredicateIndex": { + "superClass": "java.lang.Object", + "interfaces": [], + "attributes": [ + "public" + ], + "methods": [ + "public void rebuildPostingListCache()", + "public com.yahoo.search.predicate.PredicateIndex$Searcher searcher()", + "public void writeToOutputStream(java.io.DataOutputStream)", + "public static com.yahoo.search.predicate.PredicateIndex fromInputStream(java.io.DataInputStream)" + ], + "fields": [] + }, + "com.yahoo.search.predicate.PredicateIndexBuilder$PredicateIndexStats": { + "superClass": "java.lang.Object", + "interfaces": [], + "attributes": [ + "public" + ], + "methods": [ + "public void <init>(java.util.List, com.yahoo.search.predicate.index.SimpleIndex$Builder, com.yahoo.search.predicate.index.SimpleIndex$Builder, com.yahoo.search.predicate.index.PredicateIntervalStore$Builder, com.yahoo.search.predicate.index.conjunction.ConjunctionIndexBuilder, int, int)", + "public void putValues(java.util.Map)", + "public java.lang.String toString()" + ], + "fields": [] + }, + "com.yahoo.search.predicate.PredicateIndexBuilder": { + "superClass": "java.lang.Object", + "interfaces": [], + "attributes": [ + "public" + ], + "methods": [ + "public void <init>(int)", + "public void <init>(int, long, long)", + "public void <init>(com.yahoo.search.predicate.Config)", + "public void indexDocument(int, com.yahoo.document.predicate.Predicate)", + "public com.yahoo.search.predicate.PredicateIndex build()", + 
"public int getZeroConstraintDocCount()", + "public com.yahoo.search.predicate.PredicateIndexBuilder$PredicateIndexStats getStats()" + ], + "fields": [] + }, + "com.yahoo.search.predicate.PredicateQuery$Feature": { + "superClass": "java.lang.Object", + "interfaces": [], + "attributes": [ + "public" + ], + "methods": [ + "public void <init>(java.lang.String, java.lang.String, long)" + ], + "fields": [ + "public final java.lang.String key", + "public final java.lang.String value", + "public final long subqueryBitmap", + "public final long featureHash" + ] + }, + "com.yahoo.search.predicate.PredicateQuery$RangeFeature": { + "superClass": "java.lang.Object", + "interfaces": [], + "attributes": [ + "public" + ], + "methods": [ + "public void <init>(java.lang.String, long, long)" + ], + "fields": [ + "public final java.lang.String key", + "public final long value", + "public final long subqueryBitmap" + ] + }, + "com.yahoo.search.predicate.PredicateQuery": { + "superClass": "java.lang.Object", + "interfaces": [], + "attributes": [ + "public" + ], + "methods": [ + "public void <init>()", + "public void addFeature(java.lang.String, java.lang.String)", + "public void addFeature(java.lang.String, java.lang.String, long)", + "public void addRangeFeature(java.lang.String, long)", + "public void addRangeFeature(java.lang.String, long, long)", + "public java.util.List getFeatures()", + "public java.util.List getRangeFeatures()" + ], + "fields": [] + } +}
\ No newline at end of file diff --git a/predicate-search/pom.xml b/predicate-search/pom.xml index 02dd7d31ec1..3f0b8b3de4d 100644 --- a/predicate-search/pom.xml +++ b/predicate-search/pom.xml @@ -57,6 +57,10 @@ <build> <plugins> <plugin> + <groupId>com.yahoo.vespa</groupId> + <artifactId>abi-check-plugin</artifactId> + </plugin> + <plugin> <artifactId>maven-assembly-plugin</artifactId> <configuration> <descriptorRefs> diff --git a/predicate-search/src/main/java/com/yahoo/search/predicate/Config.java b/predicate-search/src/main/java/com/yahoo/search/predicate/Config.java index 9b347ebe652..6b731b6221d 100644 --- a/predicate-search/src/main/java/com/yahoo/search/predicate/Config.java +++ b/predicate-search/src/main/java/com/yahoo/search/predicate/Config.java @@ -1,7 +1,7 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.search.predicate; -import com.google.common.annotations.Beta; +import com.yahoo.api.annotations.Beta; import java.io.DataInputStream; import java.io.DataOutputStream; diff --git a/predicate-search/src/main/java/com/yahoo/search/predicate/Hit.java b/predicate-search/src/main/java/com/yahoo/search/predicate/Hit.java index 5314e8b8f5e..cb2cc538dc7 100644 --- a/predicate-search/src/main/java/com/yahoo/search/predicate/Hit.java +++ b/predicate-search/src/main/java/com/yahoo/search/predicate/Hit.java @@ -1,7 +1,7 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.search.predicate; -import com.google.common.annotations.Beta; +import com.yahoo.api.annotations.Beta; /** * Represents a hit from the predicate search algorithm. 
diff --git a/predicate-search/src/main/java/com/yahoo/search/predicate/PredicateIndex.java b/predicate-search/src/main/java/com/yahoo/search/predicate/PredicateIndex.java index f718ca4aba5..38cb56c2e2b 100644 --- a/predicate-search/src/main/java/com/yahoo/search/predicate/PredicateIndex.java +++ b/predicate-search/src/main/java/com/yahoo/search/predicate/PredicateIndex.java @@ -1,7 +1,7 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.search.predicate; -import com.google.common.annotations.Beta; +import com.yahoo.api.annotations.Beta; import com.yahoo.document.predicate.Predicate; import com.yahoo.search.predicate.index.*; import com.yahoo.search.predicate.index.conjunction.ConjunctionHit; diff --git a/predicate-search/src/main/java/com/yahoo/search/predicate/PredicateIndexBuilder.java b/predicate-search/src/main/java/com/yahoo/search/predicate/PredicateIndexBuilder.java index c1500299d02..d9153b24a87 100644 --- a/predicate-search/src/main/java/com/yahoo/search/predicate/PredicateIndexBuilder.java +++ b/predicate-search/src/main/java/com/yahoo/search/predicate/PredicateIndexBuilder.java @@ -1,7 +1,7 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.search.predicate; -import com.google.common.annotations.Beta; +import com.yahoo.api.annotations.Beta; import com.google.common.base.Preconditions; import com.google.common.primitives.Bytes; import com.google.common.primitives.Ints; diff --git a/predicate-search/src/main/java/com/yahoo/search/predicate/PredicateQuery.java b/predicate-search/src/main/java/com/yahoo/search/predicate/PredicateQuery.java index bb0b33e8522..13f0c268f15 100644 --- a/predicate-search/src/main/java/com/yahoo/search/predicate/PredicateQuery.java +++ b/predicate-search/src/main/java/com/yahoo/search/predicate/PredicateQuery.java @@ -1,7 +1,7 @@ // Copyright Yahoo. 
Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.search.predicate; -import com.google.common.annotations.Beta; +import com.yahoo.api.annotations.Beta; import java.util.ArrayList; import java.util.List; diff --git a/screwdriver.yaml b/screwdriver.yaml index 049656b4c00..14d9902d335 100644 --- a/screwdriver.yaml +++ b/screwdriver.yaml @@ -60,7 +60,7 @@ jobs: environment: LOCAL_MVN_REPO: "/tmp/vespa/mvnrepo" - VESPA_MAVEN_EXTRA_OPTS: "-Dmaven.repo.local=/tmp/vespa/mvnrepo -Dmaven.javadoc.skip=true -Dmaven.source.skip=true" + VESPA_MAVEN_EXTRA_OPTS: "-Dmaven.repo.local=/tmp/vespa/mvnrepo -Dmaven.source.skip=true" CCACHE_TMP_DIR: "/tmp/ccache_tmp" CCACHE_DATA_DIR: "/tmp/vespa/ccache" MAIN_CACHE_FILE: "/main_job_cache/vespa.tar" @@ -139,6 +139,7 @@ jobs: cd $WORKDIR/vespa export FACTORY_VESPA_VERSION=$VESPA_VERSION NUM_THREADS=$(( $(nproc) + 2 )) + time make -C client/go BIN=$WORKDIR/vespa-install/opt/vespa/bin SHARE=$WORKDIR/vespa-install/usr/share install-all time ./bootstrap.sh java time mvn -T $NUM_THREADS $VESPA_MAVEN_EXTRA_OPTS install cmake3 -DVESPA_UNPRIVILEGED=no . diff --git a/screwdriver/build-vespa.sh b/screwdriver/build-vespa.sh index 997422f3822..f321e3820fd 100755 --- a/screwdriver/build-vespa.sh +++ b/screwdriver/build-vespa.sh @@ -8,6 +8,7 @@ readonly NUM_THREADS=$(( $(nproc) + 2 )) source /etc/profile.d/enable-devtoolset-10.sh source /etc/profile.d/enable-rh-maven35.sh +source /etc/profile.d/enable-rh-git227.sh export MALLOC_ARENA_MAX=1 export MAVEN_OPTS="-Xss1m -Xms128m -Xmx2g" @@ -39,10 +40,10 @@ case $SHOULD_BUILD in mvn -V $VESPA_MAVEN_EXTRA_OPTS install ;; go) - make -C client/go -j ${NUM_THREADS} + make -C client/go install-all ;; *) - make -C client/go -j ${NUM_THREADS} + make -C client/go install-all ./bootstrap.sh java time mvn -V $VESPA_MAVEN_EXTRA_OPTS install cmake3 -DVESPA_UNPRIVILEGED=no . 
diff --git a/searchcommon/src/vespa/searchcommon/attribute/config.h b/searchcommon/src/vespa/searchcommon/attribute/config.h index e6a428e5843..f572f5038fc 100644 --- a/searchcommon/src/vespa/searchcommon/attribute/config.h +++ b/searchcommon/src/vespa/searchcommon/attribute/config.h @@ -6,10 +6,10 @@ #include "collectiontype.h" #include "hnsw_index_params.h" #include "predicate_params.h" -#include <vespa/searchcommon/common/compaction_strategy.h> #include <vespa/searchcommon/common/growstrategy.h> #include <vespa/searchcommon/common/dictionary_config.h> #include <vespa/eval/eval/value_type.h> +#include <vespa/vespalib/datastore/compaction_strategy.h> #include <cassert> #include <optional> @@ -23,6 +23,7 @@ namespace search::attribute { class Config { public: enum class Match { CASED, UNCASED }; + using CompactionStrategy = vespalib::datastore::CompactionStrategy; Config() noexcept; Config(BasicType bt) noexcept : Config(bt, CollectionType::SINGLE) { } Config(BasicType bt, CollectionType ct) noexcept : Config(bt, ct, false) { } diff --git a/searchcommon/src/vespa/searchcommon/attribute/hnsw_index_params.h b/searchcommon/src/vespa/searchcommon/attribute/hnsw_index_params.h index 77e994daa85..4f9d3c5593c 100644 --- a/searchcommon/src/vespa/searchcommon/attribute/hnsw_index_params.h +++ b/searchcommon/src/vespa/searchcommon/attribute/hnsw_index_params.h @@ -22,7 +22,7 @@ public: HnswIndexParams(uint32_t max_links_per_node_in, uint32_t neighbors_to_explore_at_insert_in, DistanceMetric distance_metric_in, - bool multi_threaded_indexing_in = false) + bool multi_threaded_indexing_in = false) noexcept : _max_links_per_node(max_links_per_node_in), _neighbors_to_explore_at_insert(neighbors_to_explore_at_insert_in), _distance_metric(distance_metric_in), diff --git a/searchcommon/src/vespa/searchcommon/common/CMakeLists.txt b/searchcommon/src/vespa/searchcommon/common/CMakeLists.txt index 77e638d7193..6cc02ae7884 100644 --- 
a/searchcommon/src/vespa/searchcommon/common/CMakeLists.txt +++ b/searchcommon/src/vespa/searchcommon/common/CMakeLists.txt @@ -1,7 +1,6 @@ # Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. vespa_add_library(searchcommon_searchcommon_common OBJECT SOURCES - compaction_strategy.cpp datatype.cpp dictionary_config.cpp growstrategy.cpp diff --git a/searchcommon/src/vespa/searchcommon/common/compaction_strategy.cpp b/searchcommon/src/vespa/searchcommon/common/compaction_strategy.cpp deleted file mode 100644 index 22f50ba3049..00000000000 --- a/searchcommon/src/vespa/searchcommon/common/compaction_strategy.cpp +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. - -#include "compaction_strategy.h" -#include <iostream> -namespace search { - -std::ostream& operator<<(std::ostream& os, const CompactionStrategy& compaction_strategy) -{ - os << "{maxDeadBytesRatio=" << compaction_strategy.getMaxDeadBytesRatio() << - ", maxDeadAddressSpaceRatio=" << compaction_strategy.getMaxDeadAddressSpaceRatio() << - "}"; - return os; -} - -} diff --git a/searchcore/CMakeLists.txt b/searchcore/CMakeLists.txt index e08b951d3f3..c63ccfe4e24 100644 --- a/searchcore/CMakeLists.txt +++ b/searchcore/CMakeLists.txt @@ -154,6 +154,7 @@ vespa_define_module( src/tests/proton/server/health_adapter src/tests/proton/server/memory_flush_config_updater src/tests/proton/server/memoryflush + src/tests/proton/server/shared_threading_service src/tests/proton/statusreport src/tests/proton/summaryengine src/tests/proton/verify_ranksetup diff --git a/searchcore/src/apps/tests/persistenceconformance_test.cpp b/searchcore/src/apps/tests/persistenceconformance_test.cpp index 8238eb21831..214c57557bc 100644 --- a/searchcore/src/apps/tests/persistenceconformance_test.cpp +++ b/searchcore/src/apps/tests/persistenceconformance_test.cpp @@ -3,15 +3,20 @@ #include 
<vespa/vespalib/testkit/testapp.h> #include <tests/proton/common/dummydbowner.h> +#include <vespa/config-attributes.h> +#include <vespa/config-bucketspaces.h> #include <vespa/config-imported-fields.h> +#include <vespa/config-indexschema.h> #include <vespa/config-rank-profiles.h> +#include <vespa/config-summary.h> #include <vespa/config-summarymap.h> #include <vespa/document/base/testdocman.h> +#include <vespa/document/config/documenttypes_config_fwd.h> +#include <vespa/document/repo/documenttyperepo.h> +#include <vespa/document/test/make_bucket_space.h> #include <vespa/fastos/file.h> #include <vespa/persistence/conformancetest/conformancetest.h> #include <vespa/persistence/dummyimpl/dummy_bucket_executor.h> -#include <vespa/document/repo/documenttyperepo.h> -#include <vespa/document/test/make_bucket_space.h> #include <vespa/searchcommon/common/schemaconfigurer.h> #include <vespa/searchcore/proton/common/alloc_config.h> #include <vespa/searchcore/proton/common/hw_info.h> @@ -28,13 +33,10 @@ #include <vespa/searchcore/proton/server/persistencehandlerproxy.h> #include <vespa/searchcore/proton/server/threading_service_config.h> #include <vespa/searchcore/proton/test/disk_mem_usage_notifier.h> +#include <vespa/searchcore/proton/test/mock_shared_threading_service.h> #include <vespa/searchlib/index/dummyfileheadercontext.h> #include <vespa/searchlib/transactionlog/translogserver.h> #include <vespa/searchsummary/config/config-juniperrc.h> -#include <vespa/config-bucketspaces.h> -#include <vespa/config-attributes.h> -#include <vespa/config-indexschema.h> -#include <vespa/config-summary.h> #include <vespa/vespalib/io/fileutil.h> #include <vespa/vespalib/util/size_literals.h> @@ -54,7 +56,6 @@ using std::shared_ptr; using document::BucketSpace; using document::DocumentType; using document::DocumentTypeRepo; -using document::DocumenttypesConfig; using document::TestDocMan; using document::test::makeBucketSpace; using search::TuneFileDocumentDB; @@ -174,6 +175,7 @@ private: 
mutable DummyWireService _metricsWireService; mutable MemoryConfigStores _config_stores; vespalib::ThreadStackExecutor _summaryExecutor; + MockSharedThreadingService _shared_service; storage::spi::dummy::DummyBucketExecutor _bucketExecutor; public: @@ -202,7 +204,7 @@ public: mgr.nextGeneration(0ms); return DocumentDB::create(_baseDir, mgr.getConfig(), _tlsSpec, _queryLimiter, _clock, docType, bucketSpace, *b->getProtonConfigSP(), const_cast<DocumentDBFactory &>(*this), - _summaryExecutor, _summaryExecutor, _bucketExecutor, _tls, _metricsWireService, + _shared_service, _bucketExecutor, _tls, _metricsWireService, _fileHeaderContext, _config_stores.getConfigStore(docType.toString()), std::make_shared<vespalib::ThreadStackExecutor>(16, 128_Ki), HwInfo()); } @@ -218,6 +220,7 @@ DocumentDBFactory::DocumentDBFactory(const vespalib::string &baseDir, int tlsLis _clock(), _metricsWireService(), _summaryExecutor(8, 128_Ki), + _shared_service(_summaryExecutor, _summaryExecutor), _bucketExecutor(2) {} DocumentDBFactory::~DocumentDBFactory() = default; diff --git a/searchcore/src/apps/vespa-dump-feed/vespa-dump-feed.cpp b/searchcore/src/apps/vespa-dump-feed/vespa-dump-feed.cpp index 8db828920c0..9819a1d50af 100644 --- a/searchcore/src/apps/vespa-dump-feed/vespa-dump-feed.cpp +++ b/searchcore/src/apps/vespa-dump-feed/vespa-dump-feed.cpp @@ -146,7 +146,7 @@ std::unique_ptr<CFG> getConfig() { } std::shared_ptr<const document::DocumentTypeRepo> getRepo() { - typedef document::DocumenttypesConfig DCFG; + typedef document::config::DocumenttypesConfig DCFG; std::unique_ptr<DCFG> dcfg = getConfig<DCFG>(); std::shared_ptr<const document::DocumentTypeRepo> ret; if (dcfg.get() != 0) { @@ -180,7 +180,7 @@ App::Main() fprintf(stderr, "input feed: %s\n", feedFile.c_str()); fprintf(stderr, "output directory: %s\n", dirName.c_str()); vespalib::mkdir(dirName); - typedef document::DocumenttypesConfig DCFG; + typedef document::config::DocumenttypesConfig DCFG; if (!writeConfig(getConfig<DCFG>(), 
dirName)) { fprintf(stderr, "error: could not save config to disk\n"); return 1; diff --git a/searchcore/src/apps/vespa-feed-bm/vespa_feed_bm.cpp b/searchcore/src/apps/vespa-feed-bm/vespa_feed_bm.cpp index 1ccd8221aff..1488e87afdf 100644 --- a/searchcore/src/apps/vespa-feed-bm/vespa_feed_bm.cpp +++ b/searchcore/src/apps/vespa-feed-bm/vespa_feed_bm.cpp @@ -1,5 +1,6 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +#include <vespa/document/config/documenttypes_config_fwd.h> #include <vespa/document/repo/configbuilder.h> #include <vespa/document/repo/document_type_repo_factory.h> #include <vespa/document/repo/documenttyperepo.h> @@ -36,8 +37,6 @@ using namespace std::chrono_literals; using document::DocumentTypeRepo; using document::DocumentTypeRepoFactory; -using document::DocumenttypesConfig; -using document::DocumenttypesConfigBuilder; using search::bmcluster::AvgSampler; using search::bmcluster::BmClusterController; using search::bmcluster::IBmFeedHandler; diff --git a/searchcore/src/apps/vespa-redistribute-bm/vespa_redistribute_bm.cpp b/searchcore/src/apps/vespa-redistribute-bm/vespa_redistribute_bm.cpp index f9833b430a0..c1dbe9b2bd2 100644 --- a/searchcore/src/apps/vespa-redistribute-bm/vespa_redistribute_bm.cpp +++ b/searchcore/src/apps/vespa-redistribute-bm/vespa_redistribute_bm.cpp @@ -1,5 +1,6 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+#include <vespa/document/config/documenttypes_config_fwd.h> #include <vespa/document/repo/configbuilder.h> #include <vespa/document/repo/document_type_repo_factory.h> #include <vespa/document/repo/documenttyperepo.h> @@ -38,8 +39,6 @@ using namespace std::chrono_literals; using document::DocumentTypeRepo; using document::DocumentTypeRepoFactory; -using document::DocumenttypesConfig; -using document::DocumenttypesConfigBuilder; using search::bmcluster::AvgSampler; using search::bmcluster::BmClusterController; using search::bmcluster::IBmFeedHandler; diff --git a/searchcore/src/apps/vespa-transactionlog-inspect/vespa-transactionlog-inspect.cpp b/searchcore/src/apps/vespa-transactionlog-inspect/vespa-transactionlog-inspect.cpp index 5c777baa3bb..a7f23c382e9 100644 --- a/searchcore/src/apps/vespa-transactionlog-inspect/vespa-transactionlog-inspect.cpp +++ b/searchcore/src/apps/vespa-transactionlog-inspect/vespa-transactionlog-inspect.cpp @@ -8,6 +8,7 @@ #include <vespa/vespalib/util/programoptions.h> #include <vespa/vespalib/util/xmlstream.h> #include <vespa/vespalib/util/time.h> +#include <vespa/document/config/documenttypes_config_fwd.h> #include <vespa/document/config/config-documenttypes.h> #include <vespa/document/repo/documenttyperepo.h> #include <vespa/document/fieldvalue/document.h> @@ -25,7 +26,6 @@ using namespace search; using namespace search::common; using namespace search::transactionlog; -using document::DocumenttypesConfig; using document::DocumentTypeRepo; typedef std::shared_ptr<DocumenttypesConfig> DocumenttypesConfigSP; diff --git a/searchcore/src/tests/proton/attribute/attribute_populator/attribute_populator_test.cpp b/searchcore/src/tests/proton/attribute/attribute_populator/attribute_populator_test.cpp index 62145a118b0..4ed57fef391 100644 --- a/searchcore/src/tests/proton/attribute/attribute_populator/attribute_populator_test.cpp +++ b/searchcore/src/tests/proton/attribute/attribute_populator/attribute_populator_test.cpp @@ -1,5 +1,6 @@ // 
Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +#include <vespa/document/config/documenttypes_config_fwd.h> #include <vespa/document/fieldvalue/intfieldvalue.h> #include <vespa/document/repo/configbuilder.h> #include <vespa/searchcore/proton/attribute/attribute_populator.h> diff --git a/searchcore/src/tests/proton/attribute/attribute_test.cpp b/searchcore/src/tests/proton/attribute/attribute_test.cpp index 3397b424ed0..1de56802484 100644 --- a/searchcore/src/tests/proton/attribute/attribute_test.cpp +++ b/searchcore/src/tests/proton/attribute/attribute_test.cpp @@ -82,6 +82,7 @@ using std::string; using vespalib::ForegroundTaskExecutor; using vespalib::ForegroundThreadExecutor; using vespalib::SequencedTaskExecutorObserver; +using vespalib::datastore::CompactionStrategy; using vespalib::eval::SimpleValue; using vespalib::eval::TensorSpec; using vespalib::eval::Value; @@ -541,7 +542,7 @@ public: AttributeCollectionSpecFactory _factory; AttributeCollectionSpecTest(bool fastAccessOnly) : _builder(), - _factory(AllocStrategy(search::GrowStrategy(), search::CompactionStrategy(), 100), fastAccessOnly) + _factory(AllocStrategy(search::GrowStrategy(), CompactionStrategy(), 100), fastAccessOnly) { addAttribute("a1", false); addAttribute("a2", true); diff --git a/searchcore/src/tests/proton/attribute/attributeflush_test.cpp b/searchcore/src/tests/proton/attribute/attributeflush_test.cpp index e29040a7984..e85bf3c4baa 100644 --- a/searchcore/src/tests/proton/attribute/attributeflush_test.cpp +++ b/searchcore/src/tests/proton/attribute/attributeflush_test.cpp @@ -19,6 +19,7 @@ #include <vespa/vespalib/util/foregroundtaskexecutor.h> #include <vespa/vespalib/util/size_literals.h> #include <vespa/vespalib/util/threadstackexecutor.h> +#include <vespa/vespalib/util/idestructorcallback.h> #include <thread> #include <vespa/log/log.h> @@ -568,13 +569,13 @@ Test::requireThatShrinkWorks() EXPECT_FALSE(av->canShrinkLidSpace()); 
EXPECT_EQUAL(1000u, av->getNumDocs()); EXPECT_EQUAL(100u, av->getCommittedDocIdLimit()); - aw.heartBeat(51); + aw.heartBeat(51, IDestructorCallback::SP()); EXPECT_TRUE(av->wantShrinkLidSpace()); EXPECT_FALSE(av->canShrinkLidSpace()); EXPECT_EQUAL(ft->getApproxMemoryGain().getBefore(), ft->getApproxMemoryGain().getAfter()); g.reset(); - aw.heartBeat(52); + aw.heartBeat(52, IDestructorCallback::SP()); EXPECT_TRUE(av->wantShrinkLidSpace()); EXPECT_TRUE(av->canShrinkLidSpace()); EXPECT_TRUE(ft->getApproxMemoryGain().getBefore() > diff --git a/searchcore/src/tests/proton/common/alloc_config/alloc_config_test.cpp b/searchcore/src/tests/proton/common/alloc_config/alloc_config_test.cpp index 1483a0bd653..59503464222 100644 --- a/searchcore/src/tests/proton/common/alloc_config/alloc_config_test.cpp +++ b/searchcore/src/tests/proton/common/alloc_config/alloc_config_test.cpp @@ -7,8 +7,8 @@ using proton::AllocConfig; using proton::AllocStrategy; using proton::SubDbType; -using search::CompactionStrategy; using search::GrowStrategy; +using vespalib::datastore::CompactionStrategy; namespace { diff --git a/searchcore/src/tests/proton/common/cachedselect_test.cpp b/searchcore/src/tests/proton/common/cachedselect_test.cpp index 5799753f075..56ae361735e 100644 --- a/searchcore/src/tests/proton/common/cachedselect_test.cpp +++ b/searchcore/src/tests/proton/common/cachedselect_test.cpp @@ -1,6 +1,7 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
#include <vespa/document/base/documentid.h> +#include <vespa/document/config/documenttypes_config_fwd.h> #include <vespa/document/datatype/documenttype.h> #include <vespa/document/fieldvalue/document.h> #include <vespa/document/fieldvalue/intfieldvalue.h> diff --git a/searchcore/src/tests/proton/common/selectpruner_test.cpp b/searchcore/src/tests/proton/common/selectpruner_test.cpp index 3c793c4e6c3..c8c474e21bc 100644 --- a/searchcore/src/tests/proton/common/selectpruner_test.cpp +++ b/searchcore/src/tests/proton/common/selectpruner_test.cpp @@ -1,6 +1,7 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. #include <vespa/vespalib/stllike/string.h> #include <vespa/vespalib/testkit/testapp.h> +#include <vespa/document/config/documenttypes_config_fwd.h> #include <vespa/document/repo/configbuilder.h> #include <vespa/document/repo/documenttyperepo.h> #include <vespa/document/datatype/documenttype.h> diff --git a/searchcore/src/tests/proton/docsummary/docsummary.cpp b/searchcore/src/tests/proton/docsummary/docsummary.cpp index 5c3fe94a8d7..7eda54cdbee 100644 --- a/searchcore/src/tests/proton/docsummary/docsummary.cpp +++ b/searchcore/src/tests/proton/docsummary/docsummary.cpp @@ -1,50 +1,54 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
#include <tests/proton/common/dummydbowner.h> +#include <vespa/config-bucketspaces.h> #include <vespa/config/helper/configgetter.hpp> +#include <vespa/document/config/documenttypes_config_fwd.h> +#include <vespa/document/repo/documenttyperepo.h> +#include <vespa/document/test/make_bucket_space.h> #include <vespa/eval/eval/simple_value.h> #include <vespa/eval/eval/tensor_spec.h> -#include <vespa/eval/eval/value.h> #include <vespa/eval/eval/test/value_compare.h> -#include <vespa/document/repo/documenttyperepo.h> -#include <vespa/document/test/make_bucket_space.h> +#include <vespa/eval/eval/value.h> +#include <vespa/persistence/dummyimpl/dummy_bucket_executor.h> #include <vespa/searchcore/proton/attribute/attribute_writer.h> -#include <vespa/searchcore/proton/test/bucketfactory.h> #include <vespa/searchcore/proton/docsummary/docsumcontext.h> #include <vespa/searchcore/proton/docsummary/documentstoreadapter.h> #include <vespa/searchcore/proton/docsummary/summarymanager.h> #include <vespa/searchcore/proton/documentmetastore/documentmetastore.h> #include <vespa/searchcore/proton/feedoperation/putoperation.h> +#include <vespa/searchcore/proton/matching/querylimiter.h> #include <vespa/searchcore/proton/metrics/metricswireservice.h> #include <vespa/searchcore/proton/server/bootstrapconfig.h> #include <vespa/searchcore/proton/server/documentdb.h> -#include <vespa/searchcore/proton/server/feedhandler.h> #include <vespa/searchcore/proton/server/documentdbconfigmanager.h> +#include <vespa/searchcore/proton/server/feedhandler.h> #include <vespa/searchcore/proton/server/idocumentsubdb.h> #include <vespa/searchcore/proton/server/memoryconfigstore.h> #include <vespa/searchcore/proton/server/searchview.h> #include <vespa/searchcore/proton/server/summaryadapter.h> -#include <vespa/searchcore/proton/matching/querylimiter.h> -#include <vespa/persistence/dummyimpl/dummy_bucket_executor.h> -#include <vespa/vespalib/util/destructor_callbacks.h> +#include 
<vespa/searchcore/proton/test/bucketfactory.h> +#include <vespa/searchcore/proton/test/mock_shared_threading_service.h> #include <vespa/searchlib/engine/docsumapi.h> #include <vespa/searchlib/index/docbuilder.h> #include <vespa/searchlib/index/dummyfileheadercontext.h> #include <vespa/searchlib/tensor/tensor_attribute.h> #include <vespa/searchlib/transactionlog/nosyncproxy.h> #include <vespa/searchlib/transactionlog/translogserver.h> -#include <vespa/vespalib/data/slime/slime.h> -#include <vespa/vespalib/data/slime/json_format.h> #include <vespa/vespalib/data/simple_buffer.h> +#include <vespa/vespalib/data/slime/json_format.h> +#include <vespa/vespalib/data/slime/slime.h> #include <vespa/vespalib/encoding/base64.h> -#include <vespa/vespalib/util/size_literals.h> -#include <vespa/config-bucketspaces.h> #include <vespa/vespalib/testkit/testapp.h> +#include <vespa/vespalib/util/destructor_callbacks.h> +#include <vespa/vespalib/util/size_literals.h> #include <regex> #include <vespa/log/log.h> LOG_SETUP("docsummary_test"); +using config::ConfigGetter; + using namespace cloud::config::filedistribution; using namespace document; using namespace search::docsummary; @@ -55,7 +59,6 @@ using namespace search; using namespace std::chrono_literals; using vespalib::IDestructorCallback; -using document::DocumenttypesConfig; using document::test::makeBucketSpace; using search::TuneFileDocumentDB; using search::index::DummyFileHeaderContext; @@ -176,12 +179,13 @@ public: DummyFileHeaderContext _fileHeaderContext; TransLogServer _tls; vespalib::ThreadStackExecutor _summaryExecutor; + MockSharedThreadingService _shared_service; storage::spi::dummy::DummyBucketExecutor _bucketExecutor; bool _mkdirOk; matching::QueryLimiter _queryLimiter; vespalib::Clock _clock; DummyWireService _dummy; - config::DirSpec _spec; + ::config::DirSpec _spec; DocumentDBConfigHelper _configMgr; DocumentDBConfig::DocumenttypesConfigSP _documenttypesConfig; const std::shared_ptr<const DocumentTypeRepo> _repo; 
@@ -196,6 +200,7 @@ public: _fileHeaderContext(), _tls("tmp", 9013, ".", _fileHeaderContext), _summaryExecutor(8, 128_Ki), + _shared_service(_summaryExecutor, _summaryExecutor), _bucketExecutor(2), _mkdirOk(FastOS_File::MakeDirectory("tmpdb")), _queryLimiter(), @@ -224,7 +229,7 @@ public: } _ddb = DocumentDB::create("tmpdb", _configMgr.getConfig(), "tcp/localhost:9013", _queryLimiter, _clock, DocTypeName(docTypeName), makeBucketSpace(), *b->getProtonConfigSP(), *this, - _summaryExecutor, _summaryExecutor, _bucketExecutor, _tls, _dummy, _fileHeaderContext, + _shared_service, _bucketExecutor, _tls, _dummy, _fileHeaderContext, std::make_unique<MemoryConfigStore>(), std::make_shared<vespalib::ThreadStackExecutor>(16, 128_Ki), _hwInfo), _ddb->start(); @@ -1124,12 +1129,12 @@ Fixture::Fixture() _markupFields() { std::string cfgId("summary"); - _summaryCfg = config::ConfigGetter<vespa::config::search::SummaryConfig>::getConfig( - cfgId, config::FileSpec(TEST_PATH("summary.cfg"))); + _summaryCfg = ConfigGetter<vespa::config::search::SummaryConfig>::getConfig( + cfgId, ::config::FileSpec(TEST_PATH("summary.cfg"))); _resultCfg.ReadConfig(*_summaryCfg, cfgId.c_str()); std::string mapCfgId("summarymap"); - std::unique_ptr<vespa::config::search::SummarymapConfig> mapCfg = config::ConfigGetter<vespa::config::search::SummarymapConfig>::getConfig( - mapCfgId, config::FileSpec(TEST_PATH("summarymap.cfg"))); + std::unique_ptr<vespa::config::search::SummarymapConfig> mapCfg = ::config::ConfigGetter<vespa::config::search::SummarymapConfig>::getConfig( + mapCfgId, ::config::FileSpec(TEST_PATH("summarymap.cfg"))); for (size_t i = 0; i < mapCfg->override.size(); ++i) { const vespa::config::search::SummarymapConfig::Override & o = mapCfg->override[i]; if (o.command == "dynamicteaser") { diff --git a/searchcore/src/tests/proton/docsummary/summaryfieldconverter_test.cpp b/searchcore/src/tests/proton/docsummary/summaryfieldconverter_test.cpp index 1fe32c8c1b3..06766ba370a 100644 --- 
a/searchcore/src/tests/proton/docsummary/summaryfieldconverter_test.cpp +++ b/searchcore/src/tests/proton/docsummary/summaryfieldconverter_test.cpp @@ -8,6 +8,7 @@ #include <vespa/document/base/documentid.h> #include <vespa/document/base/exceptions.h> #include <vespa/document/base/field.h> +#include <vespa/document/config/documenttypes_config_fwd.h> #include <vespa/document/datatype/annotationtype.h> #include <vespa/document/datatype/arraydatatype.h> #include <vespa/document/datatype/datatype.h> @@ -61,8 +62,6 @@ using document::Document; using document::DocumentId; using document::DocumentType; using document::DocumentTypeRepo; -using document::DocumenttypesConfig; -using document::DocumenttypesConfigBuilder; using document::DoubleFieldValue; using document::FeatureSet; using document::Field; diff --git a/searchcore/src/tests/proton/documentdb/buckethandler/buckethandler_test.cpp b/searchcore/src/tests/proton/documentdb/buckethandler/buckethandler_test.cpp index 29748a2010c..cadfa8cd72f 100644 --- a/searchcore/src/tests/proton/documentdb/buckethandler/buckethandler_test.cpp +++ b/searchcore/src/tests/proton/documentdb/buckethandler/buckethandler_test.cpp @@ -140,6 +140,11 @@ struct Fixture setNodeUp(bool value) { _calc->setNodeUp(value); + _calc->setNodeMaintenance(false); + _handler.notifyClusterStateChanged(_calc); + } + void setNodeMaintenance(bool value) { + _calc->setNodeMaintenance(value); _handler.notifyClusterStateChanged(_calc); } }; @@ -223,7 +228,7 @@ TEST_F("require that unready bucket can be reported as active", Fixture) } -TEST_F("require that node being down deactivates buckets", Fixture) +TEST_F("node going down (but not into maintenance state) deactivates all buckets", Fixture) { f._handler.handleSetCurrentState(f._ready.bucket(2), BucketInfo::ACTIVE, f._genResult); @@ -252,6 +257,42 @@ TEST_F("require that node being down deactivates buckets", Fixture) EXPECT_EQUAL(true, f._bucketInfo.getInfo().isActive()); } +TEST_F("node going into maintenance 
state does _not_ deactivate any buckets", Fixture) +{ + f._handler.handleSetCurrentState(f._ready.bucket(2), + BucketInfo::ACTIVE, f._genResult); + f.sync(); + f.setNodeMaintenance(true); + f.sync(); + f.handleGetBucketInfo(f._ready.bucket(2)); + EXPECT_TRUE(f._bucketInfo.getInfo().isActive()); +} + +TEST_F("node going from maintenance to up state deactivates all buckets", Fixture) +{ + f._handler.handleSetCurrentState(f._ready.bucket(2), + BucketInfo::ACTIVE, f._genResult); + f.sync(); + f.setNodeMaintenance(true); + f.sync(); + f.setNodeUp(true); + f.sync(); + f.handleGetBucketInfo(f._ready.bucket(2)); + EXPECT_FALSE(f._bucketInfo.getInfo().isActive()); +} + +TEST_F("node going from maintenance to down state deactivates all buckets", Fixture) +{ + f._handler.handleSetCurrentState(f._ready.bucket(2), + BucketInfo::ACTIVE, f._genResult); + f.sync(); + f.setNodeMaintenance(true); + f.sync(); + f.setNodeUp(false); + f.sync(); + f.handleGetBucketInfo(f._ready.bucket(2)); + EXPECT_FALSE(f._bucketInfo.getInfo().isActive()); +} TEST_MAIN() { diff --git a/searchcore/src/tests/proton/documentdb/combiningfeedview/combiningfeedview_test.cpp b/searchcore/src/tests/proton/documentdb/combiningfeedview/combiningfeedview_test.cpp index 5ef02f60152..a57787d417e 100644 --- a/searchcore/src/tests/proton/documentdb/combiningfeedview/combiningfeedview_test.cpp +++ b/searchcore/src/tests/proton/documentdb/combiningfeedview/combiningfeedview_test.cpp @@ -23,18 +23,10 @@ using namespace proton; typedef std::vector<IFeedView::SP> FeedViewVector; -struct MyStreamHandler : public NewConfigOperation::IStreamHandler -{ - void serializeConfig(SerialNum, vespalib::nbostream &) override {} - void deserializeConfig(SerialNum, vespalib::nbostream &) override {} -}; - - struct MyFeedView : public test::DummyFeedView { typedef std::shared_ptr<MyFeedView> SP; DocumentMetaStore _metaStore; - MyStreamHandler _streamHandler; uint32_t _preparePut; uint32_t _handlePut; uint32_t _prepareRemove; @@ -56,7 
+48,6 @@ struct MyFeedView : public test::DummyFeedView DocumentMetaStore::getFixedName(), search::GrowStrategy(), subDbType), - _streamHandler(), _preparePut(0), _handlePut(0), _prepareRemove(0), @@ -82,12 +73,12 @@ struct MyFeedView : public test::DummyFeedView void prepareRemove(RemoveOperation &) override { ++_prepareRemove; } void handleRemove(FeedToken, const RemoveOperation &) override { ++_handleRemove; } void prepareDeleteBucket(DeleteBucketOperation &) override { ++_prepareDeleteBucket; } - void handleDeleteBucket(const DeleteBucketOperation &) override { ++_handleDeleteBucket; } + void handleDeleteBucket(const DeleteBucketOperation &, DoneCallback) override { ++_handleDeleteBucket; } void prepareMove(MoveOperation &) override { ++_prepareMove; } - void handleMove(const MoveOperation &, IDestructorCallback::SP) override { ++_handleMove; } - void heartBeat(SerialNum) override { ++_heartBeat; } - void handlePruneRemovedDocuments(const PruneRemovedDocumentsOperation &) override { ++_handlePrune; } - void handleCompactLidSpace(const CompactLidSpaceOperation &op) override { + void handleMove(const MoveOperation &, DoneCallback) override { ++_handleMove; } + void heartBeat(SerialNum, DoneCallback) override { ++_heartBeat; } + void handlePruneRemovedDocuments(const PruneRemovedDocumentsOperation &, DoneCallback) override { ++_handlePrune; } + void handleCompactLidSpace(const CompactLidSpaceOperation &op, DoneCallback) override { _wantedLidLimit = op.getLidLimit(); } }; @@ -362,7 +353,7 @@ TEST_F("require that delete bucket is sent to all feed views", Fixture) EXPECT_EQUAL(1u, f._ready._view->_prepareDeleteBucket); EXPECT_EQUAL(1u, f._removed._view->_prepareDeleteBucket); EXPECT_EQUAL(1u, f._notReady._view->_prepareDeleteBucket); - f._view.handleDeleteBucket(op); + f._view.handleDeleteBucket(op, IDestructorCallback::SP()); EXPECT_EQUAL(1u, f._ready._view->_handleDeleteBucket); EXPECT_EQUAL(1u, f._removed._view->_handleDeleteBucket); EXPECT_EQUAL(1u, 
f._notReady._view->_handleDeleteBucket); @@ -371,7 +362,7 @@ TEST_F("require that delete bucket is sent to all feed views", Fixture) TEST_F("require that heart beat is sent to all feed views", Fixture) { - f._view.heartBeat(5); + f._view.heartBeat(5, IDestructorCallback::SP()); EXPECT_EQUAL(1u, f._ready._view->_heartBeat); EXPECT_EQUAL(1u, f._removed._view->_heartBeat); EXPECT_EQUAL(1u, f._notReady._view->_heartBeat); @@ -381,7 +372,7 @@ TEST_F("require that heart beat is sent to all feed views", Fixture) TEST_F("require that prune removed documents is sent to removed view", Fixture) { PruneRemovedDocumentsOperation op; - f._view.handlePruneRemovedDocuments(op); + f._view.handlePruneRemovedDocuments(op, IDestructorCallback::SP()); EXPECT_EQUAL(0u, f._ready._view->_handlePrune); EXPECT_EQUAL(1u, f._removed._view->_handlePrune); EXPECT_EQUAL(0u, f._notReady._view->_handlePrune); @@ -429,7 +420,7 @@ TEST_F("require that calculator can be updated", Fixture) TEST_F("require that compactLidSpace() is sent to correct feed view", Fixture) { - f._view.handleCompactLidSpace(CompactLidSpaceOperation(1, 99)); + f._view.handleCompactLidSpace(CompactLidSpaceOperation(1, 99), IDestructorCallback::SP()); EXPECT_EQUAL(0u, f._ready._view->_wantedLidLimit); EXPECT_EQUAL(99u, f._removed._view->_wantedLidLimit); EXPECT_EQUAL(0u, f._notReady._view->_wantedLidLimit); diff --git a/searchcore/src/tests/proton/documentdb/configurer/configurer_test.cpp b/searchcore/src/tests/proton/documentdb/configurer/configurer_test.cpp index cd694cb938c..b597bc18cc5 100644 --- a/searchcore/src/tests/proton/documentdb/configurer/configurer_test.cpp +++ b/searchcore/src/tests/proton/documentdb/configurer/configurer_test.cpp @@ -2,6 +2,7 @@ #include <vespa/vespalib/testkit/testapp.h> +#include <vespa/document/config/documenttypes_config_fwd.h> #include <vespa/document/repo/documenttyperepo.h> #include <vespa/searchcore/proton/attribute/attribute_writer.h> #include 
<vespa/searchcore/proton/attribute/attributemanager.h> diff --git a/searchcore/src/tests/proton/documentdb/document_scan_iterator/document_scan_iterator_test.cpp b/searchcore/src/tests/proton/documentdb/document_scan_iterator/document_scan_iterator_test.cpp index 73160d1db94..4825dc6e8a7 100644 --- a/searchcore/src/tests/proton/documentdb/document_scan_iterator/document_scan_iterator_test.cpp +++ b/searchcore/src/tests/proton/documentdb/document_scan_iterator/document_scan_iterator_test.cpp @@ -49,19 +49,19 @@ struct Fixture } LidSet retval; for (uint32_t i = 0; i < count; ++i) { - uint32_t lid = next(compactLidLimit, false); + uint32_t lid = next(compactLidLimit); retval.insert(lid); EXPECT_TRUE(_itr->valid() || lid <= compactLidLimit); } - EXPECT_EQUAL(0u, next(compactLidLimit, false)); + EXPECT_EQUAL(0u, next(compactLidLimit)); EXPECT_FALSE(_itr->valid()); return retval; } - uint32_t next(uint32_t compactLidLimit, bool retry = false) { + uint32_t next(uint32_t compactLidLimit) { if (!_itr) { _itr = std::make_unique<DocumentScanIterator>(_metaStore); } - return _itr->next(compactLidLimit, retry).lid; + return _itr->next(compactLidLimit).lid; } }; @@ -82,14 +82,6 @@ TEST_F("require that only lids > lid limit are returned", Fixture) assertLidSet({5,6,7,8}, f.scan(4, 4)); } -TEST_F("require that we start scan at previous doc if retry is set", Fixture) -{ - f.add({1,2,3,4,5,6,7,8}); - uint32_t lid1 = f.next(4, false); - uint32_t lid2 = f.next(4, true); - EXPECT_EQUAL(lid1, lid2); -} - TEST_MAIN() { TEST_RUN_ALL(); diff --git a/searchcore/src/tests/proton/documentdb/document_subdbs/document_subdbs_test.cpp b/searchcore/src/tests/proton/documentdb/document_subdbs/document_subdbs_test.cpp index 27ee7dee3e9..1851455e321 100644 --- a/searchcore/src/tests/proton/documentdb/document_subdbs/document_subdbs_test.cpp +++ b/searchcore/src/tests/proton/documentdb/document_subdbs/document_subdbs_test.cpp @@ -60,6 +60,7 @@ using storage::spi::Timestamp; using 
vespa::config::search::core::ProtonConfig; using vespa::config::content::core::BucketspacesConfig; using vespalib::mkdir; +using vespalib::datastore::CompactionStrategy; using proton::index::IndexConfig; typedef StoreOnlyDocSubDB::Config StoreOnlyConfig; @@ -234,8 +235,7 @@ MySearchableContext::MySearchableContext(IThreadingService &writeService, IBucketDBHandlerInitializer & bucketDBHandlerInitializer) : _fastUpdCtx(writeService, bucketDB, bucketDBHandlerInitializer), _queryLimiter(), _clock(), - _ctx(_fastUpdCtx._ctx, _queryLimiter, - _clock, dynamic_cast<vespalib::SyncableThreadExecutor &>(writeService.shared())) + _ctx(_fastUpdCtx._ctx, _queryLimiter, _clock, writeService.shared()) {} MySearchableContext::~MySearchableContext() = default; @@ -275,7 +275,7 @@ struct MyConfigSnapshot std::make_shared<FiledistributorrpcConfig>(), std::make_shared<BucketspacesConfig>(), tuneFileDocumentDB, HwInfo()); - config::DirSpec spec(cfgDir); + ::config::DirSpec spec(cfgDir); DocumentDBConfigHelper mgr(spec, "searchdocument"); mgr.forwardConfig(_bootstrap); mgr.nextGeneration(1ms); @@ -313,14 +313,22 @@ struct FixtureBase init(); } ~FixtureBase() { - _writeService.sync_all_executors(); - _writeService.master().execute(makeLambdaTask([this]() { _subDb.close(); })); - _writeService.sync_all_executors(); + _writeService.master().execute(makeLambdaTask([this]() { _subDb.close(); })); + _writeService.shutdown(); + } + void setBucketStateCalculator(const std::shared_ptr<IBucketStateCalculator> & calc) { + vespalib::Gate gate; + _subDb.setBucketStateCalculator(calc, std::make_shared<vespalib::GateCallback>(gate)); + gate.await(); } template <typename FunctionType> void runInMasterAndSync(FunctionType func) { proton::test::runInMasterAndSync(_writeService, func); } + template <typename FunctionType> + void runInMaster(FunctionType func) { + proton::test::runInMaster(_writeService, func); + } void init() { DocumentSubDbInitializer::SP task = _subDb.createInitializer(*_snapshot->_cfg, 
Traits::configSerial(), IndexConfig()); @@ -337,7 +345,7 @@ struct FixtureBase runInMasterAndSync([&]() { performReconfig(serialNum, reconfigSchema, reconfigConfigDir); }); } void performReconfig(SerialNum serialNum, const Schema &reconfigSchema, const vespalib::string &reconfigConfigDir) { - MyConfigSnapshot::UP newCfg(new MyConfigSnapshot(reconfigSchema, reconfigConfigDir)); + auto newCfg = std::make_unique<MyConfigSnapshot>(reconfigSchema, reconfigConfigDir); DocumentDBConfig::ComparisonResult cmpResult; cmpResult.attributesChanged = true; cmpResult.documenttypesChanged = true; @@ -557,7 +565,7 @@ TEST_F("require that attribute manager can be reconfigured", SearchableFixture) TEST_F("require that subdb reflect retirement", FastAccessFixture) { - search::CompactionStrategy cfg(0.1, 0.3); + CompactionStrategy cfg(0.1, 0.3); EXPECT_FALSE(f._subDb.isNodeRetired()); auto unretired_cfg = f._subDb.computeCompactionStrategy(cfg); @@ -565,22 +573,22 @@ TEST_F("require that subdb reflect retirement", FastAccessFixture) auto calc = std::make_shared<proton::test::BucketStateCalculator>(); calc->setNodeRetired(true); - f._subDb.setBucketStateCalculator(calc); + f.setBucketStateCalculator(calc); EXPECT_TRUE(f._subDb.isNodeRetired()); auto retired_cfg = f._subDb.computeCompactionStrategy(cfg); EXPECT_TRUE(cfg != retired_cfg); - EXPECT_TRUE(search::CompactionStrategy(0.5, 0.5) == retired_cfg); + EXPECT_TRUE(CompactionStrategy(0.5, 0.5) == retired_cfg); calc->setNodeRetired(false); - f._subDb.setBucketStateCalculator(calc); + f.setBucketStateCalculator(calc); EXPECT_FALSE(f._subDb.isNodeRetired()); unretired_cfg = f._subDb.computeCompactionStrategy(cfg); EXPECT_TRUE(cfg == unretired_cfg); } TEST_F("require that attribute compaction config reflect retirement", FastAccessFixture) { - search::CompactionStrategy default_cfg(0.05, 0.2); - search::CompactionStrategy retired_cfg(0.5, 0.5); + CompactionStrategy default_cfg(0.05, 0.2); + CompactionStrategy retired_cfg(0.5, 0.5); auto 
guard = f._subDb.getAttributeManager()->getAttribute("attr1"); EXPECT_EQUAL(default_cfg, (*guard)->getConfig().getCompactionStrategy()); @@ -588,21 +596,18 @@ TEST_F("require that attribute compaction config reflect retirement", FastAccess auto calc = std::make_shared<proton::test::BucketStateCalculator>(); calc->setNodeRetired(true); - f._subDb.setBucketStateCalculator(calc); - f._writeService.sync_all_executors(); + f.setBucketStateCalculator(calc); guard = f._subDb.getAttributeManager()->getAttribute("attr1"); EXPECT_EQUAL(retired_cfg, (*guard)->getConfig().getCompactionStrategy()); EXPECT_EQUAL(retired_cfg, dynamic_cast<const proton::DocumentMetaStore &>(f._subDb.getDocumentMetaStoreContext().get()).getConfig().getCompactionStrategy()); f.basicReconfig(10); - f._writeService.sync_all_executors(); guard = f._subDb.getAttributeManager()->getAttribute("attr1"); EXPECT_EQUAL(retired_cfg, (*guard)->getConfig().getCompactionStrategy()); EXPECT_EQUAL(retired_cfg, dynamic_cast<const proton::DocumentMetaStore &>(f._subDb.getDocumentMetaStoreContext().get()).getConfig().getCompactionStrategy()); calc->setNodeRetired(false); - f._subDb.setBucketStateCalculator(calc); - f._writeService.sync_all_executors(); + f.setBucketStateCalculator(calc); guard = f._subDb.getAttributeManager()->getAttribute("attr1"); EXPECT_EQUAL(default_cfg, (*guard)->getConfig().getCompactionStrategy()); EXPECT_EQUAL(default_cfg, dynamic_cast<const proton::DocumentMetaStore &>(f._subDb.getDocumentMetaStoreContext().get()).getConfig().getCompactionStrategy()); @@ -698,29 +703,31 @@ assertTarget(const vespalib::string &name, TEST_F("require that flush targets can be retrieved", FastAccessFixture) { IFlushTarget::List targets = getFlushTargets(f); - EXPECT_EQUAL(7u, targets.size()); + EXPECT_EQUAL(8u, targets.size()); EXPECT_EQUAL("subdb.attribute.flush.attr1", targets[0]->getName()); EXPECT_EQUAL("subdb.attribute.shrink.attr1", targets[1]->getName()); EXPECT_EQUAL("subdb.documentmetastore.flush", 
targets[2]->getName()); EXPECT_EQUAL("subdb.documentmetastore.shrink", targets[3]->getName()); - EXPECT_EQUAL("subdb.summary.compact", targets[4]->getName()); - EXPECT_EQUAL("subdb.summary.flush", targets[5]->getName()); - EXPECT_EQUAL("subdb.summary.shrink", targets[6]->getName()); + EXPECT_EQUAL("subdb.summary.compact_bloat", targets[4]->getName()); + EXPECT_EQUAL("subdb.summary.compact_spread", targets[5]->getName()); + EXPECT_EQUAL("subdb.summary.flush", targets[6]->getName()); + EXPECT_EQUAL("subdb.summary.shrink", targets[7]->getName()); } TEST_F("require that flush targets can be retrieved", SearchableFixture) { IFlushTarget::List targets = getFlushTargets(f); - EXPECT_EQUAL(9u, targets.size()); + EXPECT_EQUAL(10u, targets.size()); EXPECT_TRUE(assertTarget("subdb.attribute.flush.attr1", FType::SYNC, FComponent::ATTRIBUTE, *targets[0])); EXPECT_TRUE(assertTarget("subdb.attribute.shrink.attr1", FType::GC, FComponent::ATTRIBUTE, *targets[1])); EXPECT_TRUE(assertTarget("subdb.documentmetastore.flush", FType::SYNC, FComponent::ATTRIBUTE, *targets[2])); EXPECT_TRUE(assertTarget("subdb.documentmetastore.shrink", FType::GC, FComponent::ATTRIBUTE, *targets[3])); EXPECT_TRUE(assertTarget("subdb.memoryindex.flush", FType::FLUSH, FComponent::INDEX, *targets[4])); EXPECT_TRUE(assertTarget("subdb.memoryindex.fusion", FType::GC, FComponent::INDEX, *targets[5])); - EXPECT_TRUE(assertTarget("subdb.summary.compact", FType::GC, FComponent::DOCUMENT_STORE, *targets[6])); - EXPECT_TRUE(assertTarget("subdb.summary.flush", FType::SYNC, FComponent::DOCUMENT_STORE, *targets[7])); - EXPECT_TRUE(assertTarget("subdb.summary.shrink", FType::GC, FComponent::DOCUMENT_STORE, *targets[8])); + EXPECT_TRUE(assertTarget("subdb.summary.compact_bloat", FType::GC, FComponent::DOCUMENT_STORE, *targets[6])); + EXPECT_TRUE(assertTarget("subdb.summary.compact_spread", FType::GC, FComponent::DOCUMENT_STORE, *targets[7])); + EXPECT_TRUE(assertTarget("subdb.summary.flush", FType::SYNC, 
FComponent::DOCUMENT_STORE, *targets[8])); + EXPECT_TRUE(assertTarget("subdb.summary.shrink", FType::GC, FComponent::DOCUMENT_STORE, *targets[9])); } TEST_F("require that only fast-access attributes are instantiated", FastAccessOnlyFixture) @@ -782,7 +789,7 @@ struct DocumentHandler void putDoc(PutOperation &op) { IFeedView::SP feedView = _f._subDb.getFeedView(); vespalib::Gate gate; - _f.runInMasterAndSync([&]() { + _f.runInMaster([&]() { feedView->preparePut(op); feedView->handlePut(FeedToken(), op); feedView->forceCommit(CommitParam(op.getSerialNum()), std::make_shared<vespalib::GateCallback>(gate)); @@ -792,7 +799,7 @@ struct DocumentHandler void moveDoc(MoveOperation &op) { IFeedView::SP feedView = _f._subDb.getFeedView(); vespalib::Gate gate; - _f.runInMasterAndSync([&]() { + _f.runInMaster([&]() { auto onDone = std::make_shared<vespalib::GateCallback>(gate); feedView->handleMove(op, onDone); feedView->forceCommit(CommitParam(op.getSerialNum()), onDone); @@ -803,7 +810,7 @@ struct DocumentHandler { IFeedView::SP feedView = _f._subDb.getFeedView(); vespalib::Gate gate; - _f.runInMasterAndSync([&]() { + _f.runInMaster([&]() { feedView->prepareRemove(op); feedView->handleRemove(FeedToken(), op); feedView->forceCommit(CommitParam(op.getSerialNum()), std::make_shared<vespalib::GateCallback>(gate)); diff --git a/searchcore/src/tests/proton/documentdb/documentbucketmover/documentbucketmover_test.cpp b/searchcore/src/tests/proton/documentdb/documentbucketmover/documentbucketmover_test.cpp index 2c21a30396d..9bc374b8386 100644 --- a/searchcore/src/tests/proton/documentdb/documentbucketmover/documentbucketmover_test.cpp +++ b/searchcore/src/tests/proton/documentdb/documentbucketmover/documentbucketmover_test.cpp @@ -35,18 +35,18 @@ struct ControllerFixtureBase : public ::testing::Test test::BucketHandler _bucketHandler; MyBucketModifiedHandler _modifiedHandler; std::shared_ptr<bucketdb::BucketDBOwner> _bucketDB; - MySubDb _ready; - MySubDb _notReady; - 
BucketCreateNotifier _bucketCreateNotifier; - test::DiskMemUsageNotifier _diskMemUsageNotifier; - MonitoredRefCount _refCount; - ThreadStackExecutor _singleExecutor; - ExecutorThreadService _master; - DummyBucketExecutor _bucketExecutor; - MyMoveHandler _moveHandler; - DocumentDBTaggedMetrics _metrics; + MySubDb _ready; + MySubDb _notReady; + BucketCreateNotifier _bucketCreateNotifier; + test::DiskMemUsageNotifier _diskMemUsageNotifier; + MonitoredRefCount _refCount; + ThreadStackExecutor _singleExecutor; + SyncableExecutorThreadService _master; + DummyBucketExecutor _bucketExecutor; + MyMoveHandler _moveHandler; + DocumentDBTaggedMetrics _metrics; std::shared_ptr<BucketMoveJob> _bmj; - MyCountJobRunner _runner; + MyCountJobRunner _runner; ControllerFixtureBase(const BlockableMaintenanceJobConfig &blockableConfig, bool storeMoveDoneContexts); ~ControllerFixtureBase(); ControllerFixtureBase &addReady(const BucketId &bucket) { diff --git a/searchcore/src/tests/proton/documentdb/documentdb_test.cpp b/searchcore/src/tests/proton/documentdb/documentdb_test.cpp index a24eeb262ab..77f7cf4d8ed 100644 --- a/searchcore/src/tests/proton/documentdb/documentdb_test.cpp +++ b/searchcore/src/tests/proton/documentdb/documentdb_test.cpp @@ -1,10 +1,13 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
#include <tests/proton/common/dummydbowner.h> +#include <vespa/config-bucketspaces.h> +#include <vespa/document/config/documenttypes_config_fwd.h> #include <vespa/document/datatype/documenttype.h> #include <vespa/document/repo/documenttyperepo.h> -#include <vespa/fastos/file.h> #include <vespa/document/test/make_bucket_space.h> +#include <vespa/fastos/file.h> +#include <vespa/persistence/dummyimpl/dummy_bucket_executor.h> #include <vespa/searchcore/proton/attribute/flushableattribute.h> #include <vespa/searchcore/proton/common/statusreport.h> #include <vespa/searchcore/proton/docsummary/summaryflushtarget.h> @@ -22,17 +25,16 @@ #include <vespa/searchcore/proton/server/feedhandler.h> #include <vespa/searchcore/proton/server/fileconfigmanager.h> #include <vespa/searchcore/proton/server/memoryconfigstore.h> -#include <vespa/persistence/dummyimpl/dummy_bucket_executor.h> +#include <vespa/searchcore/proton/test/mock_shared_threading_service.h> #include <vespa/searchcorespi/index/indexflushtarget.h> #include <vespa/searchlib/attribute/attribute_read_guard.h> #include <vespa/searchlib/index/dummyfileheadercontext.h> #include <vespa/searchlib/transactionlog/translogserver.h> #include <vespa/vespalib/data/slime/slime.h> -#include <vespa/vespalib/util/size_literals.h> -#include <vespa/config-bucketspaces.h> #include <vespa/vespalib/io/fileutil.h> #include <vespa/vespalib/stllike/asciistream.h> #include <vespa/vespalib/testkit/test_kit.h> +#include <vespa/vespalib/util/size_literals.h> #include <iostream> using namespace cloud::config::filedistribution; @@ -42,7 +44,6 @@ using namespace std::chrono_literals; using document::DocumentType; using document::DocumentTypeRepo; -using document::DocumenttypesConfig; using document::test::makeBucketSpace; using search::SerialNum; using search::TuneFileDocumentDB; @@ -118,6 +119,7 @@ struct Fixture : public FixtureBase { DummyWireService _dummy; MyDBOwner _myDBOwner; vespalib::ThreadStackExecutor _summaryExecutor; + 
MockSharedThreadingService _shared_service; HwInfo _hwInfo; storage::spi::dummy::DummyBucketExecutor _bucketExecutor; DocumentDB::SP _db; @@ -142,6 +144,7 @@ Fixture::Fixture(bool file_config) _dummy(), _myDBOwner(), _summaryExecutor(8, 128_Ki), + _shared_service(_summaryExecutor, _summaryExecutor), _hwInfo(), _bucketExecutor(2), _db(), @@ -165,7 +168,7 @@ Fixture::Fixture(bool file_config) mgr.nextGeneration(0ms); _db = DocumentDB::create(".", mgr.getConfig(), "tcp/localhost:9014", _queryLimiter, _clock, DocTypeName("typea"), makeBucketSpace(), - *b->getProtonConfigSP(), _myDBOwner, _summaryExecutor, _summaryExecutor, _bucketExecutor, _tls, _dummy, + *b->getProtonConfigSP(), _myDBOwner, _shared_service, _bucketExecutor, _tls, _dummy, _fileHeaderContext, make_config_store(), std::make_shared<vespalib::ThreadStackExecutor>(16, 128_Ki), _hwInfo); _db->start(); diff --git a/searchcore/src/tests/proton/documentdb/documentdbconfig/documentdbconfig_test.cpp b/searchcore/src/tests/proton/documentdb/documentdbconfig/documentdbconfig_test.cpp index 0d07da8858a..78a119c638a 100644 --- a/searchcore/src/tests/proton/documentdb/documentdbconfig/documentdbconfig_test.cpp +++ b/searchcore/src/tests/proton/documentdb/documentdbconfig/documentdbconfig_test.cpp @@ -8,6 +8,7 @@ #include <vespa/vespalib/testkit/testapp.h> #include <vespa/config-summary.h> #include <vespa/config-summarymap.h> +#include <vespa/document/config/documenttypes_config_fwd.h> #include <vespa/document/repo/configbuilder.h> #include <vespa/document/repo/documenttyperepo.h> diff --git a/searchcore/src/tests/proton/documentdb/executor_threading_service/executor_threading_service_test.cpp b/searchcore/src/tests/proton/documentdb/executor_threading_service/executor_threading_service_test.cpp index 32707f8a69f..8d7e842bc89 100644 --- a/searchcore/src/tests/proton/documentdb/executor_threading_service/executor_threading_service_test.cpp +++ 
b/searchcore/src/tests/proton/documentdb/executor_threading_service/executor_threading_service_test.cpp @@ -10,6 +10,7 @@ using vespalib::ISequencedTaskExecutor; using vespalib::SequencedTaskExecutor; using SharedFieldWriterExecutor = ThreadingServiceConfig::SharedFieldWriterExecutor; +VESPA_THREAD_STACK_TAG(my_field_writer_executor) SequencedTaskExecutor* to_concrete_type(ISequencedTaskExecutor& exec) @@ -20,14 +21,18 @@ to_concrete_type(ISequencedTaskExecutor& exec) class ExecutorThreadingServiceTest : public ::testing::Test { public: vespalib::ThreadStackExecutor shared_executor; + std::unique_ptr<ISequencedTaskExecutor> field_writer_executor; std::unique_ptr<ExecutorThreadingService> service; ExecutorThreadingServiceTest() : shared_executor(1, 1000), + field_writer_executor(SequencedTaskExecutor::create(my_field_writer_executor, 3, 200)), service() { } void setup(uint32_t indexing_threads, SharedFieldWriterExecutor shared_field_writer) { service = std::make_unique<ExecutorThreadingService>(shared_executor, + field_writer_executor.get(), + nullptr, ThreadingServiceConfig::make(indexing_threads, shared_field_writer)); } SequencedTaskExecutor* index_inverter() { @@ -39,6 +44,9 @@ public: SequencedTaskExecutor* attribute_writer() { return to_concrete_type(service->attributeFieldWriter()); } + SequencedTaskExecutor* field_writer() { + return to_concrete_type(*field_writer_executor); + } }; void @@ -75,6 +83,15 @@ TEST_F(ExecutorThreadingServiceTest, shared_executor_for_index_and_attribute_fie assert_executor(index_inverter(), 12, 100); } +TEST_F(ExecutorThreadingServiceTest, shared_field_writer_specified_from_the_outside) +{ + setup(4, SharedFieldWriterExecutor::DOCUMENT_DB); + EXPECT_EQ(field_writer(), index_inverter()); + EXPECT_EQ(field_writer(), index_writer()); + EXPECT_EQ(field_writer(), attribute_writer()); + assert_executor(field_writer(), 3, 200); +} + TEST_F(ExecutorThreadingServiceTest, tasks_limits_can_be_updated) { setup(4, 
SharedFieldWriterExecutor::NONE); diff --git a/searchcore/src/tests/proton/documentdb/feedhandler/feedhandler_test.cpp b/searchcore/src/tests/proton/documentdb/feedhandler/feedhandler_test.cpp index bffedfd8dab..904937a26da 100644 --- a/searchcore/src/tests/proton/documentdb/feedhandler/feedhandler_test.cpp +++ b/searchcore/src/tests/proton/documentdb/feedhandler/feedhandler_test.cpp @@ -234,9 +234,9 @@ struct MyFeedView : public test::DummyFeedView { (void) token; ++remove_count; } - void handleMove(const MoveOperation &, IDestructorCallback::SP) override { ++move_count; } - void heartBeat(SerialNum) override { ++heartbeat_count; } - void handlePruneRemovedDocuments(const PruneRemovedDocumentsOperation &) override { ++prune_removed_count; } + void handleMove(const MoveOperation &, DoneCallback) override { ++move_count; } + void heartBeat(SerialNum, DoneCallback) override { ++heartbeat_count; } + void handlePruneRemovedDocuments(const PruneRemovedDocumentsOperation &, DoneCallback) override { ++prune_removed_count; } const ISimpleDocumentMetaStore *getDocumentMetaStorePtr() const override { return nullptr; } @@ -747,6 +747,20 @@ TEST_F("require that put with different document type repo is ok", FeedHandlerFi EXPECT_EQUAL(1, f.tls_writer.store_count); } +TEST_F("require that feed stats are updated", FeedHandlerFixture) +{ + DocumentContext doc_context("id:ns:searchdocument::foo", *f.schema.builder); + auto op =std::make_unique<PutOperation>(doc_context.bucketId, Timestamp(10), std::move(doc_context.doc)); + FeedTokenContext token_context; + f.handler.performOperation(std::move(token_context.token), std::move(op)); + f.syncMaster(); // wait for initateCommit + f.syncMaster(); // wait for onCommitDone + auto stats = f.handler.get_stats(false); + EXPECT_EQUAL(1u, stats.get_commits()); + EXPECT_EQUAL(1u, stats.get_operations()); + EXPECT_LESS(0.0, stats.get_total_latency()); +} + using namespace document; TEST_F("require that update with a fieldpath update will be 
rejected", SchemaContext) { diff --git a/searchcore/src/tests/proton/documentdb/feedview/feedview_test.cpp b/searchcore/src/tests/proton/documentdb/feedview/feedview_test.cpp index 54772f353d0..824a9273404 100644 --- a/searchcore/src/tests/proton/documentdb/feedview/feedview_test.cpp +++ b/searchcore/src/tests/proton/documentdb/feedview/feedview_test.cpp @@ -22,6 +22,7 @@ #include <vespa/searchcore/proton/test/threading_service_observer.h> #include <vespa/searchlib/attribute/attributefactory.h> #include <vespa/document/update/documentupdate.h> +#include <vespa/vespalib/util/destructor_callbacks.h> #include <vespa/searchlib/index/docbuilder.h> #include <vespa/log/log.h> @@ -37,6 +38,8 @@ using search::AttributeVector; using search::CacheStats; using search::DocumentMetaData; using vespalib::IDestructorCallback; +using vespalib::Gate; +using vespalib::GateCallback; using search::SearchableStats; using search::index::schema::CollectionType; using search::index::schema::DataType; @@ -364,7 +367,7 @@ struct MyAttributeWriter : public IAttributeWriter (void) doc; (void) lid; } - void heartBeat(SerialNum) override { ++_heartBeatCount; } + void heartBeat(SerialNum, OnWriteDoneType) override { ++_heartBeatCount; } void compactLidSpace(uint32_t wantedLidLimit, SerialNum ) override { _wantedLidLimit = wantedLidLimit; } @@ -403,7 +406,7 @@ MyAttributeWriter::~MyAttributeWriter() = default; struct MyTransport : public feedtoken::ITransport { ResultUP lastResult; - vespalib::Gate _gate; + Gate _gate; MyTracer &_tracer; MyTransport(MyTracer &tracer); ~MyTransport(); @@ -512,8 +515,8 @@ struct FixtureBase } template <typename FunctionType> - void runInMasterAndSync(FunctionType func) { - test::runInMasterAndSync(_writeService, func); + void runInMaster(FunctionType func) { + test::runInMaster(_writeService, func); } virtual IFeedView &getFeedView() = 0; @@ -558,7 +561,7 @@ struct FixtureBase void putAndWait(const DocumentContext &docCtx) { FeedTokenContext token(_tracer); 
PutOperation op(docCtx.bid, docCtx.ts, docCtx.doc); - runInMasterAndSync([this, ft = std::move(token.ft), &op]() mutable { performPut(std::move(ft), op); }); + runInMaster([this, ft = std::move(token.ft), &op]() mutable { performPut(std::move(ft), op); }); token.mt.await(); } @@ -571,7 +574,7 @@ struct FixtureBase void updateAndWait(const DocumentContext &docCtx) { FeedTokenContext token(_tracer); UpdateOperation op(docCtx.bid, docCtx.ts, docCtx.upd); - runInMasterAndSync([this, ft = std::move(token.ft), &op]() mutable { performUpdate(std::move(ft), op); }); + runInMaster([this, ft = std::move(token.ft), &op]() mutable { performUpdate(std::move(ft), op); }); token.mt.await(); } @@ -586,7 +589,7 @@ struct FixtureBase void removeAndWait(const DocumentContext &docCtx) { FeedTokenContext token(_tracer); RemoveOperationWithDocId op(docCtx.bid, docCtx.ts, docCtx.doc->getId()); - runInMasterAndSync([this, ft = std::move(token.ft), &op]() mutable { performRemove(std::move(ft), op); }); + runInMaster([this, ft = std::move(token.ft), &op]() mutable { performRemove(std::move(ft), op); }); token.mt.await(); } @@ -596,26 +599,35 @@ struct FixtureBase } } - void performMove(MoveOperation &op) { + void performMove(MoveOperation &op, IDestructorCallback::SP onDone) { op.setSerialNum(++serial); - getFeedView().handleMove(op, IDestructorCallback::SP()); + getFeedView().handleMove(op, std::move(onDone)); } void moveAndWait(const DocumentContext &docCtx, uint32_t fromLid, uint32_t toLid) { MoveOperation op(docCtx.bid, docCtx.ts, docCtx.doc, DbDocumentId(pc._params._subDbId, fromLid), pc._params._subDbId); op.setTargetLid(toLid); - runInMasterAndSync([&]() { performMove(op); }); + Gate gate; + runInMaster([&, onDone=std::make_shared<GateCallback>(gate)]() { performMove(op, std::move(onDone)); }); + gate.await(); } - void performDeleteBucket(DeleteBucketOperation &op) { + void performDeleteBucket(DeleteBucketOperation &op, IDestructorCallback::SP onDone) { 
getFeedView().prepareDeleteBucket(op); op.setSerialNum(++serial); - getFeedView().handleDeleteBucket(op); + getFeedView().handleDeleteBucket(op, onDone); } - void performForceCommit() { getFeedView().forceCommit(serial); } + void performForceCommit(IDestructorCallback::SP onDone) { + getFeedView().forceCommit(serial, std::move(onDone)); + } void forceCommitAndWait() { - runInMasterAndSync([&]() { performForceCommit(); }); + Gate gate; + runInMaster([this, onDone=std::make_shared<GateCallback>(gate)]() { + performForceCommit(std::move(onDone)); + }); + gate.await(); + _writeService.master().sync(); } bool assertTrace(const vespalib::string &exp) { @@ -635,14 +647,19 @@ struct FixtureBase return docs; } - void performCompactLidSpace(uint32_t wantedLidLimit) { + void performCompactLidSpace(uint32_t wantedLidLimit, IDestructorCallback::SP onDone) { auto &fv = getFeedView(); CompactLidSpaceOperation op(0, wantedLidLimit); op.setSerialNum(++serial); - fv.handleCompactLidSpace(op); + fv.handleCompactLidSpace(op, onDone); } void compactLidSpaceAndWait(uint32_t wantedLidLimit) { - runInMasterAndSync([&]() { performCompactLidSpace(wantedLidLimit); }); + Gate gate; + runInMaster([&]() { + performCompactLidSpace(wantedLidLimit, std::make_shared<GateCallback>(gate)); + }); + gate.await(); + _writeService.master().sync(); } void assertChangeHandler(document::GlobalId expGid, uint32_t expLid, uint32_t expChanges) { _gidToLidChangeHandler->assertChanges(expGid, expLid, expChanges); @@ -707,8 +724,7 @@ struct SearchableFeedViewFixture : public FixtureBase SearchableFeedView::Context(iw)) { } - ~SearchableFeedViewFixture() override - { + ~SearchableFeedViewFixture() override { forceCommitAndWait(); } IFeedView &getFeedView() override { return fv; } @@ -725,8 +741,7 @@ struct FastAccessFeedViewFixture : public FixtureBase FastAccessFeedView::Context(aw, _docIdLimit)) { } - ~FastAccessFeedViewFixture() override - { + ~FastAccessFeedViewFixture() override { forceCommitAndWait(); } 
IFeedView &getFeedView() override { return fv; } @@ -931,7 +946,11 @@ TEST_F("require that handleDeleteBucket() removes documents", SearchableFeedView // delete bucket for user 1 DeleteBucketOperation op(docs[0].bid); - f.runInMasterAndSync([&]() { f.performDeleteBucket(op); }); + vespalib::Gate gate; + f.runInMaster([&, onDone=std::make_shared<GateCallback>(gate)]() { + f.performDeleteBucket(op, std::move(onDone)); + }); + gate.await(); f.dms_commit(); EXPECT_EQUAL(0u, f.getBucketDB()->get(docs[0].bid).getDocumentCount()); @@ -1034,7 +1053,11 @@ TEST_F("require that removes are not remembered", SearchableFeedViewFixture) TEST_F("require that heartbeat propagates to index- and attributeadapter", SearchableFeedViewFixture) { - f.runInMasterAndSync([&]() { f.fv.heartBeat(2); }); + vespalib::Gate gate; + f.runInMaster([&, onDone = std::make_shared<vespalib::GateCallback>(gate)]() { + f.fv.heartBeat(2, std::move(onDone)); + }); + gate.await(); EXPECT_EQUAL(1, f.miw._heartBeatCount); EXPECT_EQUAL(1, f.maw._heartBeatCount); } @@ -1146,7 +1169,12 @@ TEST_F("require that compactLidSpace() doesn't propagate to " EXPECT_TRUE(assertThreadObserver(5, 4, 4, f.writeServiceObserver())); CompactLidSpaceOperation op(0, 2); op.setSerialNum(0); - f.runInMasterAndSync([&]() { f.fv.handleCompactLidSpace(op); }); + Gate gate; + f.runInMaster([&, onDone=std::make_shared<GateCallback>(gate)]() { + f.fv.handleCompactLidSpace(op, std::move(onDone)); + }); + gate.await(); + f._writeService.master().sync(); // Delayed holdUnblockShrinkLidSpace() in index thread, then master thread EXPECT_TRUE(assertThreadObserver(6, 6, 5, f.writeServiceObserver())); EXPECT_EQUAL(0u, f.metaStoreObserver()._compactLidSpaceLidLimit); @@ -1154,24 +1182,21 @@ TEST_F("require that compactLidSpace() doesn't propagate to " EXPECT_EQUAL(0u, f.metaStoreObserver()._holdUnblockShrinkLidSpaceCnt); } -TEST_F("require that compactLidSpace() propagates to attributeadapter", - FastAccessFeedViewFixture) +TEST_F("require that 
compactLidSpace() propagates to attributeadapter", FastAccessFeedViewFixture) { f.populateBeforeCompactLidSpace(); f.compactLidSpaceAndWait(2); EXPECT_EQUAL(2u, f.maw._wantedLidLimit); } -TEST_F("require that compactLidSpace() propagates to index writer", - SearchableFeedViewFixture) +TEST_F("require that compactLidSpace() propagates to index writer", SearchableFeedViewFixture) { f.populateBeforeCompactLidSpace(); f.compactLidSpaceAndWait(2); EXPECT_EQUAL(2u, f.miw._wantedLidLimit); } -TEST_F("require that commit is not implicitly called", - SearchableFeedViewFixture) +TEST_F("require that commit is not implicitly called", SearchableFeedViewFixture) { DocumentContext dc = f.doc1(); f.putAndWait(dc); @@ -1191,8 +1216,7 @@ TEST_F("require that commit is not implicitly called", f.forceCommitAndWait(); } -TEST_F("require that forceCommit updates docid limit", - SearchableFeedViewFixture) +TEST_F("require that forceCommit updates docid limit", SearchableFeedViewFixture) { DocumentContext dc = f.doc1(); f.putAndWait(dc); diff --git a/searchcore/src/tests/proton/documentdb/fileconfigmanager/fileconfigmanager_test.cpp b/searchcore/src/tests/proton/documentdb/fileconfigmanager/fileconfigmanager_test.cpp index 469cffaa31a..68b9d2f8d6e 100644 --- a/searchcore/src/tests/proton/documentdb/fileconfigmanager/fileconfigmanager_test.cpp +++ b/searchcore/src/tests/proton/documentdb/fileconfigmanager/fileconfigmanager_test.cpp @@ -8,6 +8,7 @@ #include <vespa/config-summary.h> #include <vespa/config-summarymap.h> #include <vespa/config/helper/configgetter.hpp> +#include <vespa/document/config/documenttypes_config_fwd.h> #include <vespa/document/repo/documenttyperepo.h> #include <vespa/searchcore/proton/server/bootstrapconfig.h> #include <vespa/searchcore/proton/server/fileconfigmanager.h> @@ -40,10 +41,10 @@ vespalib::string myId("myconfigid"); DocumentDBConfig::SP makeBaseConfigSnapshot() { - config::DirSpec spec(TEST_PATH("cfg")); + ::config::DirSpec spec(TEST_PATH("cfg")); DBCM 
dbcm(spec, "test"); - DocumenttypesConfigSP dtcfg(config::ConfigGetter<DocumenttypesConfig>::getConfig("", spec).release()); + DocumenttypesConfigSP dtcfg(::config::ConfigGetter<DocumenttypesConfig>::getConfig("", spec).release()); auto b = std::make_shared<BootstrapConfig>(1, dtcfg, std::make_shared<DocumentTypeRepo>(*dtcfg), std::make_shared<ProtonConfig>(), diff --git a/searchcore/src/tests/proton/documentdb/lid_space_compaction/lid_space_common.cpp b/searchcore/src/tests/proton/documentdb/lid_space_compaction/lid_space_common.cpp index 3abeaf37062..57989688a4f 100644 --- a/searchcore/src/tests/proton/documentdb/lid_space_compaction/lid_space_common.cpp +++ b/searchcore/src/tests/proton/documentdb/lid_space_compaction/lid_space_common.cpp @@ -19,8 +19,8 @@ MyScanIterator::valid() const { return _validItr; } -search::DocumentMetaData MyScanIterator::next(uint32_t compactLidLimit, bool retry) { - if (!retry && _itr != _lids.begin()) { +search::DocumentMetaData MyScanIterator::next(uint32_t compactLidLimit) { + if (_itr != _lids.begin()) { ++_itr; } for (; _itr != _lids.end() && (*_itr) <= compactLidLimit; ++_itr) {} diff --git a/searchcore/src/tests/proton/documentdb/lid_space_compaction/lid_space_common.h b/searchcore/src/tests/proton/documentdb/lid_space_compaction/lid_space_common.h index 42976104836..b404fc6956a 100644 --- a/searchcore/src/tests/proton/documentdb/lid_space_compaction/lid_space_common.h +++ b/searchcore/src/tests/proton/documentdb/lid_space_compaction/lid_space_common.h @@ -15,6 +15,7 @@ #include <vespa/searchcore/proton/test/clusterstatehandler.h> #include <vespa/searchcore/proton/test/disk_mem_usage_notifier.h> #include <vespa/searchcore/proton/test/test.h> +#include <vespa/searchcore/proton/test/dummy_document_store.h> #include <vespa/vespalib/util/idestructorcallback.h> #include <vespa/searchlib/index/docbuilder.h> @@ -55,7 +56,7 @@ struct MyScanIterator : public IDocumentScanIterator { explicit MyScanIterator(const MyHandler & handler, 
const LidVector &lids); ~MyScanIterator() override; bool valid() const override; - search::DocumentMetaData next(uint32_t compactLidLimit, bool retry) override; + search::DocumentMetaData next(uint32_t compactLidLimit) override; }; struct MyHandler : public ILidSpaceCompactionHandler { diff --git a/searchcore/src/tests/proton/documentdb/lid_space_compaction/lid_space_compaction_test.cpp b/searchcore/src/tests/proton/documentdb/lid_space_compaction/lid_space_compaction_test.cpp index a2d4315dad5..c01a1a65c46 100644 --- a/searchcore/src/tests/proton/documentdb/lid_space_compaction/lid_space_compaction_test.cpp +++ b/searchcore/src/tests/proton/documentdb/lid_space_compaction/lid_space_compaction_test.cpp @@ -250,6 +250,7 @@ TEST_F(MaxOutstandingJobTest, job_is_blocked_if_it_has_too_many_outstanding_move unblockJob(1); assertRunToNotBlocked(); assertJobContext(4, 7, 3, 0, 0); + unblockJob(1); endScan().compact(); assertJobContext(4, 7, 3, 7, 1); sync(); diff --git a/searchcore/src/tests/proton/documentdb/lid_space_compaction/lid_space_jobtest.cpp b/searchcore/src/tests/proton/documentdb/lid_space_compaction/lid_space_jobtest.cpp index 13955953eb5..8f88d678c0c 100644 --- a/searchcore/src/tests/proton/documentdb/lid_space_compaction/lid_space_jobtest.cpp +++ b/searchcore/src/tests/proton/documentdb/lid_space_compaction/lid_space_jobtest.cpp @@ -54,7 +54,7 @@ JobTestBase::init(uint32_t allowedLidBloat, _job.reset(); _singleExecutor = std::make_unique<vespalib::ThreadStackExecutor>(1, 0x10000); - _master = std::make_unique<proton::ExecutorThreadService> (*_singleExecutor); + _master = std::make_unique<proton::SyncableExecutorThreadService> (*_singleExecutor); _bucketExecutor = std::make_unique<storage::spi::dummy::DummyBucketExecutor>(4); _job = lidspace::CompactionJob::create(compactCfg, RetainGuard(_refCount), _handler, _storer, *_master, *_bucketExecutor, _diskMemUsageNotifier, blockableCfg, _clusterStateHandler, nodeRetired, diff --git 
a/searchcore/src/tests/proton/documentdb/lid_space_compaction/lid_space_jobtest.h b/searchcore/src/tests/proton/documentdb/lid_space_compaction/lid_space_jobtest.h index 14f2ff42dbe..5875910f4d9 100644 --- a/searchcore/src/tests/proton/documentdb/lid_space_compaction/lid_space_jobtest.h +++ b/searchcore/src/tests/proton/documentdb/lid_space_compaction/lid_space_jobtest.h @@ -14,7 +14,7 @@ struct JobTestBase : public ::testing::Test { test::DiskMemUsageNotifier _diskMemUsageNotifier; std::unique_ptr<storage::spi::dummy::DummyBucketExecutor> _bucketExecutor; std::unique_ptr<vespalib::SyncableThreadExecutor> _singleExecutor; - std::unique_ptr<searchcorespi::index::IThreadService> _master; + std::unique_ptr<searchcorespi::index::ISyncableThreadService> _master; std::shared_ptr<MyHandler> _handler; MyStorer _storer; std::shared_ptr<BlockableMaintenanceJob> _job; diff --git a/searchcore/src/tests/proton/documentdb/maintenancecontroller/maintenancecontroller_test.cpp b/searchcore/src/tests/proton/documentdb/maintenancecontroller/maintenancecontroller_test.cpp index 8940b01b91d..227e885564d 100644 --- a/searchcore/src/tests/proton/documentdb/maintenancecontroller/maintenancecontroller_test.cpp +++ b/searchcore/src/tests/proton/documentdb/maintenancecontroller/maintenancecontroller_test.cpp @@ -326,7 +326,7 @@ class MaintenanceControllerFixture public: MyExecutor _executor; MyExecutor _genericExecutor; - ExecutorThreadService _threadService; + SyncableExecutorThreadService _threadService; DummyBucketExecutor _bucketExecutor; DocTypeName _docTypeName; test::UserDocumentsBuilder _builder; diff --git a/searchcore/src/tests/proton/documentdb/storeonlyfeedview/storeonlyfeedview_test.cpp b/searchcore/src/tests/proton/documentdb/storeonlyfeedview/storeonlyfeedview_test.cpp index 8ccf4f792c5..b1d7ee1d0a8 100644 --- a/searchcore/src/tests/proton/documentdb/storeonlyfeedview/storeonlyfeedview_test.cpp +++ 
b/searchcore/src/tests/proton/documentdb/storeonlyfeedview/storeonlyfeedview_test.cpp @@ -40,12 +40,12 @@ namespace { class MySummaryAdapter : public test::MockSummaryAdapter { private: - int &_rmCount; - int &_putCount; - int &_heartbeatCount; + std::atomic<int> &_rmCount; + std::atomic<int> &_putCount; + std::atomic<int> &_heartbeatCount; public: - MySummaryAdapter(int &removeCount, int &putCount, int &heartbeatCount) noexcept + MySummaryAdapter(std::atomic<int> &removeCount, std::atomic<int> &putCount, std::atomic<int> &heartbeatCount) noexcept : _rmCount(removeCount), _putCount(putCount), _heartbeatCount(heartbeatCount) { @@ -78,18 +78,18 @@ struct MyMinimalFeedViewBase struct MyMinimalFeedView : public MyMinimalFeedViewBase, public StoreOnlyFeedView { using UP = std::unique_ptr<MyMinimalFeedView>; - int removeMultiAttributesCount; - int removeMultiIndexFieldsCount; - int heartBeatAttributesCount; - int heartBeatIndexedFieldsCount; - int &outstandingMoveOps; + std::atomic<int> removeMultiAttributesCount; + std::atomic<int> removeMultiIndexFieldsCount; + std::atomic<int> heartBeatAttributesCount; + std::atomic<int> heartBeatIndexedFieldsCount; + std::atomic<int> &outstandingMoveOps; MyMinimalFeedView(const ISummaryAdapter::SP &summaryAdapter, const DocumentMetaStore::SP &metaStore, searchcorespi::index::IThreadingService &writeService, const PersistentParams ¶ms, std::shared_ptr<PendingLidTrackerBase> pendingLidsForCommit, - int &outstandingMoveOps_) : + std::atomic<int> &outstandingMoveOps_) : MyMinimalFeedViewBase(), StoreOnlyFeedView(StoreOnlyFeedView::Context(summaryAdapter, search::index::Schema::SP(), @@ -114,12 +114,12 @@ struct MyMinimalFeedView : public MyMinimalFeedViewBase, public StoreOnlyFeedVie StoreOnlyFeedView::removeIndexedFields(s, l, onWriteDone); ++removeMultiIndexFieldsCount; } - void heartBeatIndexedFields(SerialNum s) override { - StoreOnlyFeedView::heartBeatIndexedFields(s); + void heartBeatIndexedFields(SerialNum s, DoneCallback onDone) 
override { + StoreOnlyFeedView::heartBeatIndexedFields(s, onDone); ++heartBeatIndexedFieldsCount; } - void heartBeatAttributes(SerialNum s) override { - StoreOnlyFeedView::heartBeatAttributes(s); + void heartBeatAttributes(SerialNum s, DoneCallback onDone) override { + StoreOnlyFeedView::heartBeatAttributes(s, onDone); ++heartBeatAttributesCount; } }; @@ -127,17 +127,18 @@ struct MyMinimalFeedView : public MyMinimalFeedViewBase, public StoreOnlyFeedVie struct MoveOperationFeedView : public MyMinimalFeedView { using UP = std::unique_ptr<MoveOperationFeedView>; - int putAttributesCount; - int putIndexFieldsCount; - int removeAttributesCount; - int removeIndexFieldsCount; + std::atomic<int> putAttributesCount; + std::atomic<int> putIndexFieldsCount; + std::atomic<int> removeAttributesCount; + std::atomic<int> removeIndexFieldsCount; std::vector<IDestructorCallback::SP> onWriteDoneContexts; + std::mutex _mutex; MoveOperationFeedView(const ISummaryAdapter::SP &summaryAdapter, const DocumentMetaStore::SP &metaStore, searchcorespi::index::IThreadingService &writeService, const PersistentParams ¶ms, std::shared_ptr<PendingLidTrackerBase> pendingLidsForCommit, - int &outstandingMoveOps_) : + std::atomic<int> &outstandingMoveOps_) : MyMinimalFeedView(summaryAdapter, metaStore, writeService, params, std::move(pendingLidsForCommit), outstandingMoveOps_), putAttributesCount(0), @@ -149,30 +150,50 @@ struct MoveOperationFeedView : public MyMinimalFeedView { void putAttributes(SerialNum, search::DocumentIdT, const document::Document &, OnPutDoneType onWriteDone) override { ++putAttributesCount; EXPECT_EQUAL(1, outstandingMoveOps); + std::lock_guard guard(_mutex); onWriteDoneContexts.push_back(onWriteDone); } void putIndexedFields(SerialNum, search::DocumentIdT, const document::Document::SP &, OnOperationDoneType onWriteDone) override { ++putIndexFieldsCount; EXPECT_EQUAL(1, outstandingMoveOps); + std::lock_guard guard(_mutex); onWriteDoneContexts.push_back(onWriteDone); } void 
removeAttributes(SerialNum, search::DocumentIdT, OnRemoveDoneType onWriteDone) override { ++removeAttributesCount; EXPECT_EQUAL(1, outstandingMoveOps); + std::lock_guard guard(_mutex); onWriteDoneContexts.push_back(onWriteDone); } void removeIndexedFields(SerialNum, search::DocumentIdT, OnRemoveDoneType onWriteDone) override { ++removeIndexFieldsCount; EXPECT_EQUAL(1, outstandingMoveOps); + std::lock_guard guard(_mutex); onWriteDoneContexts.push_back(onWriteDone); } - void clearWriteDoneContexts() { onWriteDoneContexts.clear(); } + void clearWriteDoneContexts() { + std::lock_guard guard(_mutex); + onWriteDoneContexts.clear(); + } + void waitFor(uint32_t expected) { + while (true) { + std::lock_guard guard(_mutex); + if (expected == onWriteDoneContexts.size()) { + bool ok = true; + for (uint32_t i(0); ok && i < expected; i++) { + // One for attributes, and one for indexes + ok = (onWriteDoneContexts[i].use_count() == 2); + } + if (ok) return; + } + } + } }; struct MoveOperationCallback : public IDestructorCallback { - int &outstandingMoveOps; - explicit MoveOperationCallback(int &outstandingMoveOps_) noexcept : outstandingMoveOps(outstandingMoveOps_) { + std::atomic<int> &outstandingMoveOps; + explicit MoveOperationCallback(std::atomic<int> &outstandingMoveOps_) noexcept : outstandingMoveOps(outstandingMoveOps_) { ++outstandingMoveOps; } ~MoveOperationCallback() override { @@ -185,10 +206,10 @@ const uint32_t subdb_id = 0; template <typename FeedViewType> struct FixtureBase { - int removeCount; - int putCount; - int heartbeatCount; - int outstandingMoveOps; + std::atomic<int> removeCount; + std::atomic<int> putCount; + std::atomic<int> heartbeatCount; + std::atomic<int> outstandingMoveOps; DocumentMetaStore::SP metaStore; vespalib::ThreadStackExecutor sharedExecutor; ExecutorThreadingService writeService; @@ -244,10 +265,14 @@ struct FixtureBase { void runInMasterAndSync(FunctionType func) { test::runInMasterAndSync(writeService, func); } + template <typename 
FunctionType> + void runInMaster(FunctionType func) { + test::runInMaster(writeService, func); + } void force_commit() { vespalib::Gate gate; - runInMasterAndSync([this, &gate]() { + runInMaster([this, &gate]() { feedview->forceCommit(search::CommitParam(serial_num), std::make_shared<vespalib::GateCallback>(gate)); }); gate.await(); @@ -279,11 +304,25 @@ struct MoveFixture : public FixtureBase<MoveOperationFeedView> { feedview->clearWriteDoneContexts(); EXPECT_EQUAL(0, outstandingMoveOps); } + + void handleMove(const MoveOperation & op, long expected) { + auto ctx = beginMoveOp(); + runInMasterAndSync([&, ctx]() { + feedview->handleMove(op, std::move(ctx)); + }); + // First we wait for everything propagated to MinimalFeedView + while (ctx.use_count() > (expected + 1)) { + LOG(info, "use_count = %ld", ctx.use_count()); + std::this_thread::sleep_for(1s); + } + // And then we must wait for everyone else to finish up too. + feedview->waitFor(expected*2); + } }; TEST_F("require that prepareMove sets target db document id", Fixture) { - Document::SP doc(new Document); + auto doc = std::make_shared<Document>(); MoveOperation op(BucketId(20, 42), Timestamp(10), doc, 1, subdb_id + 1); f.runInMasterAndSync([&]() { f.feedview->prepareMove(op); }); @@ -314,7 +353,7 @@ TEST_F("require that handleMove() adds document to target and removes it from so MoveOperation::UP op = makeMoveOp(DbDocumentId(subdb_id + 1, 1), subdb_id); TEST_DO(f.assertPutCount(0)); f.runInMasterAndSync([&]() { f.feedview->prepareMove(*op); }); - f.runInMasterAndSync([&]() { f.feedview->handleMove(*op, f.beginMoveOp()); }); + f.handleMove(*op, 1); TEST_DO(f.assertPutCount(1)); TEST_DO(f.assertAndClearMoveOp()); lid = op->getDbDocumentId().getLid(); @@ -326,7 +365,7 @@ TEST_F("require that handleMove() adds document to target and removes it from so MoveOperation::UP op = makeMoveOp(DbDocumentId(subdb_id, 1), subdb_id + 1); op->setDbDocumentId(DbDocumentId(subdb_id + 1, 1)); TEST_DO(f.assertRemoveCount(0)); - 
f.runInMasterAndSync([&]() { f.feedview->handleMove(*op, f.beginMoveOp()); }); + f.handleMove(*op, 1); EXPECT_FALSE(f.metaStore->validLid(lid)); TEST_DO(f.assertRemoveCount(1)); TEST_DO(f.assertAndClearMoveOp()); @@ -335,7 +374,7 @@ TEST_F("require that handleMove() adds document to target and removes it from so TEST_F("require that handleMove() handles move within same subdb and propagates destructor callback", MoveFixture) { - Document::SP doc(new Document); + auto doc = std::make_shared<Document>(); DocumentId doc1id("id:test:foo:g=foo:1"); uint32_t docSize = 1; f.runInMasterAndSync([&]() { @@ -354,7 +393,7 @@ TEST_F("require that handleMove() handles move within same subdb and propagates op->setTargetLid(1); TEST_DO(f.assertPutCount(0)); TEST_DO(f.assertRemoveCount(0)); - f.runInMasterAndSync([&]() { f.feedview->handleMove(*op, f.beginMoveOp()); }); + f.handleMove(*op, 2); TEST_DO(f.assertPutCount(1)); TEST_DO(f.assertRemoveCount(1)); TEST_DO(f.assertAndClearMoveOp()); @@ -368,13 +407,17 @@ TEST_F("require that prune removed documents removes documents", { f.addDocsToMetaStore(3); - LidVectorContext::SP lids(new LidVectorContext(4)); + auto lids = std::make_shared<LidVectorContext>(4); lids->addLid(1); lids->addLid(3); PruneRemovedDocumentsOperation op(lids->getDocIdLimit(), subdb_id); op.setLidsToRemove(lids); op.setSerialNum(1); // allows use of meta store. 
- f.runInMasterAndSync([&]() { f.feedview->handlePruneRemovedDocuments(op); }); + vespalib::Gate gate; + f.runInMaster([&, onDone=std::make_shared<vespalib::GateCallback>(gate)]() { + f.feedview->handlePruneRemovedDocuments(op, std::move(onDone)); + }); + gate.await(); EXPECT_EQUAL(2, f.removeCount); EXPECT_FALSE(f.metaStore->validLid(1)); @@ -390,7 +433,11 @@ TEST_F("require that heartbeat propagates and commits meta store", Fixture) EXPECT_EQUAL(0, f.feedview->heartBeatIndexedFieldsCount); EXPECT_EQUAL(0, f.feedview->heartBeatAttributesCount); EXPECT_EQUAL(0, f.heartbeatCount); - f.runInMasterAndSync([&]() { f.feedview->heartBeat(2); }); + vespalib::Gate gate; + f.runInMaster([&, onDone = std::make_shared<vespalib::GateCallback>(gate)]() { + f.feedview->heartBeat(2, std::move(onDone)); + }); + gate.await(); EXPECT_EQUAL(2u, f.metaStore->getStatus().getLastSyncToken()); EXPECT_EQUAL(1, f.feedview->heartBeatIndexedFieldsCount); EXPECT_EQUAL(1, f.feedview->heartBeatAttributesCount); diff --git a/searchcore/src/tests/proton/documentmetastore/lidreusedelayer/lidreusedelayer_test.cpp b/searchcore/src/tests/proton/documentmetastore/lidreusedelayer/lidreusedelayer_test.cpp index 7be591099bc..8a2e2084978 100644 --- a/searchcore/src/tests/proton/documentmetastore/lidreusedelayer/lidreusedelayer_test.cpp +++ b/searchcore/src/tests/proton/documentmetastore/lidreusedelayer/lidreusedelayer_test.cpp @@ -7,6 +7,7 @@ #include <vespa/searchcore/proton/test/thread_utils.h> #include <vespa/searchcore/proton/test/threading_service_observer.h> #include <vespa/vespalib/util/lambdatask.h> +#include <vespa/vespalib/util/destructor_callbacks.h> #include <vespa/log/log.h> LOG_SETUP("lidreusedelayer_test"); @@ -144,15 +145,9 @@ public: _store.removes_complete(lids); } - void performCycleLids(const std::vector<uint32_t> &lids) { - _writeService.master().execute(makeLambdaTask([this, lids]() { cycledLids(lids);})); - } + void performCycleLids(const std::vector<uint32_t> &lids, 
vespalib::IDestructorCallback::SP onDone); - void cycleLids(const std::vector<uint32_t> &lids) { - if (lids.empty()) - return; - _writeService.index().execute(makeLambdaTask([this, lids]() { performCycleLids(lids);})); - } + void cycleLids(const std::vector<uint32_t> &lids, vespalib::IDestructorCallback::SP onDone); void delayReuse(uint32_t lid) { runInMasterAndSync([&]() { _lidReuseDelayer->delayReuse(lid); }); @@ -163,10 +158,32 @@ public: } void commit() { - runInMasterAndSync([&]() { cycleLids(_lidReuseDelayer->getReuseLids()); }); + vespalib::Gate gate; + test::runInMaster(_writeService, [this, onDone=std::make_shared<vespalib::GateCallback>(gate)]() { + cycleLids(_lidReuseDelayer->getReuseLids(), std::move(onDone)); + }); + gate.await(); } }; +void +Fixture::cycleLids(const std::vector<uint32_t> &lids, vespalib::IDestructorCallback::SP onDone) { + if (lids.empty()) + return; + _writeService.index().execute(makeLambdaTask([this, lids, onDone]() { + (void) onDone; + performCycleLids(lids, onDone); + })); +} + +void +Fixture::performCycleLids(const std::vector<uint32_t> &lids, vespalib::IDestructorCallback::SP onDone) { + _writeService.master().execute(makeLambdaTask([this, lids, onDone]() { + (void) onDone; + cycledLids(lids); + })); +} + TEST_F("require that nothing happens before free list is active", Fixture) { f.delayReuse(4); diff --git a/searchcore/src/tests/proton/feedoperation/feedoperation_test.cpp b/searchcore/src/tests/proton/feedoperation/feedoperation_test.cpp index 567fbf5dfec..d79b46b2e08 100644 --- a/searchcore/src/tests/proton/feedoperation/feedoperation_test.cpp +++ b/searchcore/src/tests/proton/feedoperation/feedoperation_test.cpp @@ -16,6 +16,7 @@ #include <vespa/searchlib/query/base.h> #include <persistence/spi/types.h> #include <vespa/document/base/documentid.h> +#include <vespa/document/config/documenttypes_config_fwd.h> #include <vespa/document/datatype/datatype.h> #include <vespa/document/fieldvalue/document.h> #include 
<vespa/document/update/documentupdate.h> diff --git a/searchcore/src/tests/proton/index/indexcollection_test.cpp b/searchcore/src/tests/proton/index/indexcollection_test.cpp index 07fbacde49a..70141f057bf 100644 --- a/searchcore/src/tests/proton/index/indexcollection_test.cpp +++ b/searchcore/src/tests/proton/index/indexcollection_test.cpp @@ -25,7 +25,7 @@ public: MockIndexSearchable() : _field_length_info() {} - MockIndexSearchable(const FieldLengthInfo& field_length_info) + explicit MockIndexSearchable(const FieldLengthInfo& field_length_info) : _field_length_info(field_length_info) {} FieldLengthInfo get_field_length_info(const vespalib::string& field_name) const override { @@ -79,17 +79,17 @@ public: return std::make_unique<WarmupIndexCollection>(WarmupConfig(1s, false), prev, next, *_warmup, _executor, *this); } - virtual void warmupDone(ISearchableIndexCollection::SP current) override { + void warmupDone(std::shared_ptr<WarmupIndexCollection> current) override { (void) current; } IndexCollectionTest() - : _selector(new FixedSourceSelector(0, "fs1")), - _source1(new MockIndexSearchable({3, 5})), - _source2(new MockIndexSearchable({7, 11})), - _fusion_source(new FakeIndexSearchable), + : _selector(std::make_shared<FixedSourceSelector>(0, "fs1")), + _source1(std::make_shared<MockIndexSearchable>(FieldLengthInfo(3, 5))), + _source2(std::make_shared<MockIndexSearchable>(FieldLengthInfo(7, 11))), + _fusion_source(std::make_shared<FakeIndexSearchable>()), _executor(1, 128_Ki), - _warmup(new FakeIndexSearchable) + _warmup(std::make_shared<FakeIndexSearchable>()) {} ~IndexCollectionTest() = default; }; diff --git a/searchcore/src/tests/proton/index/indexmanager_test.cpp b/searchcore/src/tests/proton/index/indexmanager_test.cpp index d6bbc77aa09..1e33482b055 100644 --- a/searchcore/src/tests/proton/index/indexmanager_test.cpp +++ b/searchcore/src/tests/proton/index/indexmanager_test.cpp @@ -210,8 +210,8 @@ IndexManagerTest::resetIndexManager() { 
_index_manager.reset(); _index_manager = std::make_unique<IndexManager>(index_dir, IndexConfig(), getSchema(), 1, - _reconfigurer, _writeService, _writeService.master(), - TuneFileIndexManager(), TuneFileAttributes(),_fileHeaderContext); + _reconfigurer, _writeService, _sharedExecutor, + TuneFileIndexManager(), TuneFileAttributes(), _fileHeaderContext); } void diff --git a/searchcore/src/tests/proton/matchengine/matchengine.cpp b/searchcore/src/tests/proton/matchengine/matchengine.cpp index 481a9f061be..34c36fd9a72 100644 --- a/searchcore/src/tests/proton/matchengine/matchengine.cpp +++ b/searchcore/src/tests/proton/matchengine/matchengine.cpp @@ -17,7 +17,7 @@ class MySearchHandler : public ISearchHandler { std::string _name; std::string _reply; public: - MySearchHandler(size_t numHits = 0) : + explicit MySearchHandler(size_t numHits = 0) : _numHits(numHits), _name("my"), _reply("myreply") {} DocsumReply::UP getDocsums(const DocsumRequest &) override { @@ -91,6 +91,7 @@ assertSearchReply(MatchEngine & engine, const std::string & searchDocType, size_ LocalSearchClient client; engine.search(SearchRequest::Source(request), client); SearchReply::UP reply = client.getReply(10000); + ASSERT_TRUE(reply); return EXPECT_EQUAL(expHits, reply->hits.size()); } @@ -173,11 +174,24 @@ TEST("requireThatEmptySearchReplyIsReturnedWhenEngineIsClosed") LocalSearchClient client; SearchRequest::Source request(new SearchRequest()); SearchReply::UP reply = engine.search(std::move(request), client); - EXPECT_TRUE(reply ); + ASSERT_TRUE(reply); EXPECT_EQUAL(0u, reply->hits.size()); EXPECT_EQUAL(7u, reply->getDistributionKey()); } +namespace { + +constexpr const char* search_interface_offline_slime_str() noexcept { + return "{\n" + " \"status\": {\n" + " \"state\": \"OFFLINE\",\n" + " \"message\": \"Search interface is offline\"\n" + " }\n" + "}\n"; +} + +} + TEST("requireThatStateIsReported") { MatchEngine engine(1, 1, 7); @@ -185,14 +199,44 @@ TEST("requireThatStateIsReported") Slime 
slime; SlimeInserter inserter(slime); engine.get_state(inserter, false); - EXPECT_EQUAL( - "{\n" - " \"status\": {\n" - " \"state\": \"OFFLINE\",\n" - " \"message\": \"Search interface is offline\"\n" - " }\n" - "}\n", - slime.toString()); + EXPECT_EQUAL(search_interface_offline_slime_str(), + slime.toString()); +} + +TEST("searches are executed when node is in maintenance mode") +{ + MatchEngine engine(1, 1, 7); + engine.setNodeMaintenance(true); + engine.putSearchHandler(DocTypeName("foo"), std::make_shared<MySearchHandler>(3)); + EXPECT_TRUE(assertSearchReply(engine, "foo", 3)); +} + +TEST("setNodeMaintenance(true) implies setNodeUp(false)") +{ + MatchEngine engine(1, 1, 7); + engine.setNodeUp(true); + engine.setNodeMaintenance(true); + EXPECT_FALSE(engine.isOnline()); +} + +TEST("setNodeMaintenance(false) does not imply setNodeUp(false)") +{ + MatchEngine engine(1, 1, 7); + engine.setNodeUp(true); + engine.setNodeMaintenance(false); + EXPECT_TRUE(engine.isOnline()); +} + +TEST("search interface is reported as offline when node is in maintenance mode") +{ + MatchEngine engine(1, 1, 7); + engine.setNodeMaintenance(true); + + Slime slime; + SlimeInserter inserter(slime); + engine.get_state(inserter, false); + EXPECT_EQUAL(search_interface_offline_slime_str(), + slime.toString()); } TEST_MAIN() { TEST_RUN_ALL(); } diff --git a/searchcore/src/tests/proton/proton_config_fetcher/proton_config_fetcher_test.cpp b/searchcore/src/tests/proton/proton_config_fetcher/proton_config_fetcher_test.cpp index a95c4a00f0b..6d3eaa30263 100644 --- a/searchcore/src/tests/proton/proton_config_fetcher/proton_config_fetcher_test.cpp +++ b/searchcore/src/tests/proton/proton_config_fetcher/proton_config_fetcher_test.cpp @@ -13,6 +13,7 @@ #include <vespa/searchcore/config/config-ranking-expressions.h> #include <vespa/searchcore/config/config-onnx-models.h> #include <vespa/searchsummary/config/config-juniperrc.h> +#include <vespa/document/config/documenttypes_config_fwd.h> #include 
<vespa/document/repo/documenttyperepo.h> #include <vespa/fileacquirer/config-filedistributorrpc.h> #include <vespa/vespalib/util/varholder.h> @@ -40,13 +41,11 @@ using vespa::config::content::core::BucketspacesConfigBuilder; using config::ConfigUri; using document::DocumentTypeRepo; -using document::DocumenttypesConfig; -using document::DocumenttypesConfigBuilder; using search::TuneFileDocumentDB; using std::map; using vespalib::VarHolder; using search::GrowStrategy; -using search::CompactionStrategy; +using vespalib::datastore::CompactionStrategy; struct DoctypeFixture { using UP = std::unique_ptr<DoctypeFixture>; diff --git a/searchcore/src/tests/proton/proton_configurer/proton_configurer_test.cpp b/searchcore/src/tests/proton/proton_configurer/proton_configurer_test.cpp index 55b2c7194ba..aa819d08b58 100644 --- a/searchcore/src/tests/proton/proton_configurer/proton_configurer_test.cpp +++ b/searchcore/src/tests/proton/proton_configurer/proton_configurer_test.cpp @@ -7,6 +7,7 @@ #include <vespa/config-rank-profiles.h> #include <vespa/config-summary.h> #include <vespa/config-summarymap.h> +#include <vespa/document/config/documenttypes_config_fwd.h> #include <vespa/document/repo/documenttyperepo.h> #include <vespa/fileacquirer/config-filedistributorrpc.h> #include <vespa/searchcore/proton/common/alloc_config.h> @@ -41,8 +42,6 @@ using vespa::config::content::core::BucketspacesConfigBuilder; using InitializeThreads = std::shared_ptr<vespalib::ThreadStackExecutorBase>; using config::ConfigUri; using document::DocumentTypeRepo; -using document::DocumenttypesConfig; -using document::DocumenttypesConfigBuilder; using search::TuneFileDocumentDB; using std::map; using search::index::Schema; diff --git a/searchcore/src/tests/proton/reference/gid_to_lid_change_handler/gid_to_lid_change_handler_test.cpp b/searchcore/src/tests/proton/reference/gid_to_lid_change_handler/gid_to_lid_change_handler_test.cpp index 3474a4297c7..516c31cb232 100644 --- 
a/searchcore/src/tests/proton/reference/gid_to_lid_change_handler/gid_to_lid_change_handler_test.cpp +++ b/searchcore/src/tests/proton/reference/gid_to_lid_change_handler/gid_to_lid_change_handler_test.cpp @@ -30,6 +30,10 @@ vespalib::string doc1("id:test:music::1"); } +TEST("control sizeof(PendingGidToLidChange)") { + EXPECT_EQUAL(48u, sizeof(PendingGidToLidChange)); +} + class ListenerStats { using lock_guard = std::lock_guard<std::mutex>; std::mutex _lock; diff --git a/searchcore/src/tests/proton/server/documentretriever_test.cpp b/searchcore/src/tests/proton/server/documentretriever_test.cpp index aed9f44799a..e631388c9b8 100644 --- a/searchcore/src/tests/proton/server/documentretriever_test.cpp +++ b/searchcore/src/tests/proton/server/documentretriever_test.cpp @@ -190,7 +190,7 @@ struct MyDocumentStore : proton::test::DummyDocumentStore { MyDocumentStore::~MyDocumentStore() = default; -document::DocumenttypesConfig getRepoConfig() { +DocumenttypesConfig getRepoConfig() { const int32_t doc_type_id = 787121340; DocumenttypesConfigBuilderHelper builder; diff --git a/searchcore/src/tests/proton/server/shared_threading_service/CMakeLists.txt b/searchcore/src/tests/proton/server/shared_threading_service/CMakeLists.txt new file mode 100644 index 00000000000..9b40ae19c99 --- /dev/null +++ b/searchcore/src/tests/proton/server/shared_threading_service/CMakeLists.txt @@ -0,0 +1,9 @@ +# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+vespa_add_executable(searchcore_shared_threading_service_test_app TEST + SOURCES + shared_threading_service_test.cpp + DEPENDS + searchcore_server + GTest::GTest +) +vespa_add_test(NAME searchcore_shared_threading_service_test_app COMMAND searchcore_shared_threading_service_test_app) diff --git a/searchcore/src/tests/proton/server/shared_threading_service/shared_threading_service_test.cpp b/searchcore/src/tests/proton/server/shared_threading_service/shared_threading_service_test.cpp new file mode 100644 index 00000000000..e90bfc8ae57 --- /dev/null +++ b/searchcore/src/tests/proton/server/shared_threading_service/shared_threading_service_test.cpp @@ -0,0 +1,79 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. + +#include <vespa/searchcore/config/config-proton.h> +#include <vespa/searchcore/proton/server/shared_threading_service.h> +#include <vespa/searchcore/proton/server/shared_threading_service_config.h> +#include <vespa/vespalib/util/isequencedtaskexecutor.h> +#include <vespa/vespalib/util/sequencedtaskexecutor.h> +#include <vespa/vespalib/gtest/gtest.h> + +using namespace proton; +using vespalib::ISequencedTaskExecutor; +using vespalib::SequencedTaskExecutor; +using ProtonConfig = vespa::config::search::core::ProtonConfig; +using ProtonConfigBuilder = vespa::config::search::core::ProtonConfigBuilder; + +ProtonConfig +make_proton_config(double concurrency) +{ + ProtonConfigBuilder builder; + // This setup requires a minimum of 4 shared threads. 
+ builder.documentdb.push_back(ProtonConfig::Documentdb()); + builder.documentdb.push_back(ProtonConfig::Documentdb()); + builder.flush.maxconcurrent = 1; + + builder.feeding.concurrency = concurrency; + builder.feeding.sharedFieldWriterExecutor = ProtonConfig::Feeding::SharedFieldWriterExecutor::DOCUMENT_DB; + builder.indexing.tasklimit = 300; + return builder; +} + +void +expect_shared_threads(uint32_t exp_threads, uint32_t cpu_cores) +{ + auto cfg = SharedThreadingServiceConfig::make(make_proton_config(0.5), HwInfo::Cpu(cpu_cores)); + EXPECT_EQ(exp_threads, cfg.shared_threads()); + EXPECT_EQ(exp_threads * 16, cfg.shared_task_limit()); +} + +TEST(SharedThreadingServiceConfigTest, shared_threads_are_derived_from_cpu_cores_and_feeding_concurrency) +{ + expect_shared_threads(4, 1); + expect_shared_threads(4, 6); + expect_shared_threads(4, 8); + expect_shared_threads(5, 9); + expect_shared_threads(5, 10); +} + +class SharedThreadingServiceTest : public ::testing::Test { +public: + std::unique_ptr<SharedThreadingService> service; + SharedThreadingServiceTest() + : service() + { + } + void setup(double concurrency, uint32_t cpu_cores) { + service = std::make_unique<SharedThreadingService>( + SharedThreadingServiceConfig::make(make_proton_config(concurrency), HwInfo::Cpu(cpu_cores))); + } + SequencedTaskExecutor* field_writer() { + return dynamic_cast<SequencedTaskExecutor*>(service->field_writer()); + } +}; + +void +assert_executor(SequencedTaskExecutor* exec, uint32_t exp_executors, uint32_t exp_task_limit) +{ + EXPECT_EQ(exp_executors, exec->getNumExecutors()); + EXPECT_EQ(exp_task_limit, exec->first_executor()->getTaskLimit()); +} + +TEST_F(SharedThreadingServiceTest, field_writer_can_be_shared_across_all_document_dbs) +{ + setup(0.75, 8); + EXPECT_TRUE(field_writer()); + EXPECT_EQ(6, field_writer()->getNumExecutors()); + EXPECT_EQ(300, field_writer()->first_executor()->getTaskLimit()); +} + +GTEST_MAIN_RUN_ALL_TESTS() diff --git 
a/searchcore/src/vespa/searchcore/bmcluster/bm_cluster.h b/searchcore/src/vespa/searchcore/bmcluster/bm_cluster.h index 9fd1743321e..de9a14cefd9 100644 --- a/searchcore/src/vespa/searchcore/bmcluster/bm_cluster.h +++ b/searchcore/src/vespa/searchcore/bmcluster/bm_cluster.h @@ -2,6 +2,7 @@ #pragma once +#include <vespa/document/config/documenttypes_config_fwd.h> #include "bm_cluster_params.h" #include <memory> #include <vector> @@ -19,7 +20,6 @@ class DocumentTypeRepo; class FieldSetRepo; } -namespace document::internal { class InternalDocumenttypesType; } namespace mbus { class Slobrok; } namespace storage::rpc { class SharedRpcResources; } @@ -41,7 +41,6 @@ class IBmFeedHandler; class BmCluster { struct MessageBusConfigSet; struct RpcClientConfigSet; - using DocumenttypesConfig = const document::internal::InternalDocumenttypesType; BmClusterParams _params; int _slobrok_port; int _rpc_client_port; diff --git a/searchcore/src/vespa/searchcore/bmcluster/bm_node.cpp b/searchcore/src/vespa/searchcore/bmcluster/bm_node.cpp index 0882153edd6..db2060bacf7 100644 --- a/searchcore/src/vespa/searchcore/bmcluster/bm_node.cpp +++ b/searchcore/src/vespa/searchcore/bmcluster/bm_node.cpp @@ -1,19 +1,20 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
-#include "bm_node.h" #include "bm_cluster.h" #include "bm_cluster_params.h" #include "bm_message_bus.h" +#include "bm_node.h" #include "bm_node_stats.h" #include "bm_storage_chain_builder.h" #include "bm_storage_link_context.h" -#include "storage_api_chain_bm_feed_handler.h" -#include "storage_api_message_bus_bm_feed_handler.h" -#include "storage_api_rpc_bm_feed_handler.h" #include "document_api_message_bus_bm_feed_handler.h" #include "i_bm_distribution.h" #include "i_bm_feed_handler.h" #include "spi_bm_feed_handler.h" +#include "storage_api_chain_bm_feed_handler.h" +#include "storage_api_message_bus_bm_feed_handler.h" +#include "storage_api_rpc_bm_feed_handler.h" +#include <tests/proton/common/dummydbowner.h> #include <vespa/config-attributes.h> #include <vespa/config-bucketspaces.h> #include <vespa/config-imported-fields.h> @@ -28,6 +29,7 @@ #include <vespa/config-upgrading.h> #include <vespa/config/common/configcontext.h> #include <vespa/document/bucket/bucketspace.h> +#include <vespa/document/config/documenttypes_config_fwd.h> #include <vespa/document/repo/configbuilder.h> #include <vespa/document/repo/document_type_repo_factory.h> #include <vespa/document/repo/documenttyperepo.h> @@ -39,18 +41,19 @@ #include <vespa/searchcore/proton/common/alloc_config.h> #include <vespa/searchcore/proton/matching/querylimiter.h> #include <vespa/searchcore/proton/metrics/metricswireservice.h> -#include <vespa/searchcore/proton/persistenceengine/ipersistenceengineowner.h> #include <vespa/searchcore/proton/persistenceengine/i_resource_write_filter.h> +#include <vespa/searchcore/proton/persistenceengine/ipersistenceengineowner.h> #include <vespa/searchcore/proton/persistenceengine/persistenceengine.h> #include <vespa/searchcore/proton/server/bootstrapconfig.h> -#include <vespa/searchcore/proton/server/documentdb.h> #include <vespa/searchcore/proton/server/document_db_maintenance_config.h> #include <vespa/searchcore/proton/server/document_meta_store_read_guards.h> +#include 
<vespa/searchcore/proton/server/documentdb.h> #include <vespa/searchcore/proton/server/documentdbconfigmanager.h> #include <vespa/searchcore/proton/server/fileconfigmanager.h> #include <vespa/searchcore/proton/server/memoryconfigstore.h> #include <vespa/searchcore/proton/server/persistencehandlerproxy.h> #include <vespa/searchcore/proton/test/disk_mem_usage_notifier.h> +#include <vespa/searchcore/proton/test/mock_shared_threading_service.h> #include <vespa/searchlib/index/dummyfileheadercontext.h> #include <vespa/searchlib/transactionlog/translogserver.h> #include <vespa/searchsummary/config/config-juniperrc.h> @@ -75,7 +78,6 @@ #include <vespa/vespalib/io/fileutil.h> #include <vespa/vespalib/stllike/asciistream.h> #include <vespa/vespalib/util/size_literals.h> -#include <tests/proton/common/dummydbowner.h> #include <vespa/log/log.h> LOG_SETUP(".bmcluster.bm_node"); @@ -84,8 +86,6 @@ using cloud::config::SlobroksConfigBuilder; using cloud::config::filedistribution::FiledistributorrpcConfig; using config::ConfigSet; using document::BucketSpace; -using document::DocumenttypesConfig; -using document::DocumenttypesConfigBuilder; using document::DocumentType; using document::DocumentTypeRepo; using document::Field; @@ -459,6 +459,7 @@ class MyBmNode : public BmNode proton::DummyWireService _metrics_wire_service; proton::MemoryConfigStores _config_stores; vespalib::ThreadStackExecutor _summary_executor; + proton::MockSharedThreadingService _shared_service; proton::DummyDBOwner _document_db_owner; BucketSpace _bucket_space; std::shared_ptr<DocumentDB> _document_db; @@ -481,7 +482,7 @@ class MyBmNode : public BmNode void create_document_db(const BmClusterParams& params); public: - MyBmNode(const vespalib::string &base_dir, int base_port, uint32_t node_idx, BmCluster& cluster, const BmClusterParams& params, std::shared_ptr<document::DocumenttypesConfig> document_types, int slobrok_port); + MyBmNode(const vespalib::string &base_dir, int base_port, uint32_t node_idx, 
BmCluster& cluster, const BmClusterParams& params, std::shared_ptr<DocumenttypesConfig> document_types, int slobrok_port); ~MyBmNode() override; void initialize_persistence_provider() override; void create_bucket(const document::Bucket& bucket) override; @@ -498,7 +499,7 @@ public: void merge_node_stats(std::vector<BmNodeStats>& node_stats, storage::lib::ClusterState &baseline_state) override; }; -MyBmNode::MyBmNode(const vespalib::string& base_dir, int base_port, uint32_t node_idx, BmCluster& cluster, const BmClusterParams& params, std::shared_ptr<document::DocumenttypesConfig> document_types, int slobrok_port) +MyBmNode::MyBmNode(const vespalib::string& base_dir, int base_port, uint32_t node_idx, BmCluster& cluster, const BmClusterParams& params, std::shared_ptr<DocumenttypesConfig> document_types, int slobrok_port) : BmNode(), _cluster(cluster), _document_types(std::move(document_types)), @@ -523,6 +524,7 @@ MyBmNode::MyBmNode(const vespalib::string& base_dir, int base_port, uint32_t nod _metrics_wire_service(), _config_stores(), _summary_executor(8, 128_Ki), + _shared_service(_summary_executor, _summary_executor), _document_db_owner(), _bucket_space(document::test::makeBucketSpace(_doc_type_name.getName())), _document_db(), @@ -594,7 +596,7 @@ MyBmNode::create_document_db(const BmClusterParams& params) mgr.nextGeneration(0ms); _document_db = DocumentDB::create(_base_dir, mgr.getConfig(), _tls_spec, _query_limiter, _clock, _doc_type_name, _bucket_space, *bootstrap_config->getProtonConfigSP(), _document_db_owner, - _summary_executor, _summary_executor, *_persistence_engine, _tls, + _shared_service, *_persistence_engine, _tls, _metrics_wire_service, _file_header_context, _config_stores.getConfigStore(_doc_type_name.toString()), std::make_shared<vespalib::ThreadStackExecutor>(16, 128_Ki), HwInfo()); @@ -797,7 +799,7 @@ MyBmNode::merge_node_stats(std::vector<BmNodeStats>& node_stats, storage::lib::C } std::unique_ptr<BmNode> -BmNode::create(const vespalib::string& 
base_dir, int base_port, uint32_t node_idx, BmCluster &cluster, const BmClusterParams& params, std::shared_ptr<document::DocumenttypesConfig> document_types, int slobrok_port) +BmNode::create(const vespalib::string& base_dir, int base_port, uint32_t node_idx, BmCluster &cluster, const BmClusterParams& params, std::shared_ptr<DocumenttypesConfig> document_types, int slobrok_port) { return std::make_unique<MyBmNode>(base_dir, base_port, node_idx, cluster, params, std::move(document_types), slobrok_port); } diff --git a/searchcore/src/vespa/searchcore/bmcluster/bm_node.h b/searchcore/src/vespa/searchcore/bmcluster/bm_node.h index 49c80db44ce..86c5b2ee095 100644 --- a/searchcore/src/vespa/searchcore/bmcluster/bm_node.h +++ b/searchcore/src/vespa/searchcore/bmcluster/bm_node.h @@ -15,8 +15,6 @@ class Field; }; -namespace document::internal { class InternalDocumenttypesType; } - namespace storage::lib { class ClusterState; } namespace storage::spi { struct PersistenceProvider; } @@ -52,7 +50,7 @@ public: virtual storage::spi::PersistenceProvider *get_persistence_provider() = 0; virtual void merge_node_stats(std::vector<BmNodeStats>& node_stats, storage::lib::ClusterState &baseline_state) = 0; static unsigned int num_ports(); - static std::unique_ptr<BmNode> create(const vespalib::string &base_dir, int base_port, uint32_t node_idx, BmCluster& cluster, const BmClusterParams& params, std::shared_ptr<const document::internal::InternalDocumenttypesType> document_types, int slobrok_port); + static std::unique_ptr<BmNode> create(const vespalib::string &base_dir, int base_port, uint32_t node_idx, BmCluster& cluster, const BmClusterParams& params, std::shared_ptr<DocumenttypesConfig> document_types, int slobrok_port); }; } diff --git a/searchcore/src/vespa/searchcore/config/proton.def b/searchcore/src/vespa/searchcore/config/proton.def index 5857fdd4f8d..30f0081a8f0 100644 --- a/searchcore/src/vespa/searchcore/config/proton.def +++ 
b/searchcore/src/vespa/searchcore/config/proton.def @@ -144,7 +144,7 @@ indexing.semiunboundtasklimit int default = 1000 indexing.kind_of_watermark int default = 0 restart ## Controls minimum reaction time in seconds if using THROUGHPUT -indexing.reactiontime double default = 0.005 restart +indexing.reactiontime double default = 0.001 restart ## How long a freshly loaded index shall be warmed up diff --git a/searchcore/src/vespa/searchcore/proton/attribute/CMakeLists.txt b/searchcore/src/vespa/searchcore/proton/attribute/CMakeLists.txt index 4f8bc362655..856c89eae37 100644 --- a/searchcore/src/vespa/searchcore/proton/attribute/CMakeLists.txt +++ b/searchcore/src/vespa/searchcore/proton/attribute/CMakeLists.txt @@ -38,4 +38,5 @@ vespa_add_library(searchcore_attribute STATIC sequential_attributes_initializer.cpp DEPENDS searchcore_flushengine + searchcore_pcommon ) diff --git a/searchcore/src/vespa/searchcore/proton/attribute/attribute_collection_spec_factory.cpp b/searchcore/src/vespa/searchcore/proton/attribute/attribute_collection_spec_factory.cpp index e30c246e53a..da1b733ca0a 100644 --- a/searchcore/src/vespa/searchcore/proton/attribute/attribute_collection_spec_factory.cpp +++ b/searchcore/src/vespa/searchcore/proton/attribute/attribute_collection_spec_factory.cpp @@ -16,6 +16,8 @@ AttributeCollectionSpecFactory::AttributeCollectionSpecFactory( { } +AttributeCollectionSpecFactory::~AttributeCollectionSpecFactory() = default; + AttributeCollectionSpec::UP AttributeCollectionSpecFactory::create(const AttributesConfig &attrCfg, uint32_t docIdLimit, diff --git a/searchcore/src/vespa/searchcore/proton/attribute/attribute_collection_spec_factory.h b/searchcore/src/vespa/searchcore/proton/attribute/attribute_collection_spec_factory.h index aed99ea7d52..3565b533e02 100644 --- a/searchcore/src/vespa/searchcore/proton/attribute/attribute_collection_spec_factory.h +++ b/searchcore/src/vespa/searchcore/proton/attribute/attribute_collection_spec_factory.h @@ -25,6 +25,7 @@ 
private: public: AttributeCollectionSpecFactory(const AllocStrategy& alloc_strategy, bool fastAccessOnly); + ~AttributeCollectionSpecFactory(); AttributeCollectionSpec::UP create(const AttributesConfig &attrCfg, uint32_t docIdLimit, diff --git a/searchcore/src/vespa/searchcore/proton/attribute/attribute_initializer.cpp b/searchcore/src/vespa/searchcore/proton/attribute/attribute_initializer.cpp index 5c695f7b0f2..66be0737fe9 100644 --- a/searchcore/src/vespa/searchcore/proton/attribute/attribute_initializer.cpp +++ b/searchcore/src/vespa/searchcore/proton/attribute/attribute_initializer.cpp @@ -119,7 +119,7 @@ extractHeader(const vespalib::string &attrFileName) auto df = search::FileUtil::openFile(attrFileName + ".dat"); vespalib::FileHeader datHeader; datHeader.readFile(*df); - return AttributeHeader::extractTags(datHeader); + return AttributeHeader::extractTags(datHeader, attrFileName); } void diff --git a/searchcore/src/vespa/searchcore/proton/attribute/attribute_manager_initializer.cpp b/searchcore/src/vespa/searchcore/proton/attribute/attribute_manager_initializer.cpp index 1c730b063f8..d8c2d4e6e72 100644 --- a/searchcore/src/vespa/searchcore/proton/attribute/attribute_manager_initializer.cpp +++ b/searchcore/src/vespa/searchcore/proton/attribute/attribute_manager_initializer.cpp @@ -7,10 +7,10 @@ #include <future> using search::AttributeVector; -using search::CompactionStrategy; using search::GrowStrategy; using search::SerialNum; using vespa::config::search::AttributesConfig; +using vespalib::datastore::CompactionStrategy; namespace proton { @@ -165,6 +165,8 @@ AttributeManagerInitializer::AttributeManagerInitializer(SerialNum configSerialN _attrMgr = std::make_shared<AttributeManager>(*baseAttrMgr, *attrSpec, tasksBuilder); } +AttributeManagerInitializer::~AttributeManagerInitializer() = default; + void AttributeManagerInitializer::run() { diff --git a/searchcore/src/vespa/searchcore/proton/attribute/attribute_manager_initializer.h 
b/searchcore/src/vespa/searchcore/proton/attribute/attribute_manager_initializer.h index 5e5361dfff3..cd2e044dee6 100644 --- a/searchcore/src/vespa/searchcore/proton/attribute/attribute_manager_initializer.h +++ b/searchcore/src/vespa/searchcore/proton/attribute/attribute_manager_initializer.h @@ -42,6 +42,7 @@ public: bool fastAccessAttributesOnly, searchcorespi::index::IThreadService &master, std::shared_ptr<AttributeManager::SP> attrMgrResult); + ~AttributeManagerInitializer() override; virtual void run() override; }; diff --git a/searchcore/src/vespa/searchcore/proton/attribute/attribute_writer.cpp b/searchcore/src/vespa/searchcore/proton/attribute/attribute_writer.cpp index d69f07354ef..52b367fd14b 100644 --- a/searchcore/src/vespa/searchcore/proton/attribute/attribute_writer.cpp +++ b/searchcore/src/vespa/searchcore/proton/attribute/attribute_writer.cpp @@ -259,7 +259,7 @@ applyCommit(CommitParam param, AttributeWriter::OnWriteDoneType , AttributeVecto void applyCompactLidSpace(uint32_t wantedLidLimit, SerialNum serialNum, AttributeVector &attr) { - if (attr.getStatus().getLastSyncToken() < serialNum) { + if (attr.getStatus().getLastSyncToken() <= serialNum) { /* * If the attribute is an empty placeholder attribute due to * later config changes removing the attribute then it might @@ -549,7 +549,6 @@ public: for (auto lidToRemove : _lidsToRemove) { applyRemoveToAttribute(_serialNum, lidToRemove, attr, _onWriteDone); } - attr.commit(false); } } } @@ -801,12 +800,13 @@ AttributeWriter::update(SerialNum serialNum, const DocumentUpdate &upd, Document } void -AttributeWriter::heartBeat(SerialNum serialNum) +AttributeWriter::heartBeat(SerialNum serialNum, OnWriteDoneType onDone) { for (auto entry : _attrMap) { - _attributeFieldWriter.execute(entry.second.executor_id, - [serialNum, attr=entry.second.attribute]() - { applyHeartBeat(serialNum, *attr); }); + _attributeFieldWriter.execute(entry.second.executor_id,[serialNum, attr=entry.second.attribute, onDone]() { + 
(void) onDone; + applyHeartBeat(serialNum, *attr); + }); } } diff --git a/searchcore/src/vespa/searchcore/proton/attribute/attribute_writer.h b/searchcore/src/vespa/searchcore/proton/attribute/attribute_writer.h index dc543c19222..f43aab0f385 100644 --- a/searchcore/src/vespa/searchcore/proton/attribute/attribute_writer.h +++ b/searchcore/src/vespa/searchcore/proton/attribute/attribute_writer.h @@ -109,7 +109,7 @@ public: void update(SerialNum serialNum, const DocumentUpdate &upd, DocumentIdT lid, OnWriteDoneType onWriteDone, IFieldUpdateCallback & onUpdate) override; void update(SerialNum serialNum, const Document &doc, DocumentIdT lid, OnWriteDoneType onWriteDone) override; - void heartBeat(SerialNum serialNum) override; + void heartBeat(SerialNum serialNum, OnWriteDoneType onDone) override; void compactLidSpace(uint32_t wantedLidLimit, SerialNum serialNum) override; const proton::IAttributeManager::SP &getAttributeManager() const override { return _mgr; diff --git a/searchcore/src/vespa/searchcore/proton/attribute/attributemanager.cpp b/searchcore/src/vespa/searchcore/proton/attribute/attributemanager.cpp index 5ee162e9264..eee6264b9f4 100644 --- a/searchcore/src/vespa/searchcore/proton/attribute/attributemanager.cpp +++ b/searchcore/src/vespa/searchcore/proton/attribute/attributemanager.cpp @@ -595,7 +595,7 @@ AttributeManager::asyncForEachAttribute(std::shared_ptr<IConstAttributeFunctor> } void -AttributeManager::asyncForEachAttribute(std::shared_ptr<IAttributeFunctor> func) const +AttributeManager::asyncForEachAttribute(std::shared_ptr<IAttributeFunctor> func, OnDone onDone) const { for (const auto &attr : _attributes) { if (attr.second.isExtra()) { @@ -604,7 +604,10 @@ AttributeManager::asyncForEachAttribute(std::shared_ptr<IAttributeFunctor> func) } AttributeVector::SP attrsp = attr.second.getAttribute(); _attributeFieldWriter.execute(_attributeFieldWriter.getExecutorIdFromName(attrsp->getNamePrefix()), - [attrsp, func]() { (*func)(*attrsp); }); + [attrsp, 
func, onDone]() { + (void) onDone; + (*func)(*attrsp); + }); } } diff --git a/searchcore/src/vespa/searchcore/proton/attribute/attributemanager.h b/searchcore/src/vespa/searchcore/proton/attribute/attributemanager.h index e2b9550435d..08e2d511d70 100644 --- a/searchcore/src/vespa/searchcore/proton/attribute/attributemanager.h +++ b/searchcore/src/vespa/searchcore/proton/attribute/attributemanager.h @@ -178,7 +178,7 @@ public: const std::vector<search::AttributeVector *> &getWritableAttributes() const override; void asyncForEachAttribute(std::shared_ptr<IConstAttributeFunctor> func) const override; - void asyncForEachAttribute(std::shared_ptr<IAttributeFunctor> func) const override; + void asyncForEachAttribute(std::shared_ptr<IAttributeFunctor> func, OnDone onDone) const override; void asyncForAttribute(const vespalib::string &name, std::unique_ptr<IAttributeFunctor> func) const override; ExclusiveAttributeReadAccessor::UP getExclusiveReadAccessor(const vespalib::string &name) const override; diff --git a/searchcore/src/vespa/searchcore/proton/attribute/filter_attribute_manager.cpp b/searchcore/src/vespa/searchcore/proton/attribute/filter_attribute_manager.cpp index c7ab83ae590..5f162281d96 100644 --- a/searchcore/src/vespa/searchcore/proton/attribute/filter_attribute_manager.cpp +++ b/searchcore/src/vespa/searchcore/proton/attribute/filter_attribute_manager.cpp @@ -206,7 +206,7 @@ FilterAttributeManager::asyncForEachAttribute(std::shared_ptr<IConstAttributeFun } void -FilterAttributeManager::asyncForEachAttribute(std::shared_ptr<IAttributeFunctor> func) const +FilterAttributeManager::asyncForEachAttribute(std::shared_ptr<IAttributeFunctor> func, OnDone onDone) const { // Run by document db master thread std::vector<AttributeGuard> completeList; @@ -217,7 +217,10 @@ FilterAttributeManager::asyncForEachAttribute(std::shared_ptr<IAttributeFunctor> // Name must be extracted in document db master thread or attribute // writer thread 
attributeFieldWriter.execute(attributeFieldWriter.getExecutorIdFromName(attrsp->getNamePrefix()), - [attrsp, func]() { (*func)(*attrsp); }); + [attrsp, func, onDone]() { + (void) onDone; + (*func)(*attrsp); + }); } } diff --git a/searchcore/src/vespa/searchcore/proton/attribute/filter_attribute_manager.h b/searchcore/src/vespa/searchcore/proton/attribute/filter_attribute_manager.h index 1ae5f452218..1512ab32d62 100644 --- a/searchcore/src/vespa/searchcore/proton/attribute/filter_attribute_manager.h +++ b/searchcore/src/vespa/searchcore/proton/attribute/filter_attribute_manager.h @@ -52,7 +52,7 @@ public: search::AttributeVector * getWritableAttribute(const vespalib::string &name) const override; const std::vector<search::AttributeVector *> & getWritableAttributes() const override; void asyncForEachAttribute(std::shared_ptr<IConstAttributeFunctor> func) const override; - void asyncForEachAttribute(std::shared_ptr<IAttributeFunctor> func) const override; + void asyncForEachAttribute(std::shared_ptr<IAttributeFunctor> func, OnDone onDone) const override; ExclusiveAttributeReadAccessor::UP getExclusiveReadAccessor(const vespalib::string &name) const override; void setImportedAttributes(std::unique_ptr<ImportedAttributesRepo> attributes) override; const ImportedAttributesRepo *getImportedAttributes() const override; diff --git a/searchcore/src/vespa/searchcore/proton/attribute/i_attribute_manager.h b/searchcore/src/vespa/searchcore/proton/attribute/i_attribute_manager.h index d55cd45d014..b8968ba9d2e 100644 --- a/searchcore/src/vespa/searchcore/proton/attribute/i_attribute_manager.h +++ b/searchcore/src/vespa/searchcore/proton/attribute/i_attribute_manager.h @@ -31,7 +31,7 @@ class ImportedAttributesRepo; struct IAttributeManager : public search::IAttributeManager { using SP = std::shared_ptr<IAttributeManager>; - using OnWriteDoneType = const std::shared_ptr<vespalib::IDestructorCallback> &; + using OnDone = std::shared_ptr<vespalib::IDestructorCallback>; using 
IAttributeFunctor = search::attribute::IAttributeFunctor; using IConstAttributeFunctor = search::attribute::IConstAttributeFunctor; @@ -98,7 +98,7 @@ struct IAttributeManager : public search::IAttributeManager virtual const std::vector<search::AttributeVector *> &getWritableAttributes() const = 0; virtual void asyncForEachAttribute(std::shared_ptr<IConstAttributeFunctor> func) const = 0; - virtual void asyncForEachAttribute(std::shared_ptr<IAttributeFunctor> func) const = 0; + virtual void asyncForEachAttribute(std::shared_ptr<IAttributeFunctor> func, OnDone onDone) const = 0; virtual ExclusiveAttributeReadAccessor::UP getExclusiveReadAccessor(const vespalib::string &name) const = 0; diff --git a/searchcore/src/vespa/searchcore/proton/attribute/i_attribute_writer.h b/searchcore/src/vespa/searchcore/proton/attribute/i_attribute_writer.h index 0f739f6ffea..e0bac5facd4 100644 --- a/searchcore/src/vespa/searchcore/proton/attribute/i_attribute_writer.h +++ b/searchcore/src/vespa/searchcore/proton/attribute/i_attribute_writer.h @@ -50,7 +50,7 @@ public: * Update the underlying struct field attributes based on updated document. */ virtual void update(SerialNum serialNum, const Document &doc, DocumentIdT lid, OnWriteDoneType onWriteDone) = 0; - virtual void heartBeat(SerialNum serialNum) = 0; + virtual void heartBeat(SerialNum serialNum, OnWriteDoneType onDone) = 0; /** * Compact the lid space of the underlying attribute vectors. 
*/ diff --git a/searchcore/src/vespa/searchcore/proton/common/alloc_config.cpp b/searchcore/src/vespa/searchcore/proton/common/alloc_config.cpp index 3664f67f8fb..69a2d4f3ea9 100644 --- a/searchcore/src/vespa/searchcore/proton/common/alloc_config.cpp +++ b/searchcore/src/vespa/searchcore/proton/common/alloc_config.cpp @@ -4,8 +4,8 @@ #include <vespa/searchcore/proton/common/subdbtype.h> #include <algorithm> -using search::CompactionStrategy; using search::GrowStrategy; +using vespalib::datastore::CompactionStrategy; namespace proton { diff --git a/searchcore/src/vespa/searchcore/proton/common/alloc_strategy.cpp b/searchcore/src/vespa/searchcore/proton/common/alloc_strategy.cpp index cbe8309b031..32ac249f7e1 100644 --- a/searchcore/src/vespa/searchcore/proton/common/alloc_strategy.cpp +++ b/searchcore/src/vespa/searchcore/proton/common/alloc_strategy.cpp @@ -3,7 +3,6 @@ #include "alloc_strategy.h" #include <iostream> -using search::CompactionStrategy; using search::GrowStrategy; namespace proton { diff --git a/searchcore/src/vespa/searchcore/proton/common/alloc_strategy.h b/searchcore/src/vespa/searchcore/proton/common/alloc_strategy.h index 4771a8637cd..9c6e24e2bfe 100644 --- a/searchcore/src/vespa/searchcore/proton/common/alloc_strategy.h +++ b/searchcore/src/vespa/searchcore/proton/common/alloc_strategy.h @@ -2,8 +2,8 @@ #pragma once -#include <vespa/searchcommon/common/compaction_strategy.h> #include <vespa/searchcommon/common/growstrategy.h> +#include <vespa/vespalib/datastore/compaction_strategy.h> #include <iosfwd> namespace proton { @@ -14,14 +14,16 @@ namespace proton { */ class AllocStrategy { +public: + using CompactionStrategy = vespalib::datastore::CompactionStrategy; protected: const search::GrowStrategy _grow_strategy; - const search::CompactionStrategy _compaction_strategy; + const CompactionStrategy _compaction_strategy; const uint32_t _amortize_count; public: AllocStrategy(const search::GrowStrategy& grow_strategy, - const 
search::CompactionStrategy& compaction_strategy, + const CompactionStrategy& compaction_strategy, uint32_t amortize_count); AllocStrategy(); @@ -32,7 +34,7 @@ public: return !operator==(rhs); } const search::GrowStrategy& get_grow_strategy() const noexcept { return _grow_strategy; } - const search::CompactionStrategy& get_compaction_strategy() const noexcept { return _compaction_strategy; } + const CompactionStrategy& get_compaction_strategy() const noexcept { return _compaction_strategy; } uint32_t get_amortize_count() const noexcept { return _amortize_count; } }; diff --git a/searchcore/src/vespa/searchcore/proton/docsummary/summarycompacttarget.cpp b/searchcore/src/vespa/searchcore/proton/docsummary/summarycompacttarget.cpp index 07580817dc9..06bf8d0a8a6 100644 --- a/searchcore/src/vespa/searchcore/proton/docsummary/summarycompacttarget.cpp +++ b/searchcore/src/vespa/searchcore/proton/docsummary/summarycompacttarget.cpp @@ -2,7 +2,6 @@ #include "summarycompacttarget.h" #include <vespa/vespalib/util/lambdatask.h> -#include <vespa/searchcorespi/index/i_thread_service.h> #include <future> using search::IDocumentStore; @@ -10,21 +9,26 @@ using search::SerialNum; using vespalib::makeLambdaTask; using searchcorespi::FlushStats; using searchcorespi::IFlushTarget; +using searchcorespi::FlushTask; namespace proton { namespace { -class Compacter : public searchcorespi::FlushTask { +class Compacter : public FlushTask { private: IDocumentStore & _docStore; FlushStats & _stats; SerialNum _currSerial; + virtual void compact(IDocumentStore & docStore, SerialNum currSerial) const = 0; public: - Compacter(IDocumentStore & docStore, FlushStats & stats, SerialNum currSerial) : - _docStore(docStore), _stats(stats), _currSerial(currSerial) {} + Compacter(IDocumentStore & docStore, FlushStats & stats, SerialNum currSerial) + : _docStore(docStore), + _stats(stats), + _currSerial(currSerial) + {} void run() override { - _docStore.compact(_currSerial); + compact(_docStore, _currSerial); 
updateStats(); } void updateStats() { @@ -37,10 +41,32 @@ public: } }; +class CompactBloat : public Compacter { +public: + CompactBloat(IDocumentStore & docStore, FlushStats & stats, SerialNum currSerial) + : Compacter(docStore, stats, currSerial) + {} +private: + void compact(IDocumentStore & docStore, SerialNum currSerial) const override { + docStore.compactBloat(currSerial); + } +}; + +class CompactSpread : public Compacter { +public: + CompactSpread(IDocumentStore & docStore, FlushStats & stats, SerialNum currSerial) + : Compacter(docStore, stats, currSerial) + {} +private: + void compact(IDocumentStore & docStore, SerialNum currSerial) const override { + docStore.compactSpread(currSerial); + } +}; + } -SummaryCompactTarget::SummaryCompactTarget(searchcorespi::index::IThreadService & summaryService, IDocumentStore & docStore) - : IFlushTarget("summary.compact", Type::GC, Component::DOCUMENT_STORE), +SummaryGCTarget::SummaryGCTarget(const vespalib::string & name, vespalib::Executor & summaryService, IDocumentStore & docStore) + : IFlushTarget(name, Type::GC, Component::DOCUMENT_STORE), _summaryService(summaryService), _docStore(docStore), _lastStats() @@ -49,43 +75,69 @@ SummaryCompactTarget::SummaryCompactTarget(searchcorespi::index::IThreadService } IFlushTarget::MemoryGain -SummaryCompactTarget::getApproxMemoryGain() const +SummaryGCTarget::getApproxMemoryGain() const { return MemoryGain::noGain(_docStore.memoryUsed()); } IFlushTarget::DiskGain -SummaryCompactTarget::getApproxDiskGain() const +SummaryGCTarget::getApproxDiskGain() const { - uint64_t total(_docStore.getDiskFootprint()); - return DiskGain(total, total - std::min(total, static_cast<uint64_t>(_docStore.getMaxCompactGain()))); + size_t total(_docStore.getDiskFootprint()); + return DiskGain(total, total - std::min(total, getBloat(_docStore))); } IFlushTarget::Time -SummaryCompactTarget::getLastFlushTime() const +SummaryGCTarget::getLastFlushTime() const { return vespalib::system_clock::now(); } 
SerialNum -SummaryCompactTarget::getFlushedSerialNum() const +SummaryGCTarget::getFlushedSerialNum() const { return _docStore.tentativeLastSyncToken(); } IFlushTarget::Task::UP -SummaryCompactTarget::initFlush(SerialNum currentSerial, std::shared_ptr<search::IFlushToken>) +SummaryGCTarget::initFlush(SerialNum currentSerial, std::shared_ptr<search::IFlushToken>) { std::promise<Task::UP> promise; std::future<Task::UP> future = promise.get_future(); - _summaryService.execute(makeLambdaTask([&]() { promise.set_value(std::make_unique<Compacter>(_docStore, _lastStats, currentSerial)); })); + _summaryService.execute(makeLambdaTask([this, &promise,currentSerial]() { + promise.set_value(create(_docStore, _lastStats, currentSerial)); + })); return future.get(); } -uint64_t -SummaryCompactTarget::getApproxBytesToWriteToDisk() const +SummaryCompactBloatTarget::SummaryCompactBloatTarget(vespalib::Executor & summaryService, IDocumentStore & docStore) + : SummaryGCTarget("summary.compact_bloat", summaryService, docStore) { - return 0; +} + +size_t +SummaryCompactBloatTarget::getBloat(const search::IDocumentStore & docStore) const { + return docStore.getDiskBloat(); +} + +FlushTask::UP +SummaryCompactBloatTarget::create(IDocumentStore & docStore, FlushStats & stats, SerialNum currSerial) { + return std::make_unique<CompactBloat>(docStore, stats, currSerial); +} + +SummaryCompactSpreadTarget::SummaryCompactSpreadTarget(vespalib::Executor & summaryService, IDocumentStore & docStore) + : SummaryGCTarget("summary.compact_spread", summaryService, docStore) +{ +} + +size_t +SummaryCompactSpreadTarget::getBloat(const search::IDocumentStore & docStore) const { + return docStore.getMaxSpreadAsBloat(); +} + +FlushTask::UP +SummaryCompactSpreadTarget::create(IDocumentStore & docStore, FlushStats & stats, SerialNum currSerial) { + return std::make_unique<CompactSpread>(docStore, stats, currSerial); } } // namespace proton diff --git 
a/searchcore/src/vespa/searchcore/proton/docsummary/summarycompacttarget.h b/searchcore/src/vespa/searchcore/proton/docsummary/summarycompacttarget.h index a5f39e953a5..083f763d8e6 100644 --- a/searchcore/src/vespa/searchcore/proton/docsummary/summarycompacttarget.h +++ b/searchcore/src/vespa/searchcore/proton/docsummary/summarycompacttarget.h @@ -12,26 +12,52 @@ namespace proton { /** * This class implements the IFlushTarget interface to proxy a summary manager. */ -class SummaryCompactTarget : public searchcorespi::IFlushTarget { -private: +class SummaryGCTarget : public searchcorespi::IFlushTarget { +public: using FlushStats = searchcorespi::FlushStats; - searchcorespi::index::IThreadService &_summaryService; - search::IDocumentStore & _docStore; - FlushStats _lastStats; + using IDocumentStore = search::IDocumentStore; + MemoryGain getApproxMemoryGain() const override; + DiskGain getApproxDiskGain() const override; + SerialNum getFlushedSerialNum() const override; + Time getLastFlushTime() const override; -public: - SummaryCompactTarget(searchcorespi::index::IThreadService & summaryService, search::IDocumentStore & docStore); + Task::UP initFlush(SerialNum currentSerial, std::shared_ptr<search::IFlushToken> flush_token) override; + + FlushStats getLastFlushStats() const override { return _lastStats; } + uint64_t getApproxBytesToWriteToDisk() const override { return 0; } +protected: + SummaryGCTarget(const vespalib::string &, vespalib::Executor & summaryService, IDocumentStore & docStore); +private: + + virtual size_t getBloat(const IDocumentStore & docStore) const = 0; + virtual Task::UP create(IDocumentStore & docStore, FlushStats & stats, SerialNum currSerial) = 0; - // Implements IFlushTarget - virtual MemoryGain getApproxMemoryGain() const override; - virtual DiskGain getApproxDiskGain() const override; - virtual SerialNum getFlushedSerialNum() const override; - virtual Time getLastFlushTime() const override; + vespalib::Executor &_summaryService; + 
IDocumentStore & _docStore; + FlushStats _lastStats; +}; - virtual Task::UP initFlush(SerialNum currentSerial, std::shared_ptr<search::IFlushToken> flush_token) override; +/** + * Implements target to compact away removed documents. Wasted disk space is cost factor used for prioritizing. + */ +class SummaryCompactBloatTarget : public SummaryGCTarget { +private: + size_t getBloat(const search::IDocumentStore & docStore) const override; + Task::UP create(IDocumentStore & docStore, FlushStats & stats, SerialNum currSerial) override; +public: + SummaryCompactBloatTarget(vespalib::Executor & summaryService, IDocumentStore & docStore); +}; - virtual FlushStats getLastFlushStats() const override { return _lastStats; } - virtual uint64_t getApproxBytesToWriteToDisk() const override; +/** + * Target to ensure bucket spread is kept low. The cost is reported as a potential gain in disk space as + * we do not have a concept for bucket spread. + */ +class SummaryCompactSpreadTarget : public SummaryGCTarget { +private: + size_t getBloat(const search::IDocumentStore & docStore) const override; + Task::UP create(IDocumentStore & docStore, FlushStats & stats, SerialNum currSerial) override; +public: + SummaryCompactSpreadTarget(vespalib::Executor & summaryService, IDocumentStore & docStore); }; } // namespace proton diff --git a/searchcore/src/vespa/searchcore/proton/docsummary/summaryflushtarget.cpp b/searchcore/src/vespa/searchcore/proton/docsummary/summaryflushtarget.cpp index 7f164af7339..45fc23175bf 100644 --- a/searchcore/src/vespa/searchcore/proton/docsummary/summaryflushtarget.cpp +++ b/searchcore/src/vespa/searchcore/proton/docsummary/summaryflushtarget.cpp @@ -1,7 +1,6 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
#include "summaryflushtarget.h" -#include <vespa/searchcorespi/index/i_thread_service.h> #include <vespa/vespalib/util/lambdatask.h> using search::IDocumentStore; @@ -28,7 +27,7 @@ public: { _currSerial = _docStore.initFlush(currSerial); } - virtual void run() override { + void run() override { _docStore.flush(_currSerial); updateStats(); } @@ -37,17 +36,13 @@ public: _stats.setPath(_docStore.getBaseDir()); } - virtual SerialNum - getFlushSerial() const override - { - return _currSerial; - } + SerialNum getFlushSerial() const override { return _currSerial; } }; } SummaryFlushTarget::SummaryFlushTarget(IDocumentStore & docStore, - searchcorespi::index::IThreadService & summaryService) + vespalib::Executor & summaryService) : IFlushTarget("summary.flush", Type::SYNC, Component::DOCUMENT_STORE), _docStore(docStore), _summaryService(summaryService), @@ -62,12 +57,6 @@ SummaryFlushTarget::getApproxMemoryGain() const return MemoryGain(_docStore.memoryUsed(), _docStore.memoryMeta()); } -IFlushTarget::DiskGain -SummaryFlushTarget::getApproxDiskGain() const -{ - return DiskGain(0, 0); -} - IFlushTarget::Time SummaryFlushTarget::getLastFlushTime() const { @@ -97,11 +86,4 @@ SummaryFlushTarget::initFlush(SerialNum currentSerial, std::shared_ptr<search::I return future.get(); } -uint64_t -SummaryFlushTarget::getApproxBytesToWriteToDisk() const -{ - return 0; -} - - } // namespace proton diff --git a/searchcore/src/vespa/searchcore/proton/docsummary/summaryflushtarget.h b/searchcore/src/vespa/searchcore/proton/docsummary/summaryflushtarget.h index 99cfa1a2080..f864b922af8 100644 --- a/searchcore/src/vespa/searchcore/proton/docsummary/summaryflushtarget.h +++ b/searchcore/src/vespa/searchcore/proton/docsummary/summaryflushtarget.h @@ -4,7 +4,6 @@ #include <vespa/searchlib/docstore/idocumentstore.h> #include <vespa/searchcorespi/flush/iflushtarget.h> -namespace searchcorespi::index { struct IThreadService; } namespace proton { /** @@ -14,25 +13,24 @@ class SummaryFlushTarget : 
public searchcorespi::IFlushTarget { private: using FlushStats = searchcorespi::FlushStats; search::IDocumentStore & _docStore; - searchcorespi::index::IThreadService & _summaryService; - FlushStats _lastStats; + vespalib::Executor & _summaryService; + FlushStats _lastStats; Task::UP internalInitFlush(SerialNum currentSerial); public: SummaryFlushTarget(search::IDocumentStore & docStore, - searchcorespi::index::IThreadService & summaryService); + vespalib::Executor & summaryService); - // Implements IFlushTarget - virtual MemoryGain getApproxMemoryGain() const override; - virtual DiskGain getApproxDiskGain() const override; - virtual SerialNum getFlushedSerialNum() const override; - virtual Time getLastFlushTime() const override; + MemoryGain getApproxMemoryGain() const override; + DiskGain getApproxDiskGain() const override { return DiskGain(0, 0); } + SerialNum getFlushedSerialNum() const override; + Time getLastFlushTime() const override; - virtual Task::UP initFlush(SerialNum currentSerial, std::shared_ptr<search::IFlushToken> flush_token) override; + Task::UP initFlush(SerialNum currentSerial, std::shared_ptr<search::IFlushToken> flush_token) override; - virtual FlushStats getLastFlushStats() const override { return _lastStats; } - virtual uint64_t getApproxBytesToWriteToDisk() const override; + FlushStats getLastFlushStats() const override { return _lastStats; } + uint64_t getApproxBytesToWriteToDisk() const override { return 0; } }; } // namespace proton diff --git a/searchcore/src/vespa/searchcore/proton/docsummary/summarymanager.cpp b/searchcore/src/vespa/searchcore/proton/docsummary/summarymanager.cpp index 4570151d3eb..28a91e1444d 100644 --- a/searchcore/src/vespa/searchcore/proton/docsummary/summarymanager.cpp +++ b/searchcore/src/vespa/searchcore/proton/docsummary/summarymanager.cpp @@ -7,7 +7,6 @@ #include <vespa/config/print/ostreamconfigwriter.h> #include <vespa/document/repo/documenttyperepo.h> #include <vespa/juniper/rpinterface.h> -#include 
<vespa/searchcorespi/index/i_thread_service.h> #include <vespa/searchcore/proton/flushengine/shrink_lid_space_flush_target.h> #include <vespa/vespalib/util/lambdatask.h> #include <vespa/searchsummary/docsummary/docsumconfig.h> @@ -45,12 +44,12 @@ namespace { class ShrinkSummaryLidSpaceFlushTarget : public ShrinkLidSpaceFlushTarget { using ICompactableLidSpace = search::common::ICompactableLidSpace; - searchcorespi::index::IThreadService & _summaryService; + vespalib::Executor & _summaryService; public: ShrinkSummaryLidSpaceFlushTarget(const vespalib::string &name, Type type, Component component, SerialNum flushedSerialNum, vespalib::system_time lastFlushTime, - searchcorespi::index::IThreadService & summaryService, + vespalib::Executor & summaryService, std::shared_ptr<ICompactableLidSpace> target); ~ShrinkSummaryLidSpaceFlushTarget() override; Task::UP initFlush(SerialNum currentSerial, std::shared_ptr<search::IFlushToken> flush_token) override; @@ -59,7 +58,7 @@ public: ShrinkSummaryLidSpaceFlushTarget:: ShrinkSummaryLidSpaceFlushTarget(const vespalib::string &name, Type type, Component component, SerialNum flushedSerialNum, vespalib::system_time lastFlushTime, - searchcorespi::index::IThreadService & summaryService, + vespalib::Executor & summaryService, std::shared_ptr<ICompactableLidSpace> target) : ShrinkLidSpaceFlushTarget(name, type, component, flushedSerialNum, lastFlushTime, std::move(target)), _summaryService(summaryService) @@ -97,7 +96,7 @@ SummarySetup(const vespalib::string & baseDir, const DocTypeName & docTypeName, auto resultConfig = std::make_unique<ResultConfig>(); if (!resultConfig->ReadConfig(summaryCfg, make_string("SummaryManager(%s)", baseDir.c_str()).c_str())) { std::ostringstream oss; - config::OstreamConfigWriter writer(oss); + ::config::OstreamConfigWriter writer(oss); writer.write(summaryCfg); throw IllegalArgumentException (make_string("Could not initialize summary result config for directory '%s' based on summary config '%s'", @@ 
-153,9 +152,7 @@ SummaryManager::SummaryManager(vespalib::ThreadExecutor & executor, const LogDoc search::IBucketizer::SP bucketizer) : _baseDir(baseDir), _docTypeName(docTypeName), - _docStore(), - _tuneFileSummary(tuneFileSummary), - _currentSerial(0u) + _docStore() { _docStore = std::make_shared<LogDocumentStore>(executor, baseDir, storeConfig, growStrategy, tuneFileSummary, fileHeaderContext, tlSyncer, std::move(bucketizer)); @@ -167,27 +164,24 @@ void SummaryManager::putDocument(uint64_t syncToken, search::DocumentIdT lid, const Document & doc) { _docStore->write(syncToken, lid, doc); - _currentSerial = syncToken; } void SummaryManager::putDocument(uint64_t syncToken, search::DocumentIdT lid, const vespalib::nbostream & doc) { _docStore->write(syncToken, lid, doc); - _currentSerial = syncToken; } void SummaryManager::removeDocument(uint64_t syncToken, search::DocumentIdT lid) { _docStore->remove(syncToken, lid); - _currentSerial = syncToken; } namespace { IFlushTarget::SP -createShrinkLidSpaceFlushTarget(searchcorespi::index::IThreadService & summaryService, IDocumentStore::SP docStore) +createShrinkLidSpaceFlushTarget(vespalib::Executor & summaryService, IDocumentStore::SP docStore) { return std::make_shared<ShrinkSummaryLidSpaceFlushTarget>("summary.shrink", IFlushTarget::Type::GC, @@ -200,18 +194,21 @@ createShrinkLidSpaceFlushTarget(searchcorespi::index::IThreadService & summarySe } -IFlushTarget::List SummaryManager::getFlushTargets(searchcorespi::index::IThreadService & summaryService) +IFlushTarget::List +SummaryManager::getFlushTargets(vespalib::Executor & summaryService) { IFlushTarget::List ret; ret.push_back(std::make_shared<SummaryFlushTarget>(getBackingStore(), summaryService)); if (dynamic_cast<LogDocumentStore *>(_docStore.get()) != nullptr) { - ret.push_back(std::make_shared<SummaryCompactTarget>(summaryService, getBackingStore())); + ret.push_back(std::make_shared<SummaryCompactBloatTarget>(summaryService, getBackingStore())); + 
ret.push_back(std::make_shared<SummaryCompactSpreadTarget>(summaryService, getBackingStore())); } ret.push_back(createShrinkLidSpaceFlushTarget(summaryService, _docStore)); return ret; } -void SummaryManager::reconfigure(const LogDocumentStore::Config & config) { +void +SummaryManager::reconfigure(const LogDocumentStore::Config & config) { auto & docStore = dynamic_cast<LogDocumentStore &> (*_docStore); docStore.reconfigure(config); } diff --git a/searchcore/src/vespa/searchcore/proton/docsummary/summarymanager.h b/searchcore/src/vespa/searchcore/proton/docsummary/summarymanager.h index 9bff8723ff6..b3cbd399262 100644 --- a/searchcore/src/vespa/searchcore/proton/docsummary/summarymanager.h +++ b/searchcore/src/vespa/searchcore/proton/docsummary/summarymanager.h @@ -12,7 +12,6 @@ #include <vespa/document/fieldvalue/document.h> #include <vespa/vespalib/util/threadexecutor.h> -namespace searchcorespi::index { struct IThreadService; } namespace search { class IBucketizer; } namespace search::common { class FileHeaderContext; } @@ -58,8 +57,6 @@ private: vespalib::string _baseDir; DocTypeName _docTypeName; std::shared_ptr<search::IDocumentStore> _docStore; - const search::TuneFileSummary _tuneFileSummary; - uint64_t _currentSerial; public: typedef std::shared_ptr<SummaryManager> SP; @@ -77,7 +74,7 @@ public: void putDocument(uint64_t syncToken, search::DocumentIdT lid, const document::Document & doc); void putDocument(uint64_t syncToken, search::DocumentIdT lid, const vespalib::nbostream & doc); void removeDocument(uint64_t syncToken, search::DocumentIdT lid); - searchcorespi::IFlushTarget::List getFlushTargets(searchcorespi::index::IThreadService & summaryService); + searchcorespi::IFlushTarget::List getFlushTargets(vespalib::Executor & summaryService); ISummarySetup::SP createSummarySetup(const vespa::config::search::SummaryConfig &summaryCfg, diff --git a/searchcore/src/vespa/searchcore/proton/documentmetastore/documentmetastore.cpp 
b/searchcore/src/vespa/searchcore/proton/documentmetastore/documentmetastore.cpp index 3170654409b..28234730f7b 100644 --- a/searchcore/src/vespa/searchcore/proton/documentmetastore/documentmetastore.cpp +++ b/searchcore/src/vespa/searchcore/proton/documentmetastore/documentmetastore.cpp @@ -197,10 +197,7 @@ DocumentMetaStore::consider_compact_gid_to_lid_map() if (_gidToLidMap.getAllocator().getNodeStore().has_held_buffers()) { return false; } - auto &compaction_strategy = getConfig().getCompactionStrategy(); - size_t used_bytes = _cached_gid_to_lid_map_memory_usage.usedBytes(); - size_t dead_bytes = _cached_gid_to_lid_map_memory_usage.deadBytes(); - return compaction_strategy.should_compact_memory(used_bytes, dead_bytes); + return _should_compact_gid_to_lid_map; } void @@ -209,7 +206,7 @@ DocumentMetaStore::onCommit() if (consider_compact_gid_to_lid_map()) { incGeneration(); _changesSinceCommit = 0; - _gidToLidMap.compact_worst(); + _gidToLidMap.compact_worst(getConfig().getCompactionStrategy()); _gid_to_lid_map_write_itr_prepare_serial_num = 0u; _gid_to_lid_map_write_itr.begin(_gidToLidMap.getRoot()); incGeneration(); @@ -223,13 +220,15 @@ DocumentMetaStore::onCommit() void DocumentMetaStore::onUpdateStat() { + auto &compaction_strategy = getConfig().getCompactionStrategy(); vespalib::MemoryUsage usage = _metaDataStore.getMemoryUsage(); usage.incAllocatedBytesOnHold(getGenerationHolder().getHeldBytes()); size_t bvSize = _lidAlloc.getUsedLidsSize(); usage.incAllocatedBytes(bvSize); usage.incUsedBytes(bvSize); - _cached_gid_to_lid_map_memory_usage = _gidToLidMap.getMemoryUsage(); - usage.merge(_cached_gid_to_lid_map_memory_usage); + auto gid_to_lid_map_memory_usage = _gidToLidMap.getMemoryUsage(); + _should_compact_gid_to_lid_map = compaction_strategy.should_compact_memory(gid_to_lid_map_memory_usage); + usage.merge(gid_to_lid_map_memory_usage); // the free lists are not taken into account here updateStatistics(_metaDataStore.size(), _metaDataStore.size(), @@ 
-424,7 +423,7 @@ DocumentMetaStore::DocumentMetaStore(BucketDBOwnerSP bucketDB, _trackDocumentSizes(true), _changesSinceCommit(0), _op_listener(), - _cached_gid_to_lid_map_memory_usage() + _should_compact_gid_to_lid_map(false) { ensureSpace(0); // lid 0 is reserved setCommittedDocIdLimit(1u); // lid 0 is reserved diff --git a/searchcore/src/vespa/searchcore/proton/documentmetastore/documentmetastore.h b/searchcore/src/vespa/searchcore/proton/documentmetastore/documentmetastore.h index d78e98713ff..9e4977c65e1 100644 --- a/searchcore/src/vespa/searchcore/proton/documentmetastore/documentmetastore.h +++ b/searchcore/src/vespa/searchcore/proton/documentmetastore/documentmetastore.h @@ -77,7 +77,7 @@ private: bool _trackDocumentSizes; size_t _changesSinceCommit; OperationListenerSP _op_listener; - vespalib::MemoryUsage _cached_gid_to_lid_map_memory_usage; + bool _should_compact_gid_to_lid_map; DocId getFreeLid(); DocId peekFreeLid(); diff --git a/searchcore/src/vespa/searchcore/proton/flushengine/flushengine.h b/searchcore/src/vespa/searchcore/proton/flushengine/flushengine.h index 4eaa722e0ba..632f4482654 100644 --- a/searchcore/src/vespa/searchcore/proton/flushengine/flushengine.h +++ b/searchcore/src/vespa/searchcore/proton/flushengine/flushengine.h @@ -119,7 +119,7 @@ public: /** * Returns the underlying executor. Only used for state explorers. */ - const vespalib::SyncableThreadExecutor& get_executor() const { return _executor; } + const vespalib::ThreadExecutor& get_executor() const { return _executor; } /** * Starts the scheduling thread of this manager. 
diff --git a/searchcore/src/vespa/searchcore/proton/index/index_manager_initializer.cpp b/searchcore/src/vespa/searchcore/proton/index/index_manager_initializer.cpp index 9e915779d92..630c536a1ca 100644 --- a/searchcore/src/vespa/searchcore/proton/index/index_manager_initializer.cpp +++ b/searchcore/src/vespa/searchcore/proton/index/index_manager_initializer.cpp @@ -15,7 +15,7 @@ IndexManagerInitializer(const vespalib::string &baseDir, search::SerialNum serialNum, searchcorespi::IIndexManager::Reconfigurer & reconfigurer, searchcorespi::index::IThreadingService & threadingService, - vespalib::SyncableThreadExecutor & warmupExecutor, + vespalib::Executor & warmupExecutor, const search::TuneFileIndexManager & tuneFileIndexManager, const search::TuneFileAttributes &tuneFileAttributes, const search::common::FileHeaderContext & fileHeaderContext, diff --git a/searchcore/src/vespa/searchcore/proton/index/index_manager_initializer.h b/searchcore/src/vespa/searchcore/proton/index/index_manager_initializer.h index a7acfb61d54..3cf1daf631e 100644 --- a/searchcore/src/vespa/searchcore/proton/index/index_manager_initializer.h +++ b/searchcore/src/vespa/searchcore/proton/index/index_manager_initializer.h @@ -20,7 +20,7 @@ class IndexManagerInitializer : public initializer::InitializerTask search::SerialNum _serialNum; searchcorespi::IIndexManager::Reconfigurer &_reconfigurer; searchcorespi::index::IThreadingService &_threadingService; - vespalib::SyncableThreadExecutor &_warmupExecutor; + vespalib::Executor &_warmupExecutor; const search::TuneFileIndexManager _tuneFileIndexManager; const search::TuneFileAttributes _tuneFileAttributes; const search::common::FileHeaderContext &_fileHeaderContext; @@ -33,7 +33,7 @@ public: search::SerialNum serialNum, searchcorespi::IIndexManager::Reconfigurer & reconfigurer, searchcorespi::index::IThreadingService & threadingService, - vespalib::SyncableThreadExecutor & warmupExecutor, + vespalib::Executor & warmupExecutor, const 
search::TuneFileIndexManager & tuneFileIndexManager, const search::TuneFileAttributes & tuneFileAttributes, const search::common::FileHeaderContext & fileHeaderContext, diff --git a/searchcore/src/vespa/searchcore/proton/index/indexmanager.cpp b/searchcore/src/vespa/searchcore/proton/index/indexmanager.cpp index 169ba149297..de397e81d76 100644 --- a/searchcore/src/vespa/searchcore/proton/index/indexmanager.cpp +++ b/searchcore/src/vespa/searchcore/proton/index/indexmanager.cpp @@ -80,7 +80,7 @@ IndexManager::IndexManager(const vespalib::string &baseDir, SerialNum serialNum, Reconfigurer &reconfigurer, IThreadingService &threadingService, - vespalib::SyncableThreadExecutor & warmupExecutor, + vespalib::Executor & warmupExecutor, const search::TuneFileIndexManager &tuneFileIndexManager, const search::TuneFileAttributes &tuneFileAttributes, const FileHeaderContext &fileHeaderContext) : diff --git a/searchcore/src/vespa/searchcore/proton/index/indexmanager.h b/searchcore/src/vespa/searchcore/proton/index/indexmanager.h index 4113af30b0d..436b4127804 100644 --- a/searchcore/src/vespa/searchcore/proton/index/indexmanager.h +++ b/searchcore/src/vespa/searchcore/proton/index/indexmanager.h @@ -73,7 +73,7 @@ public: SerialNum serialNum, Reconfigurer &reconfigurer, searchcorespi::index::IThreadingService &threadingService, - vespalib::SyncableThreadExecutor & warmupExecutor, + vespalib::Executor & warmupExecutor, const search::TuneFileIndexManager &tuneFileIndexManager, const search::TuneFileAttributes &tuneFileAttributes, const search::common::FileHeaderContext &fileHeaderContext); diff --git a/searchcore/src/vespa/searchcore/proton/matchengine/matchengine.cpp b/searchcore/src/vespa/searchcore/proton/matchengine/matchengine.cpp index 5ad4a7ed52b..58dc473b85e 100644 --- a/searchcore/src/vespa/searchcore/proton/matchengine/matchengine.cpp +++ b/searchcore/src/vespa/searchcore/proton/matchengine/matchengine.cpp @@ -50,7 +50,8 @@ MatchEngine::MatchEngine(size_t numThreads, 
size_t threadsPerSearch, uint32_t di _handlers(), _executor(std::max(size_t(1), numThreads / threadsPerSearch), 256_Ki, match_engine_executor), _threadBundlePool(std::max(size_t(1), threadsPerSearch)), - _nodeUp(false) + _nodeUp(false), + _nodeMaintenance(false) { } @@ -98,7 +99,8 @@ search::engine::SearchReply::UP MatchEngine::search(search::engine::SearchRequest::Source request, search::engine::SearchClient &client) { - if (_closed || !_nodeUp) { + // We continue to allow searches if the node is in Maintenance mode + if (_closed || (!_nodeUp && !_nodeMaintenance)) { auto ret = std::make_unique<search::engine::SearchReply>(); ret->setDistributionKey(_distributionKey); @@ -177,6 +179,14 @@ MatchEngine::setNodeUp(bool nodeUp) _nodeUp = nodeUp; } +void +MatchEngine::setNodeMaintenance(bool nodeMaintenance) +{ + _nodeMaintenance = nodeMaintenance; + if (nodeMaintenance) { + _nodeUp = false; + } +} StatusReport::UP MatchEngine::reportStatus() const diff --git a/searchcore/src/vespa/searchcore/proton/matchengine/matchengine.h b/searchcore/src/vespa/searchcore/proton/matchengine/matchengine.h index b4e32c45003..3d3be775a4a 100644 --- a/searchcore/src/vespa/searchcore/proton/matchengine/matchengine.h +++ b/searchcore/src/vespa/searchcore/proton/matchengine/matchengine.h @@ -26,6 +26,7 @@ private: vespalib::ThreadStackExecutor _executor; vespalib::SimpleThreadBundle::Pool _threadBundlePool; bool _nodeUp; + bool _nodeMaintenance; public: /** @@ -68,7 +69,7 @@ public: /** * Returns the underlying executor. Only used for state explorers. */ - const vespalib::SyncableThreadExecutor& get_executor() const { return _executor; } + const vespalib::ThreadExecutor& get_executor() const { return _executor; } /** * Closes the request handler interface. This will prevent any more data @@ -131,6 +132,13 @@ public: */ void setNodeUp(bool nodeUp); + /** + * Set node into maintenance, based on info from cluster controller. 
Note that + * nodeMaintenance == true also implies setNodeUp(false), as the node is technically + * not in a Up state. + */ + void setNodeMaintenance(bool nodeMaintenance); + StatusReport::UP reportStatus() const; search::engine::SearchReply::UP search( diff --git a/searchcore/src/vespa/searchcore/proton/matching/docid_range_scheduler.h b/searchcore/src/vespa/searchcore/proton/matching/docid_range_scheduler.h index f51043e9960..4aa8a3f6392 100644 --- a/searchcore/src/vespa/searchcore/proton/matching/docid_range_scheduler.h +++ b/searchcore/src/vespa/searchcore/proton/matching/docid_range_scheduler.h @@ -174,7 +174,7 @@ private: std::condition_variable condition; bool is_idle; DocidRange next_range; - Worker() : condition(), is_idle(false), next_range() {} + Worker() noexcept : condition(), is_idle(false), next_range() {} }; DocidRangeSplitter _splitter; uint32_t _min_task; diff --git a/searchcore/src/vespa/searchcore/proton/matching/matching_stats.h b/searchcore/src/vespa/searchcore/proton/matching/matching_stats.h index 7cdf8991fb4..047c6fcaf13 100644 --- a/searchcore/src/vespa/searchcore/proton/matching/matching_stats.h +++ b/searchcore/src/vespa/searchcore/proton/matching/matching_stats.h @@ -67,7 +67,7 @@ public: Avg _wait_time; friend MatchingStats; public: - Partition() + Partition() noexcept : _docsCovered(0), _docsMatched(0), _docsRanked(0), diff --git a/searchcore/src/vespa/searchcore/proton/metrics/CMakeLists.txt b/searchcore/src/vespa/searchcore/proton/metrics/CMakeLists.txt index 43e01420a22..ba6e5fd1ea5 100644 --- a/searchcore/src/vespa/searchcore/proton/metrics/CMakeLists.txt +++ b/searchcore/src/vespa/searchcore/proton/metrics/CMakeLists.txt @@ -5,6 +5,8 @@ vespa_add_library(searchcore_proton_metrics STATIC content_proton_metrics.cpp documentdb_job_trackers.cpp documentdb_tagged_metrics.cpp + document_db_commit_metrics.cpp + document_db_feeding_metrics.cpp executor_metrics.cpp executor_threading_service_metrics.cpp 
executor_threading_service_stats.cpp diff --git a/searchcore/src/vespa/searchcore/proton/metrics/content_proton_metrics.cpp b/searchcore/src/vespa/searchcore/proton/metrics/content_proton_metrics.cpp index 0db3488dc28..d8c42795099 100644 --- a/searchcore/src/vespa/searchcore/proton/metrics/content_proton_metrics.cpp +++ b/searchcore/src/vespa/searchcore/proton/metrics/content_proton_metrics.cpp @@ -11,7 +11,8 @@ ContentProtonMetrics::ProtonExecutorMetrics::ProtonExecutorMetrics(metrics::Metr match("match", this), docsum("docsum", this), shared("shared", this), - warmup("warmup", this) + warmup("warmup", this), + field_writer("field_writer", this) { } diff --git a/searchcore/src/vespa/searchcore/proton/metrics/content_proton_metrics.h b/searchcore/src/vespa/searchcore/proton/metrics/content_proton_metrics.h index 26629d13569..70d3d16cb7c 100644 --- a/searchcore/src/vespa/searchcore/proton/metrics/content_proton_metrics.h +++ b/searchcore/src/vespa/searchcore/proton/metrics/content_proton_metrics.h @@ -26,6 +26,7 @@ struct ContentProtonMetrics : metrics::MetricSet ExecutorMetrics docsum; ExecutorMetrics shared; ExecutorMetrics warmup; + ExecutorMetrics field_writer; ProtonExecutorMetrics(metrics::MetricSet *parent); ~ProtonExecutorMetrics(); diff --git a/searchcore/src/vespa/searchcore/proton/metrics/document_db_commit_metrics.cpp b/searchcore/src/vespa/searchcore/proton/metrics/document_db_commit_metrics.cpp new file mode 100644 index 00000000000..c5b7d71a982 --- /dev/null +++ b/searchcore/src/vespa/searchcore/proton/metrics/document_db_commit_metrics.cpp @@ -0,0 +1,16 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+ +#include "document_db_commit_metrics.h" + +namespace proton { + +DocumentDBCommitMetrics::DocumentDBCommitMetrics(metrics::MetricSet* parent) + : MetricSet("commit", {}, "commit metrics for feeding in a document database", parent), + operations("operations", {}, "Number of operations included in a commit", this), + latency("latency", {}, "Latency for commit", this) +{ +} + +DocumentDBCommitMetrics::~DocumentDBCommitMetrics() = default; + +} diff --git a/searchcore/src/vespa/searchcore/proton/metrics/document_db_commit_metrics.h b/searchcore/src/vespa/searchcore/proton/metrics/document_db_commit_metrics.h new file mode 100644 index 00000000000..45c826a7ccf --- /dev/null +++ b/searchcore/src/vespa/searchcore/proton/metrics/document_db_commit_metrics.h @@ -0,0 +1,21 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +#pragma once + +#include <vespa/metrics/metricset.h> +#include <vespa/metrics/valuemetric.h> + +namespace proton { + +/* + * Metrics for commits during feeding within a document db. + */ +struct DocumentDBCommitMetrics : metrics::MetricSet +{ + metrics::DoubleAverageMetric operations; + metrics::DoubleAverageMetric latency; + + DocumentDBCommitMetrics(metrics::MetricSet* parent); + ~DocumentDBCommitMetrics() override; +}; + +} diff --git a/searchcore/src/vespa/searchcore/proton/metrics/document_db_feeding_metrics.cpp b/searchcore/src/vespa/searchcore/proton/metrics/document_db_feeding_metrics.cpp new file mode 100644 index 00000000000..0c7f8f6f039 --- /dev/null +++ b/searchcore/src/vespa/searchcore/proton/metrics/document_db_feeding_metrics.cpp @@ -0,0 +1,15 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+ +#include "document_db_feeding_metrics.h" + +namespace proton { + +DocumentDBFeedingMetrics::DocumentDBFeedingMetrics(metrics::MetricSet* parent) + : MetricSet("feeding", {}, "feeding metrics in a document database", parent), + commit(this) +{ +} + +DocumentDBFeedingMetrics::~DocumentDBFeedingMetrics() = default; + +} diff --git a/searchcore/src/vespa/searchcore/proton/metrics/document_db_feeding_metrics.h b/searchcore/src/vespa/searchcore/proton/metrics/document_db_feeding_metrics.h new file mode 100644 index 00000000000..7353cfbdc04 --- /dev/null +++ b/searchcore/src/vespa/searchcore/proton/metrics/document_db_feeding_metrics.h @@ -0,0 +1,19 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +#pragma once + +#include "document_db_commit_metrics.h" + +namespace proton { + +/* + * Metrics for feeding within a document db. + */ +struct DocumentDBFeedingMetrics : metrics::MetricSet +{ + DocumentDBCommitMetrics commit; + + DocumentDBFeedingMetrics(metrics::MetricSet* parent); + ~DocumentDBFeedingMetrics() override; +}; + +} diff --git a/searchcore/src/vespa/searchcore/proton/metrics/documentdb_tagged_metrics.cpp b/searchcore/src/vespa/searchcore/proton/metrics/documentdb_tagged_metrics.cpp index 1d947bb003a..e895a03b190 100644 --- a/searchcore/src/vespa/searchcore/proton/metrics/documentdb_tagged_metrics.cpp +++ b/searchcore/src/vespa/searchcore/proton/metrics/documentdb_tagged_metrics.cpp @@ -268,6 +268,7 @@ DocumentDBTaggedMetrics::DocumentDBTaggedMetrics(const vespalib::string &docType sessionCache(this), documents(this), bucketMove(this), + feeding(this), totalMemoryUsage(this), totalDiskUsage("disk_usage", {}, "The total disk usage (in bytes) for this document db", this), heart_beat_age("heart_beat_age", {}, "How long ago (in seconds) heart beat maintenace job was run", this), diff --git a/searchcore/src/vespa/searchcore/proton/metrics/documentdb_tagged_metrics.h 
b/searchcore/src/vespa/searchcore/proton/metrics/documentdb_tagged_metrics.h index bdedfeea8b9..c9d321d6752 100644 --- a/searchcore/src/vespa/searchcore/proton/metrics/documentdb_tagged_metrics.h +++ b/searchcore/src/vespa/searchcore/proton/metrics/documentdb_tagged_metrics.h @@ -5,6 +5,7 @@ #include "memory_usage_metrics.h" #include "executor_threading_service_metrics.h" #include "sessionmanager_metrics.h" +#include "document_db_feeding_metrics.h" #include <vespa/metrics/metricset.h> #include <vespa/metrics/valuemetric.h> #include <vespa/searchcore/proton/matching/matching_stats.h> @@ -202,6 +203,7 @@ struct DocumentDBTaggedMetrics : metrics::MetricSet SessionCacheMetrics sessionCache; DocumentsMetrics documents; BucketMoveMetrics bucketMove; + DocumentDBFeedingMetrics feeding; MemoryUsageMetrics totalMemoryUsage; metrics::LongValueMetric totalDiskUsage; metrics::DoubleValueMetric heart_beat_age; diff --git a/searchcore/src/vespa/searchcore/proton/metrics/executor_threading_service_stats.cpp b/searchcore/src/vespa/searchcore/proton/metrics/executor_threading_service_stats.cpp index af24dcd976d..63644e5c7ab 100644 --- a/searchcore/src/vespa/searchcore/proton/metrics/executor_threading_service_stats.cpp +++ b/searchcore/src/vespa/searchcore/proton/metrics/executor_threading_service_stats.cpp @@ -7,14 +7,12 @@ namespace proton { ExecutorThreadingServiceStats::ExecutorThreadingServiceStats(Stats masterExecutorStats, Stats indexExecutorStats, Stats summaryExecutorStats, - Stats sharedExecutorStats, Stats indexFieldInverterExecutorStats, Stats indexFieldWriterExecutorStats, Stats attributeFieldWriterExecutorStats) : _masterExecutorStats(masterExecutorStats), _indexExecutorStats(indexExecutorStats), _summaryExecutorStats(summaryExecutorStats), - _sharedExecutorStats(sharedExecutorStats), _indexFieldInverterExecutorStats(indexFieldInverterExecutorStats), _indexFieldWriterExecutorStats(indexFieldWriterExecutorStats), 
_attributeFieldWriterExecutorStats(attributeFieldWriterExecutorStats) diff --git a/searchcore/src/vespa/searchcore/proton/metrics/executor_threading_service_stats.h b/searchcore/src/vespa/searchcore/proton/metrics/executor_threading_service_stats.h index e2c53af11b5..8015ec83ae9 100644 --- a/searchcore/src/vespa/searchcore/proton/metrics/executor_threading_service_stats.h +++ b/searchcore/src/vespa/searchcore/proton/metrics/executor_threading_service_stats.h @@ -16,7 +16,6 @@ private: Stats _masterExecutorStats; Stats _indexExecutorStats; Stats _summaryExecutorStats; - Stats _sharedExecutorStats; Stats _indexFieldInverterExecutorStats; Stats _indexFieldWriterExecutorStats; Stats _attributeFieldWriterExecutorStats; @@ -24,7 +23,6 @@ public: ExecutorThreadingServiceStats(Stats masterExecutorStats, Stats indexExecutorStats, Stats summaryExecutorStats, - Stats sharedExecutorStats, Stats indexFieldInverterExecutorStats, Stats indexFieldWriterExecutorStats, Stats attributeFieldWriterExecutorStats); @@ -33,7 +31,6 @@ public: const Stats &getMasterExecutorStats() const { return _masterExecutorStats; } const Stats &getIndexExecutorStats() const { return _indexExecutorStats; } const Stats &getSummaryExecutorStats() const { return _summaryExecutorStats; } - const Stats &getSharedExecutorStats() const { return _sharedExecutorStats; } const Stats &getIndexFieldInverterExecutorStats() const { return _indexFieldInverterExecutorStats; } const Stats &getIndexFieldWriterExecutorStats() const { return _indexFieldWriterExecutorStats; } const Stats &getAttributeFieldWriterExecutorStats() const { return _attributeFieldWriterExecutorStats; } diff --git a/searchcore/src/vespa/searchcore/proton/server/CMakeLists.txt b/searchcore/src/vespa/searchcore/proton/server/CMakeLists.txt index 511adbe66e9..1daacc29fcb 100644 --- a/searchcore/src/vespa/searchcore/proton/server/CMakeLists.txt +++ b/searchcore/src/vespa/searchcore/proton/server/CMakeLists.txt @@ -47,6 +47,7 @@ 
vespa_add_library(searchcore_server STATIC feedhandler.cpp feedstate.cpp feedstates.cpp + feed_handler_stats.cpp fileconfigmanager.cpp flushhandlerproxy.cpp forcecommitcontext.cpp @@ -97,6 +98,8 @@ vespa_add_library(searchcore_server STATIC searchhandlerproxy.cpp searchview.cpp simpleflush.cpp + shared_threading_service.cpp + shared_threading_service_config.cpp storeonlydocsubdb.cpp storeonlyfeedview.cpp summaryadapter.cpp diff --git a/searchcore/src/vespa/searchcore/proton/server/bootstrapconfig.cpp b/searchcore/src/vespa/searchcore/proton/server/bootstrapconfig.cpp index bc1c16ed5b9..fb533547d00 100644 --- a/searchcore/src/vespa/searchcore/proton/server/bootstrapconfig.cpp +++ b/searchcore/src/vespa/searchcore/proton/server/bootstrapconfig.cpp @@ -2,13 +2,13 @@ #include "bootstrapconfig.h" #include <vespa/config-bucketspaces.h> +#include <vespa/document/config/documenttypes_config_fwd.h> using namespace vespa::config::search; using namespace config; using document::DocumentTypeRepo; using search::TuneFileDocumentDB; using vespa::config::search::core::ProtonConfig; -using document::DocumenttypesConfig; namespace { template <typename T> diff --git a/searchcore/src/vespa/searchcore/proton/server/bootstrapconfig.h b/searchcore/src/vespa/searchcore/proton/server/bootstrapconfig.h index b8ed1226220..4ae0adeb5a6 100644 --- a/searchcore/src/vespa/searchcore/proton/server/bootstrapconfig.h +++ b/searchcore/src/vespa/searchcore/proton/server/bootstrapconfig.h @@ -50,7 +50,7 @@ public: const HwInfo & hwInfo); ~BootstrapConfig(); - const document::DocumenttypesConfig &getDocumenttypesConfig() const { return *_documenttypes; } + const document::config::DocumenttypesConfig &getDocumenttypesConfig() const { return *_documenttypes; } const FiledistributorrpcConfig &getFiledistributorrpcConfig() const { return *_fileDistributorRpc; } const FiledistributorrpcConfigSP &getFiledistributorrpcConfigSP() const { return _fileDistributorRpc; } const DocumenttypesConfigSP 
&getDocumenttypesConfigSP() const { return _documenttypes; } diff --git a/searchcore/src/vespa/searchcore/proton/server/bootstrapconfigmanager.cpp b/searchcore/src/vespa/searchcore/proton/server/bootstrapconfigmanager.cpp index e81f576cf46..8f99eb5e8a7 100644 --- a/searchcore/src/vespa/searchcore/proton/server/bootstrapconfigmanager.cpp +++ b/searchcore/src/vespa/searchcore/proton/server/bootstrapconfigmanager.cpp @@ -2,6 +2,7 @@ #include "bootstrapconfigmanager.h" #include "bootstrapconfig.h" +#include <vespa/document/config/documenttypes_config_fwd.h> #include <vespa/document/repo/document_type_repo_factory.h> #include <vespa/searchcore/proton/common/hw_info_sampler.h> #include <vespa/config-bucketspaces.h> @@ -19,7 +20,6 @@ using search::TuneFileDocumentDB; using vespa::config::search::core::ProtonConfig; using cloud::config::filedistribution::FiledistributorrpcConfig; using vespa::config::content::core::BucketspacesConfig; -using document::DocumenttypesConfig; using document::DocumentTypeRepoFactory; using BucketspacesConfigSP = std::shared_ptr<BucketspacesConfig>; diff --git a/searchcore/src/vespa/searchcore/proton/server/buckethandler.cpp b/searchcore/src/vespa/searchcore/proton/server/buckethandler.cpp index c15be9336fe..0d9b2f6aaf6 100644 --- a/searchcore/src/vespa/searchcore/proton/server/buckethandler.cpp +++ b/searchcore/src/vespa/searchcore/proton/server/buckethandler.cpp @@ -68,7 +68,8 @@ BucketHandler::BucketHandler(vespalib::Executor &executor) _executor(executor), _ready(nullptr), _changedHandlers(), - _nodeUp(false) + _nodeUp(false), + _nodeMaintenance(false) { LOG(spam, "BucketHandler::BucketHandler"); } @@ -143,13 +144,32 @@ BucketHandler::handlePopulateActiveBuckets(document::BucketId::List &buckets, })); } +namespace { +constexpr const char* bool_str(bool v) noexcept { + return v ? 
"true" : "false"; +} +} + void BucketHandler::notifyClusterStateChanged(const std::shared_ptr<IBucketStateCalculator> & newCalc) { bool oldNodeUp = _nodeUp; - _nodeUp = newCalc->nodeUp(); - LOG(spam, "notifyClusterStateChanged: %s -> %s", oldNodeUp ? "up" : "down", _nodeUp ? "up" : "down"); - if (oldNodeUp && !_nodeUp) { + bool oldNodeMaintenance = _nodeMaintenance; + _nodeUp = newCalc->nodeUp(); // Up, Retired or Initializing + _nodeMaintenance = newCalc->nodeMaintenance(); + LOG(spam, "notifyClusterStateChanged; up: %s -> %s, maintenance: %s -> %s", + bool_str(oldNodeUp), bool_str(_nodeUp), + bool_str(oldNodeMaintenance), bool_str(_nodeMaintenance)); + if (_nodeMaintenance) { + return; // Don't deactivate buckets in maintenance mode; let query traffic drain away naturally. + } + // We implicitly deactivate buckets in two edge cases: + // - Up -> Down (not maintenance; handled above), since the node can not be expected to offer + // any graceful query draining when set Down. + // - Maintenance -> !Maintenance, since we'd otherwise introduce a bunch of transient duplicate + // results into queries if we transition to an available state. + // The assumption is that the system has already activated buckets on other nodes in such a scenario. 
+ if ((oldNodeUp && !_nodeUp) || oldNodeMaintenance) { deactivateAllActiveBuckets(); } } diff --git a/searchcore/src/vespa/searchcore/proton/server/buckethandler.h b/searchcore/src/vespa/searchcore/proton/server/buckethandler.h index 7f44d2ebd71..2344e080450 100644 --- a/searchcore/src/vespa/searchcore/proton/server/buckethandler.h +++ b/searchcore/src/vespa/searchcore/proton/server/buckethandler.h @@ -25,6 +25,7 @@ private: documentmetastore::IBucketHandler *_ready; std::vector<IBucketStateChangedHandler *> _changedHandlers; bool _nodeUp; + bool _nodeMaintenance; void performSetCurrentState(document::BucketId bucketId, storage::spi::BucketInfo::ActiveState newState, diff --git a/searchcore/src/vespa/searchcore/proton/server/clusterstatehandler.cpp b/searchcore/src/vespa/searchcore/proton/server/clusterstatehandler.cpp index 3d709bd19d1..20cd4ced6b2 100644 --- a/searchcore/src/vespa/searchcore/proton/server/clusterstatehandler.cpp +++ b/searchcore/src/vespa/searchcore/proton/server/clusterstatehandler.cpp @@ -27,6 +27,7 @@ private: bool _nodeUp; bool _nodeInitializing; bool _nodeRetired; + bool _nodeMaintenance; public: ClusterStateAdapter(const ClusterState &calc) @@ -34,7 +35,8 @@ public: _clusterUp(_calc.clusterUp()), _nodeUp(_calc.nodeUp()), _nodeInitializing(_calc.nodeInitializing()), - _nodeRetired(_calc.nodeRetired()) + _nodeRetired(_calc.nodeRetired()), + _nodeMaintenance(_calc.nodeMaintenance()) { } vespalib::Trinary shouldBeReady(const document::Bucket &bucket) const override { @@ -44,6 +46,7 @@ public: bool nodeUp() const override { return _nodeUp; } bool nodeInitializing() const override { return _nodeInitializing; } bool nodeRetired() const override { return _nodeRetired; } + bool nodeMaintenance() const noexcept override { return _nodeMaintenance; } }; } @@ -53,11 +56,12 @@ ClusterStateHandler::performSetClusterState(const ClusterState *calc, IGenericRe { LOG(debug, "performSetClusterState(): " - "clusterUp(%s), nodeUp(%s), nodeInitializing(%s)" + 
"clusterUp(%s), nodeUp(%s), nodeInitializing(%s), nodeMaintenance(%s)" "changedHandlers.size() = %zu", (calc->clusterUp() ? "true" : "false"), (calc->nodeUp() ? "true" : "false"), (calc->nodeInitializing() ? "true" : "false"), + (calc->nodeMaintenance() ? "true" : "false"), _changedHandlers.size()); if (!_changedHandlers.empty()) { auto newCalc = std::make_shared<ClusterStateAdapter>(*calc); diff --git a/searchcore/src/vespa/searchcore/proton/server/combiningfeedview.cpp b/searchcore/src/vespa/searchcore/proton/server/combiningfeedview.cpp index f7702708c53..297a9b9254f 100644 --- a/searchcore/src/vespa/searchcore/proton/server/combiningfeedview.cpp +++ b/searchcore/src/vespa/searchcore/proton/server/combiningfeedview.cpp @@ -187,10 +187,10 @@ CombiningFeedView::prepareDeleteBucket(DeleteBucketOperation &delOp) } void -CombiningFeedView::handleDeleteBucket(const DeleteBucketOperation &delOp) +CombiningFeedView::handleDeleteBucket(const DeleteBucketOperation &delOp, DoneCallback onDone) { for (const auto &view : _views) { - view->handleDeleteBucket(delOp); + view->handleDeleteBucket(delOp, onDone); } } @@ -203,7 +203,7 @@ CombiningFeedView::prepareMove(MoveOperation &moveOp) } void -CombiningFeedView::handleMove(const MoveOperation &moveOp, IDestructorCallback::SP moveDoneCtx) +CombiningFeedView::handleMove(const MoveOperation &moveOp, DoneCallback moveDoneCtx) { assert(moveOp.getValidDbdId()); uint32_t subDbId = moveOp.getSubDbId(); @@ -218,10 +218,10 @@ CombiningFeedView::handleMove(const MoveOperation &moveOp, IDestructorCallback:: } void -CombiningFeedView::heartBeat(search::SerialNum serialNum) +CombiningFeedView::heartBeat(search::SerialNum serialNum, DoneCallback onDone) { for (const auto &view : _views) { - view->heartBeat(serialNum); + view->heartBeat(serialNum, onDone); } } @@ -235,17 +235,17 @@ CombiningFeedView::forceCommit(const CommitParam & param, DoneCallback onDone) void CombiningFeedView:: -handlePruneRemovedDocuments(const 
PruneRemovedDocumentsOperation &pruneOp) +handlePruneRemovedDocuments(const PruneRemovedDocumentsOperation &pruneOp, DoneCallback onDone) { - getRemFeedView()->handlePruneRemovedDocuments(pruneOp); + getRemFeedView()->handlePruneRemovedDocuments(pruneOp, onDone); } void -CombiningFeedView::handleCompactLidSpace(const CompactLidSpaceOperation &op) +CombiningFeedView::handleCompactLidSpace(const CompactLidSpaceOperation &op, DoneCallback onDone) { uint32_t subDbId = op.getSubDbId(); assert(subDbId < _views.size()); - _views[subDbId]->handleCompactLidSpace(op); + _views[subDbId]->handleCompactLidSpace(op, onDone); } void diff --git a/searchcore/src/vespa/searchcore/proton/server/combiningfeedview.h b/searchcore/src/vespa/searchcore/proton/server/combiningfeedview.h index 3827b491896..ff640a32887 100644 --- a/searchcore/src/vespa/searchcore/proton/server/combiningfeedview.h +++ b/searchcore/src/vespa/searchcore/proton/server/combiningfeedview.h @@ -72,12 +72,12 @@ public: void prepareRemove(RemoveOperation &rmOp) override; void handleRemove(FeedToken token, const RemoveOperation &rmOp) override; void prepareDeleteBucket(DeleteBucketOperation &delOp) override; - void handleDeleteBucket(const DeleteBucketOperation &delOp) override; void prepareMove(MoveOperation &putOp) override; - void handleMove(const MoveOperation &moveOp, std::shared_ptr<vespalib::IDestructorCallback> moveDoneCtx) override; - void heartBeat(search::SerialNum serialNum) override; - void handlePruneRemovedDocuments(const PruneRemovedDocumentsOperation &pruneOp) override; - void handleCompactLidSpace(const CompactLidSpaceOperation &op) override; + void handleDeleteBucket(const DeleteBucketOperation &delOp, DoneCallback onDone) override; + void handleMove(const MoveOperation &moveOp, DoneCallback onDone) override; + void heartBeat(search::SerialNum serialNum, DoneCallback onDone) override; + void handlePruneRemovedDocuments(const PruneRemovedDocumentsOperation &pruneOp, DoneCallback onDone) override; + 
void handleCompactLidSpace(const CompactLidSpaceOperation &op, DoneCallback onDone) override; // Called by document db executor void setCalculator(const std::shared_ptr<IBucketStateCalculator> &newCalc); diff --git a/searchcore/src/vespa/searchcore/proton/server/document_scan_iterator.cpp b/searchcore/src/vespa/searchcore/proton/server/document_scan_iterator.cpp index 4265d4c7099..c0f27e729af 100644 --- a/searchcore/src/vespa/searchcore/proton/server/document_scan_iterator.cpp +++ b/searchcore/src/vespa/searchcore/proton/server/document_scan_iterator.cpp @@ -23,12 +23,9 @@ DocumentScanIterator::valid() const } DocumentMetaData -DocumentScanIterator::next(uint32_t compactLidLimit, bool retry) +DocumentScanIterator::next(uint32_t compactLidLimit) { - if (!retry) { - --_lastLid; - } - for (; _lastLid > compactLidLimit; --_lastLid) { + for (--_lastLid; _lastLid > compactLidLimit; --_lastLid) { if (_metaStore.validLid(_lastLid)) { const RawDocumentMetaData &metaData = _metaStore.getRawMetaData(_lastLid); return DocumentMetaData(_lastLid, metaData.getTimestamp(), diff --git a/searchcore/src/vespa/searchcore/proton/server/document_scan_iterator.h b/searchcore/src/vespa/searchcore/proton/server/document_scan_iterator.h index b7c5131171a..ebdeb902474 100644 --- a/searchcore/src/vespa/searchcore/proton/server/document_scan_iterator.h +++ b/searchcore/src/vespa/searchcore/proton/server/document_scan_iterator.h @@ -21,7 +21,7 @@ private: public: DocumentScanIterator(const IDocumentMetaStore &_metaStore); bool valid() const override; - search::DocumentMetaData next(uint32_t compactLidLimit, bool retry) override; + search::DocumentMetaData next(uint32_t compactLidLimit) override; }; } // namespace proton diff --git a/searchcore/src/vespa/searchcore/proton/server/documentdb.cpp b/searchcore/src/vespa/searchcore/proton/server/documentdb.cpp index 142a2f748a2..53bdc356015 100644 --- a/searchcore/src/vespa/searchcore/proton/server/documentdb.cpp +++ 
b/searchcore/src/vespa/searchcore/proton/server/documentdb.cpp @@ -7,6 +7,7 @@ #include "documentdb.h" #include "documentdbconfigscout.h" #include "feedhandler.h" +#include "i_shared_threading_service.h" #include "idocumentdbowner.h" #include "idocumentsubdb.h" #include "maintenance_jobs_injector.h" @@ -131,8 +132,7 @@ DocumentDB::create(const vespalib::string &baseDir, document::BucketSpace bucketSpace, const ProtonConfig &protonCfg, IDocumentDBOwner &owner, - vespalib::SyncableThreadExecutor &warmupExecutor, - vespalib::ThreadExecutor &sharedExecutor, + ISharedThreadingService& shared_service, storage::spi::BucketExecutor &bucketExecutor, const search::transactionlog::WriterFactory &tlsWriterFactory, MetricsWireService &metricsWireService, @@ -143,7 +143,7 @@ DocumentDB::create(const vespalib::string &baseDir, { return DocumentDB::SP( new DocumentDB(baseDir, std::move(currentSnapshot), tlsSpec, queryLimiter, clock, docTypeName, bucketSpace, - protonCfg, owner, warmupExecutor, sharedExecutor, bucketExecutor, tlsWriterFactory, + protonCfg, owner, shared_service, bucketExecutor, tlsWriterFactory, metricsWireService, fileHeaderContext, std::move(config_store), initializeThreads, hwInfo)); } DocumentDB::DocumentDB(const vespalib::string &baseDir, @@ -155,8 +155,7 @@ DocumentDB::DocumentDB(const vespalib::string &baseDir, document::BucketSpace bucketSpace, const ProtonConfig &protonCfg, IDocumentDBOwner &owner, - vespalib::SyncableThreadExecutor &warmupExecutor, - vespalib::ThreadExecutor &sharedExecutor, + ISharedThreadingService& shared_service, storage::spi::BucketExecutor & bucketExecutor, const search::transactionlog::WriterFactory &tlsWriterFactory, MetricsWireService &metricsWireService, @@ -176,7 +175,7 @@ DocumentDB::DocumentDB(const vespalib::string &baseDir, _baseDir(baseDir + "/" + _docTypeName.toString()), // Only one thread per executor, or performDropFeedView() will fail. 
_writeServiceConfig(configSnapshot->get_threading_service_config()), - _writeService(sharedExecutor, _writeServiceConfig, indexing_thread_stack_size), + _writeService(shared_service.shared(), shared_service.field_writer(), &shared_service.invokeService(), _writeServiceConfig, indexing_thread_stack_size), _initializeThreads(std::move(initializeThreads)), _initConfigSnapshot(), _initConfigSerialNum(0u), @@ -204,12 +203,12 @@ DocumentDB::DocumentDB(const vespalib::string &baseDir, _writeFilter(), _transient_usage_provider(std::make_shared<DocumentDBResourceUsageProvider>(*this)), _feedHandler(std::make_unique<FeedHandler>(_writeService, tlsSpec, docTypeName, *this, _writeFilter, *this, tlsWriterFactory)), - _subDBs(*this, *this, *_feedHandler, _docTypeName, _writeService, warmupExecutor, fileHeaderContext, + _subDBs(*this, *this, *_feedHandler, _docTypeName, _writeService, shared_service.warmup(), fileHeaderContext, metricsWireService, getMetrics(), queryLimiter, clock, _configMutex, _baseDir, hwInfo), - _maintenanceController(_writeService.master(), sharedExecutor, _refCount, _docTypeName), + _maintenanceController(_writeService.master(), shared_service.shared(), _refCount, _docTypeName), _jobTrackers(), _calc(), - _metricsUpdater(_subDBs, _writeService, _jobTrackers, *_sessionManager, _writeFilter) + _metricsUpdater(_subDBs, _writeService, _jobTrackers, *_sessionManager, _writeFilter, *_feedHandler) { assert(configSnapshot); @@ -551,7 +550,10 @@ DocumentDB::close() // Caller should have removed document DB from feed router. _refCount.waitForZeroRefCount(); - _writeService.sync_all_executors(); + masterExecute([this] () { + _feedView.get()->forceCommitAndWait(search::CommitParam(getCurrentSerialNumber())); + }); + _writeService.master().sync(); // The attributes in the ready sub db is also the total set of attributes. 
DocumentDBTaggedMetrics &metrics = getMetrics(); @@ -559,10 +561,8 @@ DocumentDB::close() _metricsWireService.cleanAttributes(metrics.notReady.attributes); masterExecute([this] () { - _feedView.get()->forceCommitAndWait(search::CommitParam(getCurrentSerialNumber())); closeSubDBs(); }); - _writeService.sync_all_executors(); // What about queued tasks ? _writeService.shutdown(); _maintenanceController.kill(); @@ -895,7 +895,6 @@ DocumentDB::syncFeedView() IFeedView::SP newFeedView(_subDBs.getFeedView()); _maintenanceController.killJobs(); - _writeService.sync_all_executors(); _feedView.set(newFeedView); _feedHandler->setActiveFeedView(newFeedView.get()); @@ -997,7 +996,7 @@ DocumentDB::notifyClusterStateChanged(const std::shared_ptr<IBucketStateCalculat if (cfv != nullptr) cfv->setCalculator(newCalc); } - _subDBs.setBucketStateCalculator(newCalc); + _subDBs.setBucketStateCalculator(newCalc, std::shared_ptr<vespalib::IDestructorCallback>()); } diff --git a/searchcore/src/vespa/searchcore/proton/server/documentdb.h b/searchcore/src/vespa/searchcore/proton/server/documentdb.h index 391c11df276..e829f477e8a 100644 --- a/searchcore/src/vespa/searchcore/proton/server/documentdb.h +++ b/searchcore/src/vespa/searchcore/proton/server/documentdb.h @@ -47,12 +47,13 @@ namespace storage::spi { struct BucketExecutor; } namespace proton { class AttributeConfigInspector; +class ExecutorThreadingServiceStats; class IDocumentDBOwner; +class ISharedThreadingService; class ITransientResourceUsageProvider; -struct MetricsWireService; class StatusReport; -class ExecutorThreadingServiceStats; class TransientResourceUsageProvider; +struct MetricsWireService; namespace matching { class SessionManager; } @@ -71,10 +72,15 @@ class DocumentDB : public DocumentDBConfigOwner, public std::enable_shared_from_this<DocumentDB> { private: - using InitializeThreads = std::shared_ptr<vespalib::SyncableThreadExecutor>; + using InitializeThreads = std::shared_ptr<vespalib::ThreadExecutor>; using 
IFlushTargetList = std::vector<std::shared_ptr<searchcorespi::IFlushTarget>>; using StatusReportUP = std::unique_ptr<StatusReport>; using ProtonConfig = const vespa::config::search::core::internal::InternalProtonType; + using ConfigComparisonResult = DocumentDBConfig::ComparisonResult; + using lock_guard = std::lock_guard<std::mutex>; + using SerialNum = search::SerialNum; + using Schema = search::index::Schema; + DocTypeName _docTypeName; document::BucketSpace _bucketSpace; @@ -85,9 +91,6 @@ private: // threads for initializer tasks during proton startup InitializeThreads _initializeThreads; - typedef search::SerialNum SerialNum; - typedef search::index::Schema Schema; - using lock_guard = std::lock_guard<std::mutex>; // variables related to reconfig DocumentDBConfig::SP _initConfigSnapshot; SerialNum _initConfigSerialNum; @@ -97,10 +100,7 @@ private: DocumentDBConfig::SP _activeConfigSnapshot; int64_t _activeConfigSnapshotGeneration; const bool _validateAndSanitizeDocStore; - - vespalib::Gate _initGate; - - typedef DocumentDBConfig::ComparisonResult ConfigComparisonResult; + vespalib::Gate _initGate; ClusterStateHandler _clusterStateHandler; BucketHandler _bucketHandler; @@ -201,8 +201,7 @@ private: document::BucketSpace bucketSpace, const ProtonConfig &protonCfg, IDocumentDBOwner &owner, - vespalib::SyncableThreadExecutor &warmupExecutor, - vespalib::ThreadExecutor &sharedExecutor, + ISharedThreadingService& shared_service, storage::spi::BucketExecutor &bucketExecutor, const search::transactionlog::WriterFactory &tlsWriterFactory, MetricsWireService &metricsWireService, @@ -233,8 +232,7 @@ public: document::BucketSpace bucketSpace, const ProtonConfig &protonCfg, IDocumentDBOwner &owner, - vespalib::SyncableThreadExecutor &warmupExecutor, - vespalib::ThreadExecutor &sharedExecutor, + ISharedThreadingService& shared_service, storage::spi::BucketExecutor & bucketExecutor, const search::transactionlog::WriterFactory &tlsWriterFactory, MetricsWireService 
&metricsWireService, diff --git a/searchcore/src/vespa/searchcore/proton/server/documentdb_metrics_updater.cpp b/searchcore/src/vespa/searchcore/proton/server/documentdb_metrics_updater.cpp index d0bd7d4ee69..4e156539441 100644 --- a/searchcore/src/vespa/searchcore/proton/server/documentdb_metrics_updater.cpp +++ b/searchcore/src/vespa/searchcore/proton/server/documentdb_metrics_updater.cpp @@ -5,6 +5,7 @@ #include "documentdb_metrics_updater.h" #include "documentsubdbcollection.h" #include "executorthreadingservice.h" +#include "feedhandler.h" #include "idocumentsubdb.h" #include <vespa/searchcommon/attribute/status.h> #include <vespa/searchcore/proton/attribute/attribute_usage_filter.h> @@ -34,12 +35,16 @@ DocumentDBMetricsUpdater::DocumentDBMetricsUpdater(const DocumentSubDBCollection ExecutorThreadingService &writeService, DocumentDBJobTrackers &jobTrackers, matching::SessionManager &sessionManager, - const AttributeUsageFilter &writeFilter) + const AttributeUsageFilter &writeFilter, + FeedHandler& feed_handler) : _subDBs(subDBs), _writeService(writeService), _jobTrackers(jobTrackers), _sessionManager(sessionManager), - _writeFilter(writeFilter) + _writeFilter(writeFilter), + _feed_handler(feed_handler), + _lastDocStoreCacheStats(), + _last_feed_handler_stats() { } @@ -280,6 +285,27 @@ updateLidSpaceMetrics(MetricSetType &metrics, const search::IDocumentMetaStore & metrics.lidFragmentationFactor.set(stats.getLidFragmentationFactor()); } +void +update_feeding_metrics(DocumentDBFeedingMetrics& metrics, FeedHandlerStats stats, std::optional<FeedHandlerStats>& last_stats) +{ + auto delta_stats = stats; + if (last_stats.has_value()) { + delta_stats -= last_stats.value(); + } + last_stats = stats; + uint32_t commits = delta_stats.get_commits(); + if (commits != 0) { + double min_operations = delta_stats.get_min_operations().value_or(0); + double max_operations = delta_stats.get_max_operations().value_or(0); + double avg_operations = ((double) 
delta_stats.get_operations()) / commits; + metrics.commit.operations.addValueBatch(avg_operations, commits, min_operations, max_operations); + double min_latency = delta_stats.get_min_latency().value_or(0.0); + double max_latency = delta_stats.get_max_latency().value_or(0.0); + double avg_latency = delta_stats.get_total_latency() / commits; + metrics.commit.latency.addValueBatch(avg_latency, commits, min_latency, max_latency); + } +} + } void @@ -297,6 +323,7 @@ DocumentDBMetricsUpdater::updateMetrics(const metrics::MetricLockGuard & guard, metrics.totalMemoryUsage.update(totalStats.memoryUsage); metrics.totalDiskUsage.set(totalStats.diskUsage); + update_feeding_metrics(metrics.feeding, _feed_handler.get_stats(true), _last_feed_handler_stats); } void diff --git a/searchcore/src/vespa/searchcore/proton/server/documentdb_metrics_updater.h b/searchcore/src/vespa/searchcore/proton/server/documentdb_metrics_updater.h index b73fa3b4eb9..381d98b2199 100644 --- a/searchcore/src/vespa/searchcore/proton/server/documentdb_metrics_updater.h +++ b/searchcore/src/vespa/searchcore/proton/server/documentdb_metrics_updater.h @@ -1,8 +1,10 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. #pragma once +#include "feed_handler_stats.h" #include <vespa/searchcore/proton/metrics/documentdb_tagged_metrics.h> #include <vespa/searchlib/docstore/cachestats.h> +#include <optional> namespace proton { @@ -14,6 +16,7 @@ class DocumentDBJobTrackers; class DocumentSubDBCollection; class ExecutorThreadingService; class ExecutorThreadingServiceStats; +class FeedHandler; /** * Class used to update metrics for a document db. @@ -34,8 +37,10 @@ private: DocumentDBJobTrackers &_jobTrackers; matching::SessionManager &_sessionManager; const AttributeUsageFilter &_writeFilter; + FeedHandler &_feed_handler; // Last updated document store cache statistics. Necessary due to metrics implementation is upside down. 
DocumentStoreCacheStats _lastDocStoreCacheStats; + std::optional<FeedHandlerStats> _last_feed_handler_stats; void updateMiscMetrics(DocumentDBTaggedMetrics &metrics, const ExecutorThreadingServiceStats &threadingServiceStats); void updateAttributeResourceUsageMetrics(DocumentDBTaggedMetrics::AttributeMetrics &metrics); @@ -45,7 +50,8 @@ public: ExecutorThreadingService &writeService, DocumentDBJobTrackers &jobTrackers, matching::SessionManager &sessionManager, - const AttributeUsageFilter &writeFilter); + const AttributeUsageFilter &writeFilter, + FeedHandler& feed_handler); ~DocumentDBMetricsUpdater(); void updateMetrics(const metrics::MetricLockGuard & guard, DocumentDBTaggedMetrics &metrics); diff --git a/searchcore/src/vespa/searchcore/proton/server/documentdbconfig.cpp b/searchcore/src/vespa/searchcore/proton/server/documentdbconfig.cpp index 4b2ef276077..5bc54404721 100644 --- a/searchcore/src/vespa/searchcore/proton/server/documentdbconfig.cpp +++ b/searchcore/src/vespa/searchcore/proton/server/documentdbconfig.cpp @@ -9,6 +9,7 @@ #include <vespa/config-summary.h> #include <vespa/config-summarymap.h> #include <vespa/searchsummary/config/config-juniperrc.h> +#include <vespa/document/config/documenttypes_config_fwd.h> #include <vespa/document/config/config-documenttypes.h> #include <vespa/document/repo/documenttyperepo.h> #include <vespa/searchcore/config/config-ranking-constants.h> @@ -23,7 +24,6 @@ using namespace vespa::config::search::summary; using namespace vespa::config::search; using document::DocumentTypeRepo; -using document::DocumenttypesConfig; using search::TuneFileDocumentDB; using search::index::Schema; using vespa::config::search::SummarymapConfig; diff --git a/searchcore/src/vespa/searchcore/proton/server/documentdbconfig.h b/searchcore/src/vespa/searchcore/proton/server/documentdbconfig.h index fb29b086718..c0b0eb9dd1b 100644 --- a/searchcore/src/vespa/searchcore/proton/server/documentdbconfig.h +++ 
b/searchcore/src/vespa/searchcore/proton/server/documentdbconfig.h @@ -3,6 +3,7 @@ #pragma once #include "document_db_maintenance_config.h" +#include <vespa/document/config/documenttypes_config_fwd.h> #include <vespa/searchlib/common/tunefileinfo.h> #include <vespa/searchcommon/common/schema.h> #include <vespa/searchcore/proton/matching/ranking_constants.h> @@ -26,7 +27,6 @@ namespace document { class DocumentTypeRepo; class DocumentType; } -namespace document::internal { class InternalDocumenttypesType; } namespace proton { @@ -112,7 +112,6 @@ public: using SummarymapConfigSP = std::shared_ptr<SummarymapConfig>; using JuniperrcConfig = const vespa::config::search::summary::internal::InternalJuniperrcType; using JuniperrcConfigSP = std::shared_ptr<JuniperrcConfig>; - using DocumenttypesConfig = const document::internal::InternalDocumenttypesType; using DocumenttypesConfigSP = std::shared_ptr<DocumenttypesConfig>; using MaintenanceConfigSP = DocumentDBMaintenanceConfig::SP; using ImportedFieldsConfig = const vespa::config::search::internal::InternalImportedFieldsType; diff --git a/searchcore/src/vespa/searchcore/proton/server/documentdbconfigmanager.cpp b/searchcore/src/vespa/searchcore/proton/server/documentdbconfigmanager.cpp index a9873a80d0e..2eb6b1b92f0 100644 --- a/searchcore/src/vespa/searchcore/proton/server/documentdbconfigmanager.cpp +++ b/searchcore/src/vespa/searchcore/proton/server/documentdbconfigmanager.cpp @@ -48,6 +48,7 @@ using search::DocumentStore; using search::WriteableFileChunk; using std::make_shared; using std::make_unique; +using vespalib::datastore::CompactionStrategy; using vespalib::make_string_short::fmt; @@ -197,7 +198,7 @@ getStoreConfig(const ProtonConfig::Summary::Cache & cache, const HwInfo & hwInfo } LogDocumentStore::Config -deriveConfig(const ProtonConfig::Summary & summary, const ProtonConfig::Flush::Memory & flush, const HwInfo & hwInfo) { +deriveConfig(const ProtonConfig::Summary & summary, const HwInfo & hwInfo) { 
DocumentStore::Config config(getStoreConfig(summary.cache, hwInfo)); const ProtonConfig::Summary::Log & log(summary.log); const ProtonConfig::Summary::Log::Chunk & chunk(log.chunk); @@ -205,7 +206,6 @@ deriveConfig(const ProtonConfig::Summary & summary, const ProtonConfig::Flush::M LogDataStore::Config logConfig; logConfig.setMaxFileSize(log.maxfilesize) .setMaxNumLids(log.maxnumlids) - .setMaxDiskBloatFactor(std::min(flush.diskbloatfactor, flush.each.diskbloatfactor)) .setMaxBucketSpread(log.maxbucketspread).setMinFileSizeFactor(log.minfilesizefactor) .compactCompression(deriveCompression(log.compact.compression)) .setFileConfig(fileConfig).disableCrcOnRead(chunk.skipcrconread); @@ -213,7 +213,7 @@ deriveConfig(const ProtonConfig::Summary & summary, const ProtonConfig::Flush::M } search::LogDocumentStore::Config buildStoreConfig(const ProtonConfig & proton, const HwInfo & hwInfo) { - return deriveConfig(proton.summary, proton.flush.memory, hwInfo); + return deriveConfig(proton.summary, hwInfo); } using AttributesConfigSP = DocumentDBConfig::AttributesConfigSP; @@ -264,7 +264,7 @@ build_alloc_config(const ProtonConfig& proton_config, const vespalib::string& do auto& alloc_config = document_db_config_entry.allocation; auto& distribution_config = proton_config.distribution; search::GrowStrategy grow_strategy(alloc_config.initialnumdocs, alloc_config.growfactor, alloc_config.growbias, alloc_config.multivaluegrowfactor); - search::CompactionStrategy compaction_strategy(alloc_config.maxDeadBytesRatio, alloc_config.maxDeadAddressSpaceRatio); + CompactionStrategy compaction_strategy(alloc_config.maxDeadBytesRatio, alloc_config.maxDeadAddressSpaceRatio); return std::make_shared<const AllocConfig> (AllocStrategy(grow_strategy, compaction_strategy, alloc_config.amortizecount), distribution_config.redundancy, distribution_config.searchablecopies); diff --git a/searchcore/src/vespa/searchcore/proton/server/documentsubdbcollection.cpp 
b/searchcore/src/vespa/searchcore/proton/server/documentsubdbcollection.cpp index 1ec639a2e21..3c9bdb2ec5f 100644 --- a/searchcore/src/vespa/searchcore/proton/server/documentsubdbcollection.cpp +++ b/searchcore/src/vespa/searchcore/proton/server/documentsubdbcollection.cpp @@ -26,7 +26,7 @@ DocumentSubDBCollection::DocumentSubDBCollection( const IGetSerialNum &getSerialNum, const DocTypeName &docTypeName, searchcorespi::index::IThreadingService &writeService, - vespalib::SyncableThreadExecutor &warmupExecutor, + vespalib::Executor &warmupExecutor, const search::common::FileHeaderContext &fileHeaderContext, MetricsWireService &metricsWireService, DocumentDBTaggedMetrics &metrics, @@ -303,11 +303,11 @@ DocumentSubDBCollection::close() } void -DocumentSubDBCollection::setBucketStateCalculator(const IBucketStateCalculatorSP &calc) +DocumentSubDBCollection::setBucketStateCalculator(const IBucketStateCalculatorSP &calc, OnDone onDone) { _calc = calc; for (auto subDb : _subDBs) { - subDb->setBucketStateCalculator(calc); + subDb->setBucketStateCalculator(calc, onDone); } } diff --git a/searchcore/src/vespa/searchcore/proton/server/documentsubdbcollection.h b/searchcore/src/vespa/searchcore/proton/server/documentsubdbcollection.h index d309593681c..ca092bb0957 100644 --- a/searchcore/src/vespa/searchcore/proton/server/documentsubdbcollection.h +++ b/searchcore/src/vespa/searchcore/proton/server/documentsubdbcollection.h @@ -7,11 +7,12 @@ #include <vespa/searchcore/proton/persistenceengine/i_document_retriever.h> #include <vespa/searchlib/common/serialnum.h> #include <vespa/vespalib/util/varholder.h> +#include <vespa/vespalib/util/idestructorcallback.h> #include <mutex> namespace vespalib { class Clock; - class SyncableThreadExecutor; + class Executor; class ThreadStackExecutorBase; } @@ -58,6 +59,7 @@ public: using SubDBVector = std::vector<IDocumentSubDB *>; using const_iterator = SubDBVector::const_iterator; using SerialNum = search::SerialNum; + using OnDone = 
std::shared_ptr<vespalib::IDestructorCallback>; private: using IFeedViewSP = std::shared_ptr<IFeedView>; @@ -84,7 +86,7 @@ public: const IGetSerialNum &getSerialNum, const DocTypeName &docTypeName, searchcorespi::index::IThreadingService &writeService, - vespalib::SyncableThreadExecutor &warmupExecutor, + vespalib::Executor &warmupExecutor, const search::common::FileHeaderContext &fileHeaderContext, MetricsWireService &metricsWireService, DocumentDBTaggedMetrics &metrics, @@ -95,7 +97,7 @@ public: const HwInfo &hwInfo); ~DocumentSubDBCollection(); - void setBucketStateCalculator(const IBucketStateCalculatorSP &calc); + void setBucketStateCalculator(const IBucketStateCalculatorSP &calc, OnDone onDone); void createRetrievers(); void maintenanceSync(MaintenanceController &mc); diff --git a/searchcore/src/vespa/searchcore/proton/server/executor_explorer_utils.cpp b/searchcore/src/vespa/searchcore/proton/server/executor_explorer_utils.cpp index c044b544675..2bccbe468ce 100644 --- a/searchcore/src/vespa/searchcore/proton/server/executor_explorer_utils.cpp +++ b/searchcore/src/vespa/searchcore/proton/server/executor_explorer_utils.cpp @@ -2,17 +2,21 @@ #include "executor_explorer_utils.h" #include <vespa/vespalib/data/slime/cursor.h> +#include <vespa/vespalib/util/adaptive_sequenced_executor.h> #include <vespa/vespalib/util/blockingthreadstackexecutor.h> +#include <vespa/vespalib/util/sequencedtaskexecutor.h> #include <vespa/vespalib/util/singleexecutor.h> #include <vespa/vespalib/util/threadstackexecutor.h> +using vespalib::AdaptiveSequencedExecutor; using vespalib::BlockingThreadStackExecutor; +using vespalib::ISequencedTaskExecutor; +using vespalib::SequencedTaskExecutor; using vespalib::SingleExecutor; using vespalib::ThreadExecutor; using vespalib::ThreadStackExecutor; using vespalib::slime::Cursor; - namespace proton::explorer { namespace { @@ -33,6 +37,32 @@ convert_single_executor_to_slime(const SingleExecutor& executor, Cursor& object) 
object.setDouble("reaction_time_sec", vespalib::to_s(executor.get_reaction_time())); } +void +set_type(Cursor& object, const vespalib::string& type) +{ + object.setString("type", type); +} + +void +convert_sequenced_executor_to_slime(const SequencedTaskExecutor& executor, Cursor& object) +{ + set_type(object, "SequencedTaskExecutor"); + object.setLong("num_executors", executor.getNumExecutors()); + convert_executor_to_slime(executor.first_executor(), object.setObject("executor")); +} + +void +convert_adaptive_executor_to_slime(const AdaptiveSequencedExecutor& executor, Cursor& object) +{ + set_type(object, "AdaptiveSequencedExecutor"); + object.setLong("num_strands", executor.getNumExecutors()); + auto cfg = executor.get_config(); + object.setLong("num_threads", cfg.num_threads); + object.setLong("max_waiting", cfg.max_waiting); + object.setLong("max_pending", cfg.max_pending); + object.setLong("wakeup_limit", cfg.wakeup_limit); +} + } void @@ -52,5 +82,21 @@ convert_executor_to_slime(const ThreadExecutor* executor, Cursor& object) } } +void +convert_executor_to_slime(const ISequencedTaskExecutor* executor, Cursor& object) +{ + if (executor == nullptr) { + return; + } + if (const auto* seq = dynamic_cast<const SequencedTaskExecutor*>(executor)) { + convert_sequenced_executor_to_slime(*seq, object); + } else if (const auto* ada = dynamic_cast<const AdaptiveSequencedExecutor*>(executor)) { + convert_adaptive_executor_to_slime(*ada, object); + } else { + set_type(object, "ISequencedTaskExecutor"); + object.setLong("num_executors", executor->getNumExecutors()); + } +} + } diff --git a/searchcore/src/vespa/searchcore/proton/server/executor_explorer_utils.h b/searchcore/src/vespa/searchcore/proton/server/executor_explorer_utils.h index 7ed0d073ded..aa7bfabd00b 100644 --- a/searchcore/src/vespa/searchcore/proton/server/executor_explorer_utils.h +++ b/searchcore/src/vespa/searchcore/proton/server/executor_explorer_utils.h @@ -2,15 +2,23 @@ #pragma once -namespace vespalib 
{ class ThreadExecutor; } +namespace vespalib { +class ISequencedTaskExecutor; +class ThreadExecutor; +} namespace vespalib::slime { struct Cursor; } namespace proton::explorer { /** - * Utility to convert an executor to slime for use with a state explorer. + * Utility to convert a thread executor to slime for use with a state explorer. */ void convert_executor_to_slime(const vespalib::ThreadExecutor* executor, vespalib::slime::Cursor& object); +/** + * Utility to convert a sequenced task executor to slime for use with a state explorer. + */ +void convert_executor_to_slime(const vespalib::ISequencedTaskExecutor* executor, vespalib::slime::Cursor& object); + } diff --git a/searchcore/src/vespa/searchcore/proton/server/executor_thread_service.cpp b/searchcore/src/vespa/searchcore/proton/server/executor_thread_service.cpp index 684132b34e7..74f6a622661 100644 --- a/searchcore/src/vespa/searchcore/proton/server/executor_thread_service.cpp +++ b/searchcore/src/vespa/searchcore/proton/server/executor_thread_service.cpp @@ -9,6 +9,7 @@ using vespalib::makeLambdaTask; using vespalib::Executor; using vespalib::Gate; using vespalib::Runnable; +using vespalib::ThreadExecutor; using vespalib::SyncableThreadExecutor; namespace proton { @@ -29,11 +30,15 @@ sampleThreadId(FastOS_ThreadId *threadId) } std::unique_ptr<internal::ThreadId> -getThreadId(SyncableThreadExecutor &executor) +getThreadId(ThreadExecutor &executor) { std::unique_ptr<internal::ThreadId> id = std::make_unique<internal::ThreadId>(); - executor.execute(makeLambdaTask([threadId=&id->_id] { sampleThreadId(threadId);})); - executor.sync(); + vespalib::Gate gate; + executor.execute(makeLambdaTask([threadId=&id->_id, &gate] { + sampleThreadId(threadId); + gate.countDown(); + })); + gate.await(); return id; } @@ -46,7 +51,7 @@ runRunnable(Runnable *runnable, Gate *gate) } // namespace -ExecutorThreadService::ExecutorThreadService(SyncableThreadExecutor &executor) 
+ExecutorThreadService::ExecutorThreadService(ThreadExecutor &executor) : _executor(executor), _threadId(getThreadId(executor)) { @@ -90,4 +95,51 @@ ExecutorThreadService::wakeup() { _executor.wakeup(); } +SyncableExecutorThreadService::SyncableExecutorThreadService(SyncableThreadExecutor &executor) + : _executor(executor), + _threadId(getThreadId(executor)) +{ +} + +SyncableExecutorThreadService::~SyncableExecutorThreadService() = default; + +void +SyncableExecutorThreadService::run(Runnable &runnable) +{ + if (isCurrentThread()) { + runnable.run(); + } else { + Gate gate; + _executor.execute(makeLambdaTask([runnablePtr=&runnable, gatePtr=&gate] { runRunnable(runnablePtr, gatePtr); })); + gate.await(); + } +} + +bool +SyncableExecutorThreadService::isCurrentThread() const +{ + FastOS_ThreadId currentThreadId = FastOS_Thread::GetCurrentThreadId(); + return FastOS_Thread::CompareThreadIds(_threadId->_id, currentThreadId); +} + +vespalib::ExecutorStats +SyncableExecutorThreadService::getStats() { + return _executor.getStats(); +} + +void +SyncableExecutorThreadService::setTaskLimit(uint32_t taskLimit) { + _executor.setTaskLimit(taskLimit); +} + +uint32_t +SyncableExecutorThreadService::getTaskLimit() const { + return _executor.getTaskLimit(); +} + +void +SyncableExecutorThreadService::wakeup() { + _executor.wakeup(); +} + } // namespace proton diff --git a/searchcore/src/vespa/searchcore/proton/server/executor_thread_service.h b/searchcore/src/vespa/searchcore/proton/server/executor_thread_service.h index 44a330ca696..7298b81611a 100644 --- a/searchcore/src/vespa/searchcore/proton/server/executor_thread_service.h +++ b/searchcore/src/vespa/searchcore/proton/server/executor_thread_service.h @@ -14,11 +14,11 @@ namespace internal { struct ThreadId; } class ExecutorThreadService : public searchcorespi::index::IThreadService { private: - vespalib::SyncableThreadExecutor &_executor; + vespalib::ThreadExecutor &_executor; std::unique_ptr<internal::ThreadId> _threadId; 
public: - ExecutorThreadService(vespalib::SyncableThreadExecutor &executor); + ExecutorThreadService(vespalib::ThreadExecutor &executor); ~ExecutorThreadService(); vespalib::ExecutorStats getStats() override; @@ -27,14 +27,36 @@ public: return _executor.execute(std::move(task)); } void run(vespalib::Runnable &runnable) override; + + bool isCurrentThread() const override; + size_t getNumThreads() const override { return _executor.getNumThreads(); } + + void setTaskLimit(uint32_t taskLimit) override; + uint32_t getTaskLimit() const override; + void wakeup() override; +}; + +class SyncableExecutorThreadService : public searchcorespi::index::ISyncableThreadService +{ +private: + vespalib::SyncableThreadExecutor &_executor; + std::unique_ptr<internal::ThreadId> _threadId; + +public: + SyncableExecutorThreadService(vespalib::SyncableThreadExecutor &executor); + ~SyncableExecutorThreadService(); + + vespalib::ExecutorStats getStats() override; + + vespalib::Executor::Task::UP execute(vespalib::Executor::Task::UP task) override { + return _executor.execute(std::move(task)); + } + void run(vespalib::Runnable &runnable) override; vespalib::Syncable &sync() override { _executor.sync(); return *this; } - ExecutorThreadService & shutdown() override { - _executor.shutdown(); - return *this; - } + bool isCurrentThread() const override; size_t getNumThreads() const override { return _executor.getNumThreads(); } diff --git a/searchcore/src/vespa/searchcore/proton/server/executor_threading_service_explorer.cpp b/searchcore/src/vespa/searchcore/proton/server/executor_threading_service_explorer.cpp index 89de400216e..5bbbf1ca57d 100644 --- a/searchcore/src/vespa/searchcore/proton/server/executor_threading_service_explorer.cpp +++ b/searchcore/src/vespa/searchcore/proton/server/executor_threading_service_explorer.cpp @@ -4,62 +4,12 @@ #include "executor_threading_service_explorer.h" #include "executorthreadingservice.h" #include <vespa/vespalib/data/slime/cursor.h> -#include 
<vespa/vespalib/util/adaptive_sequenced_executor.h> -#include <vespa/vespalib/util/sequencedtaskexecutor.h> - -using vespalib::AdaptiveSequencedExecutor; -using vespalib::ISequencedTaskExecutor; -using vespalib::SequencedTaskExecutor; -using vespalib::slime::Cursor; namespace proton { using explorer::convert_executor_to_slime; -namespace { - -void -set_type(Cursor& object, const vespalib::string& type) -{ - object.setString("type", type); -} - -void -convert_sequenced_executor_to_slime(const SequencedTaskExecutor& executor, Cursor& object) -{ - set_type(object, "SequencedTaskExecutor"); - object.setLong("num_executors", executor.getNumExecutors()); - convert_executor_to_slime(executor.first_executor(), object.setObject("executor")); -} - -void -convert_adaptive_executor_to_slime(const AdaptiveSequencedExecutor& executor, Cursor& object) -{ - set_type(object, "AdaptiveSequencedExecutor"); - object.setLong("num_strands", executor.getNumExecutors()); - auto cfg = executor.get_config(); - object.setLong("num_threads", cfg.num_threads); - object.setLong("max_waiting", cfg.max_waiting); - object.setLong("max_pending", cfg.max_pending); - object.setLong("wakeup_limit", cfg.wakeup_limit); -} - -void -convert_executor_to_slime(const ISequencedTaskExecutor* executor, Cursor& object) -{ - if (const auto* seq = dynamic_cast<const SequencedTaskExecutor*>(executor)) { - convert_sequenced_executor_to_slime(*seq, object); - } else if (const auto* ada = dynamic_cast<const AdaptiveSequencedExecutor*>(executor)) { - convert_adaptive_executor_to_slime(*ada, object); - } else { - set_type(object, "ISequencedTaskExecutor"); - object.setLong("num_executors", executor->getNumExecutors()); - } -} - -} - -ExecutorThreadingServiceExplorer::ExecutorThreadingServiceExplorer(ExecutorThreadingService& service) +ExecutorThreadingServiceExplorer::ExecutorThreadingServiceExplorer(searchcorespi::index::IThreadingService& service) : _service(service) { } @@ -71,9 +21,9 @@ 
ExecutorThreadingServiceExplorer::get_state(const vespalib::slime::Inserter& ins { auto& object = inserter.insertObject(); if (full) { - convert_executor_to_slime(&_service.getMasterExecutor(), object.setObject("master")); - convert_executor_to_slime(&_service.getIndexExecutor(), object.setObject("index")); - convert_executor_to_slime(&_service.getSummaryExecutor(), object.setObject("summary")); + convert_executor_to_slime(&_service.master(), object.setObject("master")); + convert_executor_to_slime(&_service.index(), object.setObject("index")); + convert_executor_to_slime(&_service.summary(), object.setObject("summary")); convert_executor_to_slime(&_service.indexFieldInverter(), object.setObject("index_field_inverter")); convert_executor_to_slime(&_service.indexFieldWriter(), object.setObject("index_field_writer")); convert_executor_to_slime(&_service.attributeFieldWriter(), object.setObject("attribute_field_writer")); diff --git a/searchcore/src/vespa/searchcore/proton/server/executor_threading_service_explorer.h b/searchcore/src/vespa/searchcore/proton/server/executor_threading_service_explorer.h index 374a0e6b494..f0bb20ab64e 100644 --- a/searchcore/src/vespa/searchcore/proton/server/executor_threading_service_explorer.h +++ b/searchcore/src/vespa/searchcore/proton/server/executor_threading_service_explorer.h @@ -4,6 +4,7 @@ #include <vespa/vespalib/net/state_explorer.h> +namespace searchcorespi::index { struct IThreadingService; } namespace proton { class ExecutorThreadingService; @@ -13,10 +14,10 @@ class ExecutorThreadingService; */ class ExecutorThreadingServiceExplorer : public vespalib::StateExplorer { private: - ExecutorThreadingService& _service; + searchcorespi::index::IThreadingService& _service; public: - ExecutorThreadingServiceExplorer(ExecutorThreadingService& service); + ExecutorThreadingServiceExplorer(searchcorespi::index::IThreadingService& service); ~ExecutorThreadingServiceExplorer(); void get_state(const vespalib::slime::Inserter& inserter, 
bool full) const override; diff --git a/searchcore/src/vespa/searchcore/proton/server/executorthreadingservice.cpp b/searchcore/src/vespa/searchcore/proton/server/executorthreadingservice.cpp index dcd29c9ddcb..36c8070f140 100644 --- a/searchcore/src/vespa/searchcore/proton/server/executorthreadingservice.cpp +++ b/searchcore/src/vespa/searchcore/proton/server/executorthreadingservice.cpp @@ -39,10 +39,12 @@ VESPA_THREAD_STACK_TAG(field_writer_executor) } ExecutorThreadingService::ExecutorThreadingService(vespalib::ThreadExecutor &sharedExecutor, uint32_t num_treads) - : ExecutorThreadingService(sharedExecutor, ThreadingServiceConfig::make(num_treads)) + : ExecutorThreadingService(sharedExecutor, nullptr, nullptr, ThreadingServiceConfig::make(num_treads)) {} ExecutorThreadingService::ExecutorThreadingService(vespalib::ThreadExecutor& sharedExecutor, + vespalib::ISequencedTaskExecutor* field_writer, + vespalib::InvokeService * invokerService, const ThreadingServiceConfig& cfg, uint32_t stackSize) @@ -54,35 +56,52 @@ ExecutorThreadingService::ExecutorThreadingService(vespalib::ThreadExecutor& sha _summaryExecutor(createExecutorWithOneThread(stackSize, cfg.defaultTaskLimit(), cfg.optimize(), summary_executor)), _masterService(_masterExecutor), _indexService(*_indexExecutor), - _summaryService(*_summaryExecutor), _indexFieldInverter(), _indexFieldWriter(), _attributeFieldWriter(), _field_writer(), _index_field_inverter_ptr(), _index_field_writer_ptr(), - _attribute_field_writer_ptr() + _attribute_field_writer_ptr(), + _invokeRegistrations() { + if (cfg.optimize() == vespalib::Executor::OptimizeFor::THROUGHPUT && invokerService) { + _invokeRegistrations.push_back(invokerService->registerInvoke([executor=_indexExecutor.get()](){ executor->wakeup();})); + _invokeRegistrations.push_back(invokerService->registerInvoke([executor=_summaryExecutor.get()](){ executor->wakeup();})); + } if (_shared_field_writer == SharedFieldWriterExecutor::INDEX) { _field_writer = 
SequencedTaskExecutor::create(field_writer_executor, cfg.indexingThreads() * 2, cfg.defaultTaskLimit()); _attributeFieldWriter = SequencedTaskExecutor::create(attribute_field_writer_executor, cfg.indexingThreads(), cfg.defaultTaskLimit(), - cfg.optimize(), cfg.kindOfwatermark(), cfg.reactionTime()); + cfg.optimize(), cfg.kindOfwatermark()); + if (cfg.optimize() == vespalib::Executor::OptimizeFor::THROUGHPUT && invokerService) { + _invokeRegistrations.push_back(invokerService->registerInvoke([executor=_attributeFieldWriter.get()](){ executor->wakeup();})); + } _index_field_inverter_ptr = _field_writer.get(); _index_field_writer_ptr = _field_writer.get(); _attribute_field_writer_ptr = _attributeFieldWriter.get(); } else if (_shared_field_writer == SharedFieldWriterExecutor::INDEX_AND_ATTRIBUTE) { _field_writer = SequencedTaskExecutor::create(field_writer_executor, cfg.indexingThreads() * 3, cfg.defaultTaskLimit(), - cfg.optimize(), cfg.kindOfwatermark(), cfg.reactionTime()); + cfg.optimize(), cfg.kindOfwatermark()); + if (cfg.optimize() == vespalib::Executor::OptimizeFor::THROUGHPUT && invokerService) { + _invokeRegistrations.push_back(invokerService->registerInvoke([executor=_field_writer.get()](){ executor->wakeup();})); + } _index_field_inverter_ptr = _field_writer.get(); _index_field_writer_ptr = _field_writer.get(); _attribute_field_writer_ptr = _field_writer.get(); + } else if (_shared_field_writer == SharedFieldWriterExecutor::DOCUMENT_DB) { + assert(field_writer != nullptr); + _index_field_inverter_ptr = field_writer; + _index_field_writer_ptr = field_writer; + _attribute_field_writer_ptr = field_writer; } else { - // TODO: Add support for shared field writer across all document dbs. 
_indexFieldInverter = SequencedTaskExecutor::create(index_field_inverter_executor, cfg.indexingThreads(), cfg.defaultTaskLimit()); _indexFieldWriter = SequencedTaskExecutor::create(index_field_writer_executor, cfg.indexingThreads(), cfg.defaultTaskLimit()); _attributeFieldWriter = SequencedTaskExecutor::create(attribute_field_writer_executor, cfg.indexingThreads(), cfg.defaultTaskLimit(), - cfg.optimize(), cfg.kindOfwatermark(), cfg.reactionTime()); + cfg.optimize(), cfg.kindOfwatermark()); + if (cfg.optimize() == vespalib::Executor::OptimizeFor::THROUGHPUT && invokerService) { + _invokeRegistrations.push_back(invokerService->registerInvoke([executor=_attributeFieldWriter.get()](){ executor->wakeup();})); + } _index_field_inverter_ptr = _indexFieldInverter.get(); _index_field_writer_ptr = _indexFieldWriter.get(); _attribute_field_writer_ptr = _attributeFieldWriter.get(); @@ -92,14 +111,6 @@ ExecutorThreadingService::ExecutorThreadingService(vespalib::ThreadExecutor& sha ExecutorThreadingService::~ExecutorThreadingService() = default; void -ExecutorThreadingService::sync_all_executors() { - // We have multiple patterns where task A posts to B which post back to A - for (size_t i = 0; i < 2; i++) { - syncOnce(); - } -} - -void ExecutorThreadingService::blocking_master_execute(vespalib::Executor::Task::UP task) { uint32_t limit = master_task_limit(); @@ -110,31 +121,12 @@ ExecutorThreadingService::blocking_master_execute(vespalib::Executor::Task::UP t } void -ExecutorThreadingService::syncOnce() { - bool isMasterThread = _masterService.isCurrentThread(); - if (!isMasterThread) { - _masterExecutor.sync(); - } - _attribute_field_writer_ptr->sync_all(); - _indexExecutor->sync(); - _summaryExecutor->sync(); - _index_field_inverter_ptr->sync_all(); - _index_field_writer_ptr->sync_all(); - if (!isMasterThread) { - _masterExecutor.sync(); - } -} - -void ExecutorThreadingService::shutdown() { - _masterExecutor.shutdown(); - _masterExecutor.sync(); + 
_masterExecutor.shutdown().sync(); _attribute_field_writer_ptr->sync_all(); - _summaryExecutor->shutdown(); - _summaryExecutor->sync(); - _indexExecutor->shutdown(); - _indexExecutor->sync(); + _summaryExecutor->shutdown().sync(); + _indexExecutor->shutdown().sync(); _index_field_inverter_ptr->sync_all(); _index_field_writer_ptr->sync_all(); } @@ -147,6 +139,7 @@ ExecutorThreadingService::set_task_limits(uint32_t master_task_limit, _master_task_limit.store(master_task_limit, std::memory_order_release); _indexExecutor->setTaskLimit(field_task_limit); _summaryExecutor->setTaskLimit(summary_task_limit); + // TODO: Move this to a common place when the field writer is always shared. _index_field_inverter_ptr->setTaskLimit(field_task_limit); _index_field_writer_ptr->setTaskLimit(field_task_limit); _attribute_field_writer_ptr->setTaskLimit(field_task_limit); @@ -158,21 +151,25 @@ ExecutorThreadingService::getStats() auto master_stats = _masterExecutor.getStats(); auto index_stats = _indexExecutor->getStats(); auto summary_stats = _summaryExecutor->getStats(); - auto shared_stats = _sharedExecutor.getStats(); if (_shared_field_writer == SharedFieldWriterExecutor::INDEX) { auto field_writer_stats = _field_writer->getStats(); - return ExecutorThreadingServiceStats(master_stats, index_stats, summary_stats, shared_stats, + return ExecutorThreadingServiceStats(master_stats, index_stats, summary_stats, field_writer_stats, field_writer_stats, _attribute_field_writer_ptr->getStats()); } else if (_shared_field_writer == SharedFieldWriterExecutor::INDEX_AND_ATTRIBUTE) { auto field_writer_stats = _field_writer->getStats(); - return ExecutorThreadingServiceStats(master_stats, index_stats, summary_stats, shared_stats, + return ExecutorThreadingServiceStats(master_stats, index_stats, summary_stats, field_writer_stats, field_writer_stats, field_writer_stats); + } else if (_shared_field_writer == SharedFieldWriterExecutor::DOCUMENT_DB) { + vespalib::ExecutorStats empty_stats; + // In this 
case the field writer stats are reported at a higher level. + return ExecutorThreadingServiceStats(master_stats, index_stats, summary_stats, + empty_stats, empty_stats, empty_stats); } else { - return ExecutorThreadingServiceStats(master_stats, index_stats, summary_stats, shared_stats, + return ExecutorThreadingServiceStats(master_stats, index_stats, summary_stats, _index_field_inverter_ptr->getStats(), _index_field_writer_ptr->getStats(), _attribute_field_writer_ptr->getStats()); diff --git a/searchcore/src/vespa/searchcore/proton/server/executorthreadingservice.h b/searchcore/src/vespa/searchcore/proton/server/executorthreadingservice.h index 629c5043ed7..8572f7126d6 100644 --- a/searchcore/src/vespa/searchcore/proton/server/executorthreadingservice.h +++ b/searchcore/src/vespa/searchcore/proton/server/executorthreadingservice.h @@ -5,6 +5,8 @@ #include "threading_service_config.h" #include <vespa/searchcorespi/index/ithreadingservice.h> #include <vespa/vespalib/util/threadstackexecutor.h> +#include <vespa/vespalib/util/invokeservice.h> +#include <atomic> namespace proton { @@ -17,15 +19,15 @@ class ExecutorThreadingServiceStats; class ExecutorThreadingService : public searchcorespi::index::IThreadingService { private: + using Registration = std::unique_ptr<vespalib::IDestructorCallback>; vespalib::ThreadExecutor & _sharedExecutor; vespalib::ThreadStackExecutor _masterExecutor; ThreadingServiceConfig::SharedFieldWriterExecutor _shared_field_writer; std::atomic<uint32_t> _master_task_limit; std::unique_ptr<vespalib::SyncableThreadExecutor> _indexExecutor; std::unique_ptr<vespalib::SyncableThreadExecutor> _summaryExecutor; - ExecutorThreadService _masterService; + SyncableExecutorThreadService _masterService; ExecutorThreadService _indexService; - ExecutorThreadService _summaryService; std::unique_ptr<vespalib::ISequencedTaskExecutor> _indexFieldInverter; std::unique_ptr<vespalib::ISequencedTaskExecutor> _indexFieldWriter; 
std::unique_ptr<vespalib::ISequencedTaskExecutor> _attributeFieldWriter; @@ -33,8 +35,8 @@ private: vespalib::ISequencedTaskExecutor* _index_field_inverter_ptr; vespalib::ISequencedTaskExecutor* _index_field_writer_ptr; vespalib::ISequencedTaskExecutor* _attribute_field_writer_ptr; + std::vector<Registration> _invokeRegistrations; - void syncOnce(); public: using OptimizeFor = vespalib::Executor::OptimizeFor; /** @@ -43,12 +45,12 @@ public: ExecutorThreadingService(vespalib::ThreadExecutor& sharedExecutor, uint32_t num_treads = 1); ExecutorThreadingService(vespalib::ThreadExecutor& sharedExecutor, + vespalib::ISequencedTaskExecutor* field_writer, + vespalib::InvokeService * invokeService, const ThreadingServiceConfig& cfg, uint32_t stackSize = 128 * 1024); ~ExecutorThreadingService() override; - void sync_all_executors() override; - void blocking_master_execute(vespalib::Executor::Task::UP task) override; void shutdown(); @@ -60,27 +62,15 @@ public: uint32_t field_task_limit, uint32_t summary_task_limit); - // Expose the underlying executors for stats fetching and testing. 
- // TOD: Remove - This is only used for casting to check the underlying type - vespalib::ThreadExecutor &getMasterExecutor() { - return _masterExecutor; - } - vespalib::ThreadExecutor &getIndexExecutor() { - return *_indexExecutor; - } - vespalib::ThreadExecutor &getSummaryExecutor() { - return *_summaryExecutor; - } - - searchcorespi::index::IThreadService &master() override { + searchcorespi::index::ISyncableThreadService &master() override { return _masterService; } searchcorespi::index::IThreadService &index() override { return _indexService; } - searchcorespi::index::IThreadService &summary() override { - return _summaryService; + vespalib::ThreadExecutor &summary() override { + return *_summaryExecutor; } vespalib::ThreadExecutor &shared() override { return _sharedExecutor; diff --git a/searchcore/src/vespa/searchcore/proton/server/fast_access_doc_subdb.cpp b/searchcore/src/vespa/searchcore/proton/server/fast_access_doc_subdb.cpp index 40a1a1a45f3..f8a6253ead1 100644 --- a/searchcore/src/vespa/searchcore/proton/server/fast_access_doc_subdb.cpp +++ b/searchcore/src/vespa/searchcore/proton/server/fast_access_doc_subdb.cpp @@ -276,7 +276,10 @@ FastAccessDocSubDB::applyConfig(const DocumentDBConfig &newConfigSnapshot, const } _iFeedView.set(_fastAccessFeedView.get()); if (isNodeRetired()) { - reconfigureAttributesConsideringNodeState(); + // TODO Should probably ahve a similar OnDone callback to applyConfig too. 
+ vespalib::Gate gate; + reconfigureAttributesConsideringNodeState(std::make_shared<vespalib::GateCallback>(gate)); + gate.await(); } } return tasks; diff --git a/searchcore/src/vespa/searchcore/proton/server/fast_access_feed_view.cpp b/searchcore/src/vespa/searchcore/proton/server/fast_access_feed_view.cpp index e2b3887c60c..db8046dac23 100644 --- a/searchcore/src/vespa/searchcore/proton/server/fast_access_feed_view.cpp +++ b/searchcore/src/vespa/searchcore/proton/server/fast_access_feed_view.cpp @@ -55,9 +55,9 @@ FastAccessFeedView::removeAttributes(SerialNum serialNum, const LidVector &lidsT } void -FastAccessFeedView::heartBeatAttributes(SerialNum serialNum) +FastAccessFeedView::heartBeatAttributes(SerialNum serialNum, DoneCallback onDone) { - _attributeWriter->heartBeat(serialNum); + _attributeWriter->heartBeat(serialNum, onDone); } FastAccessFeedView::FastAccessFeedView(StoreOnlyFeedView::Context storeOnlyCtx, const PersistentParams ¶ms, const Context &ctx) @@ -69,13 +69,13 @@ FastAccessFeedView::FastAccessFeedView(StoreOnlyFeedView::Context storeOnlyCtx, FastAccessFeedView::~FastAccessFeedView() = default; void -FastAccessFeedView::handleCompactLidSpace(const CompactLidSpaceOperation &op) +FastAccessFeedView::handleCompactLidSpace(const CompactLidSpaceOperation &op, DoneCallback onDone) { // Drain pending PutDoneContext and ForceCommitContext objects forceCommitAndWait(search::CommitParam(op.getSerialNum())); _docIdLimit.set(op.getLidLimit()); getAttributeWriter()->compactLidSpace(op.getLidLimit(), op.getSerialNum()); - Parent::handleCompactLidSpace(op); + Parent::handleCompactLidSpace(op, onDone); } void diff --git a/searchcore/src/vespa/searchcore/proton/server/fast_access_feed_view.h b/searchcore/src/vespa/searchcore/proton/server/fast_access_feed_view.h index a27bf04d701..efe47401855 100644 --- a/searchcore/src/vespa/searchcore/proton/server/fast_access_feed_view.h +++ b/searchcore/src/vespa/searchcore/proton/server/fast_access_feed_view.h @@ -45,7 +45,7 
@@ private: void removeAttributes(SerialNum serialNum, const LidVector &lidsToRemove, OnWriteDoneType onWriteDone) override; - void heartBeatAttributes(SerialNum serialNum) override; + void heartBeatAttributes(SerialNum serialNum, DoneCallback onDone) override; protected: void internalForceCommit(const CommitParam & param, OnForceCommitDoneType onCommitDone) override; @@ -62,7 +62,7 @@ public: return _docIdLimit; } - void handleCompactLidSpace(const CompactLidSpaceOperation &op) override; + void handleCompactLidSpace(const CompactLidSpaceOperation &op, DoneCallback onDone) override; }; } // namespace proton diff --git a/searchcore/src/vespa/searchcore/proton/server/feed_handler_stats.cpp b/searchcore/src/vespa/searchcore/proton/server/feed_handler_stats.cpp new file mode 100644 index 00000000000..f5665c47529 --- /dev/null +++ b/searchcore/src/vespa/searchcore/proton/server/feed_handler_stats.cpp @@ -0,0 +1,83 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+ +#include "feed_handler_stats.h" +#include <cassert> +#include <vespa/log/log.h> + +LOG_SETUP(".proton.server.feed_handler_stats"); + +namespace proton { + +namespace { + +template <typename T> +void update_min_max(T value, std::optional<T>& min, std::optional<T>& max) +{ + if (!min.has_value() || value < min.value()) { + min = value; + } + if (!max.has_value() || value > max.value()) { + max = value; + } +} + +} + +FeedHandlerStats::FeedHandlerStats(uint64_t commits, uint64_t operations, double total_latency) noexcept + : _commits(commits), + _operations(operations), + _total_latency(total_latency), + _min_operations(), + _max_operations(), + _min_latency(), + _max_latency() +{ +} + +FeedHandlerStats::FeedHandlerStats() noexcept + : FeedHandlerStats(0, 0, 0.0) +{ +} + +FeedHandlerStats::~FeedHandlerStats() = default; + + +FeedHandlerStats& +FeedHandlerStats::operator-=(const FeedHandlerStats& rhs) noexcept +{ + _commits -= rhs._commits; + _operations -= rhs._operations; + _total_latency -= rhs._total_latency; + return *this; +} + +void +FeedHandlerStats::add_commit(uint32_t operations, double latency) noexcept +{ + ++_commits; + _operations += operations; + _total_latency += latency; + update_min_max(operations, _min_operations, _max_operations); + update_min_max(latency, _min_latency, _max_latency); +} + +void +FeedHandlerStats::reset_min_max() noexcept +{ + _min_operations.reset(); + _max_operations.reset(); + _min_latency.reset(); + _max_latency.reset(); +} + +void +FeedOperationCounter::commitCompleted(size_t numOperations) { + assert(_commitsStarted > _commitsCompleted); + assert(_operationsStarted >= _operationsCompleted + numOperations); + _operationsCompleted += numOperations; + _commitsCompleted++; + LOG(spam, "%zu: onCommitDone(%zu) total=%zu left=%zu", + _commitsCompleted, numOperations, _operationsCompleted, operationsInFlight()); +} + +} diff --git a/searchcore/src/vespa/searchcore/proton/server/feed_handler_stats.h 
b/searchcore/src/vespa/searchcore/proton/server/feed_handler_stats.h new file mode 100644 index 00000000000..db93c157046 --- /dev/null +++ b/searchcore/src/vespa/searchcore/proton/server/feed_handler_stats.h @@ -0,0 +1,76 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. + +#pragma once + +#include <cstdint> +#include <optional> + +namespace proton { + +/* + * Stats for feed handler. + */ +class FeedHandlerStats +{ + uint64_t _commits; + uint64_t _operations; + double _total_latency; + std::optional<uint32_t> _min_operations; + std::optional<uint32_t> _max_operations; + std::optional<double> _min_latency; + std::optional<double> _max_latency; + +public: + FeedHandlerStats(uint64_t commits, uint64_t operations, double total_latency) noexcept; + FeedHandlerStats() noexcept; + ~FeedHandlerStats(); + FeedHandlerStats& operator-=(const FeedHandlerStats& rhs) noexcept; + void add_commit(uint32_t operations, double latency) noexcept; + void reset_min_max() noexcept; + uint64_t get_commits() noexcept { return _commits; } + uint64_t get_operations() noexcept { return _operations; } + double get_total_latency() noexcept { return _total_latency; } + const std::optional<uint32_t>& get_min_operations() noexcept { return _min_operations; } + const std::optional<uint32_t>& get_max_operations() noexcept { return _max_operations; } + const std::optional<double>& get_min_latency() noexcept { return _min_latency; } + const std::optional<double>& get_max_latency() noexcept { return _max_latency; } +}; + +/** + * Keeps track of feed operations started, completed and being committed. + * Also tracks started and completed commit operations. 
+ */ +class FeedOperationCounter { +public: + FeedOperationCounter() + : _operationsStarted(0), + _operationsCompleted(0), + _operationsStartedAtLastCommitStart(0), + _commitsStarted(0), + _commitsCompleted(0) + {} + void startOperation() { ++_operationsStarted; } + void startCommit() { + _commitsStarted++; + _operationsStartedAtLastCommitStart = _operationsStarted; + } + + void commitCompleted(size_t numOperations); + + size_t operationsSinceLastCommitStart() const { + return _operationsStarted - _operationsStartedAtLastCommitStart; + } + size_t operationsInFlight() const { return _operationsStarted - _operationsCompleted; } + size_t commitsInFlight() const { return _commitsStarted - _commitsCompleted; } + bool shouldScheduleCommit() const { + return (operationsInFlight() > 0) && (commitsInFlight() == 0); + } +private: + size_t _operationsStarted; + size_t _operationsCompleted; + size_t _operationsStartedAtLastCommitStart; + size_t _commitsStarted; + size_t _commitsCompleted; +}; + +} diff --git a/searchcore/src/vespa/searchcore/proton/server/feedhandler.cpp b/searchcore/src/vespa/searchcore/proton/server/feedhandler.cpp index bb03f48882f..8b99c39dd65 100644 --- a/searchcore/src/vespa/searchcore/proton/server/feedhandler.cpp +++ b/searchcore/src/vespa/searchcore/proton/server/feedhandler.cpp @@ -1,7 +1,7 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
-#include "ddbstate.h" #include "feedhandler.h" +#include "ddbstate.h" #include "feedstates.h" #include "i_feed_handler_owner.h" #include "ifeedview.h" @@ -23,7 +23,6 @@ #include <vespa/vespalib/util/exceptions.h> #include <vespa/vespalib/util/lambdatask.h> #include <cassert> -#include <unistd.h> #include <vespa/log/log.h> LOG_SETUP(".proton.server.feedhandler"); @@ -99,7 +98,7 @@ TlsMgrWriter::sync(SerialNum syncTo) bool res = _tls_mgr.getSession()->sync(syncTo, syncedTo); if (!res) { LOG(debug, "Tls sync failed, retrying"); - sleep(1); + std::this_thread::sleep_for(100ms); continue; } if (syncedTo >= syncTo) { @@ -281,10 +280,10 @@ FeedHandler::performDeleteBucket(FeedToken token, DeleteBucketOperation &op) { _activeFeedView->prepareDeleteBucket(op); appendOperation(op, token); // Delete documents in bucket - _activeFeedView->handleDeleteBucket(op); + _activeFeedView->handleDeleteBucket(op, token); // Delete bucket itself, should no longer have documents. _bucketDBHandler->handleDeleteBucket(op.getBucketId()); - + initiateCommit(vespalib::steady_clock::now()); } void @@ -375,7 +374,9 @@ FeedHandler::changeFeedState(FeedStateSP newState) if (_writeService.master().isCurrentThread()) { doChangeFeedState(std::move(newState)); } else { - _writeService.master().execute(makeLambdaTask([this, newState=std::move(newState)] () { doChangeFeedState(std::move(newState));})); + _writeService.master().execute(makeLambdaTask([this, newState=std::move(newState)] () { + doChangeFeedState(std::move(newState)); + })); _writeService.master().sync(); } } @@ -414,11 +415,9 @@ FeedHandler::FeedHandler(IThreadingService &writeService, _tlsReplayProgress(), _serialNum(0), _prunedSerialNum(0), - _replay_end_serial_num(0u), - _prepare_serial_num(0u), - _numOperationsPendingCommit(0), - _numOperationsCompleted(0), - _numCommitsCompleted(0), + _replay_end_serial_num(0), + _prepare_serial_num(0), + _numOperations(), _delayedPrune(false), _feedLock(), 
_feedState(make_shared<InitState>(getDocTypeName())), @@ -429,7 +428,9 @@ FeedHandler::FeedHandler(IThreadingService &writeService, _syncLock(), _syncedSerialNum(0), _allowSync(false), - _heart_beat_time(vespalib::steady_time()) + _heart_beat_time(vespalib::steady_time()), + _stats_lock(), + _stats() { } @@ -515,30 +516,32 @@ FeedHandler::getTransactionLogReplayDone() const { } void -FeedHandler::onCommitDone(size_t numPendingAtStart) { - assert(numPendingAtStart <= _numOperationsPendingCommit); - _numOperationsPendingCommit -= numPendingAtStart; - _numOperationsCompleted += numPendingAtStart; - _numCommitsCompleted++; - if (_numOperationsPendingCommit > 0) { +FeedHandler::onCommitDone(size_t numOperations, vespalib::steady_time start_time) { + _numOperations.commitCompleted(numOperations); + if (_numOperations.shouldScheduleCommit()) { enqueCommitTask(); } - LOG(spam, "%zu: onCommitDone(%zu) total=%zu left=%zu", - _numCommitsCompleted, numPendingAtStart, _numOperationsCompleted, _numOperationsPendingCommit); + vespalib::steady_time now = vespalib::steady_clock::now(); + auto latency = vespalib::to_s(now - start_time); + std::lock_guard guard(_stats_lock); + _stats.add_commit(numOperations, latency); } void FeedHandler::enqueCommitTask() { - _writeService.master().execute(makeLambdaTask([this]() { initiateCommit(); })); + _writeService.master().execute(makeLambdaTask([this, start_time(vespalib::steady_clock::now())]() { + initiateCommit(start_time); + })); } void -FeedHandler::initiateCommit() { +FeedHandler::initiateCommit(vespalib::steady_time start_time) { auto onCommitDoneContext = std::make_shared<OnCommitDone>( _writeService.master(), - makeLambdaTask([this, numPendingAtStart=_numOperationsPendingCommit]() { - onCommitDone(numPendingAtStart); + makeLambdaTask([this, operations=_numOperations.operationsSinceLastCommitStart(), start_time]() { + onCommitDone(operations, start_time); })); auto commitResult = _tlsWriter->startCommit(onCommitDoneContext); + 
_numOperations.startCommit(); if (_activeFeedView) { using KeepAlivePair = vespalib::KeepAlive<std::pair<CommitResult, DoneCallback>>; auto pair = std::make_pair(std::move(commitResult), std::move(onCommitDoneContext)); @@ -552,7 +555,8 @@ FeedHandler::appendOperation(const FeedOperation &op, TlsWriter::DoneCallback on const_cast<FeedOperation &>(op).setSerialNum(inc_serial_num()); } _tlsWriter->appendOperation(op, std::move(onDone)); - if (++_numOperationsPendingCommit == 1) { + _numOperations.startOperation(); + if (_numOperations.operationsInFlight() == 1) { enqueCommitTask(); } } @@ -748,7 +752,7 @@ FeedHandler::handleOperation(FeedToken token, FeedOperation::UP op) } void -FeedHandler::handleMove(MoveOperation &op, std::shared_ptr<vespalib::IDestructorCallback> moveDoneCtx) +FeedHandler::handleMove(MoveOperation &op, vespalib::IDestructorCallback::SP moveDoneCtx) { assert(_writeService.master().isCurrentThread()); op.set_prepare_serial_num(inc_prepare_serial_num()); @@ -765,7 +769,7 @@ FeedHandler::heartBeat() { assert(_writeService.master().isCurrentThread()); _heart_beat_time.store(vespalib::steady_clock::now()); - _activeFeedView->heartBeat(_serialNum); + _activeFeedView->heartBeat(_serialNum, vespalib::IDestructorCallback::SP()); } FeedHandler::RPC::Result @@ -787,13 +791,13 @@ FeedHandler::eof() } void -FeedHandler:: -performPruneRemovedDocuments(PruneRemovedDocumentsOperation &pruneOp) +FeedHandler::performPruneRemovedDocuments(PruneRemovedDocumentsOperation &pruneOp) { const LidVectorContext::SP lids_to_remove = pruneOp.getLidsToRemove(); + vespalib::IDestructorCallback::SP onDone; if (lids_to_remove && lids_to_remove->getNumLids() != 0) { - appendOperation(pruneOp, DoneCallback()); - _activeFeedView->handlePruneRemovedDocuments(pruneOp); + appendOperation(pruneOp, onDone); + _activeFeedView->handlePruneRemovedDocuments(pruneOp, onDone); } } @@ -822,4 +826,14 @@ FeedHandler::get_heart_beat_time() const return 
_heart_beat_time.load(std::memory_order_relaxed); } +FeedHandlerStats +FeedHandler::get_stats(bool reset_min_max) const { + std::lock_guard guard(_stats_lock); + auto result = _stats; + if (reset_min_max) { + _stats.reset_min_max(); + } + return result; +} + } // namespace proton diff --git a/searchcore/src/vespa/searchcore/proton/server/feedhandler.h b/searchcore/src/vespa/searchcore/proton/server/feedhandler.h index ef15b268086..417d9c21548 100644 --- a/searchcore/src/vespa/searchcore/proton/server/feedhandler.h +++ b/searchcore/src/vespa/searchcore/proton/server/feedhandler.h @@ -2,6 +2,7 @@ #pragma once +#include "feed_handler_stats.h" #include "i_inc_serial_num.h" #include "i_operation_storer.h" #include "idocumentmovehandler.h" @@ -82,9 +83,7 @@ private: // the serial num of the last feed operation in the transaction log at startup before replay SerialNum _replay_end_serial_num; uint64_t _prepare_serial_num; - size_t _numOperationsPendingCommit; - size_t _numOperationsCompleted; - size_t _numCommitsCompleted; + FeedOperationCounter _numOperations; bool _delayedPrune; mutable std::shared_mutex _feedLock; FeedStateSP _feedState; @@ -97,6 +96,8 @@ private: SerialNum _syncedSerialNum; bool _allowSync; // Sanity check std::atomic<vespalib::steady_time> _heart_beat_time; + mutable std::mutex _stats_lock; + mutable FeedHandlerStats _stats; /** * Delayed handling of feed operations, in master write thread. 
@@ -134,8 +135,8 @@ private: FeedStateSP getFeedState() const; void changeFeedState(FeedStateSP newState); void doChangeFeedState(FeedStateSP newState); - void onCommitDone(size_t numPendingAtStart); - void initiateCommit(); + void onCommitDone(size_t numPendingAtStart, vespalib::steady_time start_time); + void initiateCommit(vespalib::steady_time start_time); void enqueCommitTask(); public: FeedHandler(const FeedHandler &) = delete; @@ -245,6 +246,7 @@ public: [[nodiscard]] CommitResult storeOperationSync(const FeedOperation & op); void considerDelayedPrune(); vespalib::steady_time get_heart_beat_time() const; + FeedHandlerStats get_stats(bool reset_min_max) const; }; } // namespace proton diff --git a/searchcore/src/vespa/searchcore/proton/server/feedstates.cpp b/searchcore/src/vespa/searchcore/proton/server/feedstates.cpp index 8b616dc672e..d2626a0d9f4 100644 --- a/searchcore/src/vespa/searchcore/proton/server/feedstates.cpp +++ b/searchcore/src/vespa/searchcore/proton/server/feedstates.cpp @@ -20,6 +20,7 @@ using search::transactionlog::client::RPC; using search::SerialNum; using vespalib::Executor; using vespalib::makeLambdaTask; +using vespalib::IDestructorCallback; using vespalib::make_string; using proton::bucketdb::IBucketDBHandler; @@ -83,7 +84,7 @@ public: } void replay(const DeleteBucketOperation &op) override { - _feed_view_ptr->handleDeleteBucket(op); + _feed_view_ptr->handleDeleteBucket(op, IDestructorCallback::SP()); } void replay(const SplitBucketOperation &op) override { _bucketDBHandler.handleSplit(op.getSerialNum(), op.getSource(), @@ -94,15 +95,15 @@ public: op.getSource2(), op.getTarget()); } void replay(const PruneRemovedDocumentsOperation &op) override { - _feed_view_ptr->handlePruneRemovedDocuments(op); + _feed_view_ptr->handlePruneRemovedDocuments(op, IDestructorCallback::SP()); } void replay(const MoveOperation &op) override { - _feed_view_ptr->handleMove(op, vespalib::IDestructorCallback::SP()); + _feed_view_ptr->handleMove(op, 
IDestructorCallback::SP()); } void replay(const CreateBucketOperation &) override { } void replay(const CompactLidSpaceOperation &op) override { - _feed_view_ptr->handleCompactLidSpace(op); + _feed_view_ptr->handleCompactLidSpace(op, IDestructorCallback::SP()); } NewConfigOperation::IStreamHandler &getNewConfigStreamHandler() override { return _config_store; diff --git a/searchcore/src/vespa/searchcore/proton/server/fileconfigmanager.cpp b/searchcore/src/vespa/searchcore/proton/server/fileconfigmanager.cpp index 9641f6157c4..9a525731d0d 100644 --- a/searchcore/src/vespa/searchcore/proton/server/fileconfigmanager.cpp +++ b/searchcore/src/vespa/searchcore/proton/server/fileconfigmanager.cpp @@ -5,6 +5,7 @@ #include <vespa/searchcore/proton/common/hw_info_sampler.h> #include <vespa/config/print/fileconfigwriter.h> #include <vespa/config-bucketspaces.h> +#include <vespa/document/config/documenttypes_config_fwd.h> #include <vespa/document/repo/document_type_repo_factory.h> #include <vespa/searchcommon/common/schemaconfigurer.h> #include <vespa/vespalib/io/fileutil.h> @@ -25,7 +26,6 @@ LOG_SETUP(".proton.server.fileconfigmanager"); using document::DocumentTypeRepo; using document::DocumentTypeRepoFactory; -using document::DocumenttypesConfig; using search::IndexMetaInfo; using search::SerialNum; using search::index::Schema; diff --git a/searchcore/src/vespa/searchcore/proton/server/i_document_scan_iterator.h b/searchcore/src/vespa/searchcore/proton/server/i_document_scan_iterator.h index 60e3d1c6081..b3c14173b0e 100644 --- a/searchcore/src/vespa/searchcore/proton/server/i_document_scan_iterator.h +++ b/searchcore/src/vespa/searchcore/proton/server/i_document_scan_iterator.h @@ -27,9 +27,8 @@ struct IDocumentScanIterator * Returns an invalid document if no documents satisfy the limit. * * @param compactLidLimit The returned document must have lid larger than this limit. - * @param retry Whether we should start the scan with the previous returned document. 
*/ - virtual search::DocumentMetaData next(uint32_t compactLidLimit, bool retry) = 0; + virtual search::DocumentMetaData next(uint32_t compactLidLimit) = 0; }; } // namespace proton diff --git a/searchcore/src/vespa/searchcore/proton/server/i_proton_configurer_owner.h b/searchcore/src/vespa/searchcore/proton/server/i_proton_configurer_owner.h index 0d6bb07b173..704b54dc566 100644 --- a/searchcore/src/vespa/searchcore/proton/server/i_proton_configurer_owner.h +++ b/searchcore/src/vespa/searchcore/proton/server/i_proton_configurer_owner.h @@ -6,8 +6,6 @@ #include <vespa/vespalib/stllike/string.h> #include <memory> -namespace vespalib { class ThreadStackExecutorBase; } - namespace proton { class DocumentDBConfigOwner; @@ -19,7 +17,7 @@ class DocumentDBConfigOwner; class IProtonConfigurerOwner { public: - using InitializeThreads = std::shared_ptr<vespalib::SyncableThreadExecutor>; + using InitializeThreads = std::shared_ptr<vespalib::ThreadExecutor>; virtual ~IProtonConfigurerOwner() { } virtual std::shared_ptr<DocumentDBConfigOwner> addDocumentDB(const DocTypeName &docTypeName, document::BucketSpace bucketSpace, diff --git a/searchcore/src/vespa/searchcore/proton/server/i_shared_threading_service.h b/searchcore/src/vespa/searchcore/proton/server/i_shared_threading_service.h new file mode 100644 index 00000000000..dfa48cb8d1a --- /dev/null +++ b/searchcore/src/vespa/searchcore/proton/server/i_shared_threading_service.h @@ -0,0 +1,50 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +#pragma once + +namespace vespalib { +class ISequencedTaskExecutor; +class ThreadExecutor; +class InvokeService; +} + +namespace proton { + +/** + * Interface containing the thread executors that are shared across all document dbs. + */ +class ISharedThreadingService { +public: + virtual ~ISharedThreadingService() {} + + /** + * Returns the executor used for warmup (e.g. index warmup). 
+ */ + virtual vespalib::ThreadExecutor& warmup() = 0; + + /** + * Returns the shared executor used for various assisting tasks in a document db. + * + * Example usages include: + * - Disk index fusion. + * - Updating nearest neighbor index (in DenseTensorAttribute). + * - Loading nearest neighbor index (in DenseTensorAttribute). + * - Writing of data in the document store. + */ + virtual vespalib::ThreadExecutor& shared() = 0; + + /** + * Returns the sequenced executor used to write index and attribute fields in a document db. + * + * This is a nullptr if the field writer is not shared across all document dbs. + * TODO: Make this a reference when it is always shared. + */ + virtual vespalib::ISequencedTaskExecutor* field_writer() = 0; + + /** + * Returns an InvokeService intended for regular wakeup calls. + */ + virtual vespalib::InvokeService & invokeService() = 0; +}; + +} + diff --git a/searchcore/src/vespa/searchcore/proton/server/ibucketstatecalculator.h b/searchcore/src/vespa/searchcore/proton/server/ibucketstatecalculator.h index b60a5ad8175..9534f346d1f 100644 --- a/searchcore/src/vespa/searchcore/proton/server/ibucketstatecalculator.h +++ b/searchcore/src/vespa/searchcore/proton/server/ibucketstatecalculator.h @@ -15,6 +15,7 @@ struct IBucketStateCalculator virtual bool nodeUp() const = 0; virtual bool nodeInitializing() const = 0; virtual bool nodeRetired() const = 0; + virtual bool nodeMaintenance() const noexcept = 0; virtual ~IBucketStateCalculator() = default; }; diff --git a/searchcore/src/vespa/searchcore/proton/server/idocumentsubdb.h b/searchcore/src/vespa/searchcore/proton/server/idocumentsubdb.h index 00849d6ad31..f84352a4558 100644 --- a/searchcore/src/vespa/searchcore/proton/server/idocumentsubdb.h +++ b/searchcore/src/vespa/searchcore/proton/server/idocumentsubdb.h @@ -6,6 +6,7 @@ #include <vespa/searchlib/common/serialnum.h> #include <vespa/searchlib/util/searchable_stats.h> #include <vespa/vespalib/stllike/string.h> +#include 
<vespa/vespalib/util/idestructorcallback.h> namespace search::index { class Schema; } @@ -60,6 +61,7 @@ public: using SchemaSP = std::shared_ptr<Schema>; using IFlushTargetList = std::vector<std::shared_ptr<searchcorespi::IFlushTarget>>; using IndexConfig = index::IndexConfig; + using OnDone = std::shared_ptr<vespalib::IDestructorCallback>; public: IDocumentSubDB() { } virtual ~IDocumentSubDB() { } @@ -77,7 +79,7 @@ public: virtual IReprocessingTask::List applyConfig(const DocumentDBConfig &newConfigSnapshot, const DocumentDBConfig &oldConfigSnapshot, SerialNum serialNum, const ReconfigParams ¶ms, IDocumentDBReferenceResolver &resolver) = 0; - virtual void setBucketStateCalculator(const std::shared_ptr<IBucketStateCalculator> &calc) = 0; + virtual void setBucketStateCalculator(const std::shared_ptr<IBucketStateCalculator> &calc, OnDone) = 0; virtual std::shared_ptr<ISearchHandler> getSearchView() const = 0; virtual std::shared_ptr<IFeedView> getFeedView() const = 0; diff --git a/searchcore/src/vespa/searchcore/proton/server/ifeedview.h b/searchcore/src/vespa/searchcore/proton/server/ifeedview.h index 14e6d6811e7..83a91520e5d 100644 --- a/searchcore/src/vespa/searchcore/proton/server/ifeedview.h +++ b/searchcore/src/vespa/searchcore/proton/server/ifeedview.h @@ -29,7 +29,8 @@ protected: IFeedView() = default; public: using SP = std::shared_ptr<IFeedView>; - using DoneCallback = std::shared_ptr<vespalib::IDestructorCallback>; + using DoneCallback = const std::shared_ptr<vespalib::IDestructorCallback> &; + using IDestructorCallbackSP = std::shared_ptr<vespalib::IDestructorCallback>; using CommitParam = search::CommitParam; IFeedView(const IFeedView &) = delete; @@ -55,16 +56,16 @@ public: virtual void prepareRemove(RemoveOperation &rmOp) = 0; virtual void handleRemove(FeedToken token, const RemoveOperation &rmOp) = 0; virtual void prepareDeleteBucket(DeleteBucketOperation &delOp) = 0; - virtual void handleDeleteBucket(const DeleteBucketOperation &delOp) = 0; virtual 
void prepareMove(MoveOperation &putOp) = 0; + virtual void handleDeleteBucket(const DeleteBucketOperation &delOp, DoneCallback onDone) = 0; virtual void handleMove(const MoveOperation &putOp, DoneCallback onDone) = 0; - virtual void heartBeat(search::SerialNum serialNum) = 0; + virtual void heartBeat(search::SerialNum serialNum, DoneCallback onDone) = 0; virtual void forceCommit(const CommitParam & param, DoneCallback onDone) = 0; - void forceCommit(CommitParam param) { forceCommit(param, DoneCallback()); } + virtual void handlePruneRemovedDocuments(const PruneRemovedDocumentsOperation & pruneOp, DoneCallback onDone) = 0; + virtual void handleCompactLidSpace(const CompactLidSpaceOperation &op, DoneCallback onDone) = 0; + void forceCommit(CommitParam param) { forceCommit(param, IDestructorCallbackSP()); } void forceCommit(search::SerialNum serialNum) { forceCommit(CommitParam(serialNum)); } void forceCommitAndWait(CommitParam param); - virtual void handlePruneRemovedDocuments(const PruneRemovedDocumentsOperation & pruneOp) = 0; - virtual void handleCompactLidSpace(const CompactLidSpaceOperation &op) = 0; }; } // namespace proton diff --git a/searchcore/src/vespa/searchcore/proton/server/lid_space_compaction_handler.cpp b/searchcore/src/vespa/searchcore/proton/server/lid_space_compaction_handler.cpp index 185d197add7..84053786f69 100644 --- a/searchcore/src/vespa/searchcore/proton/server/lid_space_compaction_handler.cpp +++ b/searchcore/src/vespa/searchcore/proton/server/lid_space_compaction_handler.cpp @@ -93,7 +93,7 @@ void LidSpaceCompactionHandler::handleCompactLidSpace(const CompactLidSpaceOperation &op, std::shared_ptr<IDestructorCallback> compact_done_context) { assert(_subDb.sub_db_id() == op.getSubDbId()); - _subDb.feed_view()->handleCompactLidSpace(op); + _subDb.feed_view()->handleCompactLidSpace(op, compact_done_context); _subDb.feed_view()->forceCommit(CommitParam(op.getSerialNum()), std::move(compact_done_context)); } diff --git 
a/searchcore/src/vespa/searchcore/proton/server/lid_space_compaction_job.cpp b/searchcore/src/vespa/searchcore/proton/server/lid_space_compaction_job.cpp index a5c1d1fc2c9..dcc19501dd1 100644 --- a/searchcore/src/vespa/searchcore/proton/server/lid_space_compaction_job.cpp +++ b/searchcore/src/vespa/searchcore/proton/server/lid_space_compaction_job.cpp @@ -17,7 +17,6 @@ #include <vespa/vespalib/util/lambdatask.h> #include <vespa/vespalib/util/gate.h> #include <cassert> -#include <thread> #include <vespa/log/log.h> LOG_SETUP(".proton.server.lidspace.compactionjob"); @@ -73,7 +72,7 @@ bool CompactionJob::scanDocuments(const LidUsageStats &stats) { if (_scanItr->valid()) { - DocumentMetaData document = getNextDocument(stats, false); + DocumentMetaData document = getNextDocument(stats); if (document.valid()) { Bucket metaBucket(document::Bucket(_bucketSpace, document.bucketId)); _bucketExecutor.execute(metaBucket, std::make_unique<MoveTask>(shared_from_this(), document, getLimiter().beginOperation())); @@ -190,9 +189,9 @@ CompactionJob::create(const DocumentDBLidSpaceCompactionConfig &config, } DocumentMetaData -CompactionJob::getNextDocument(const LidUsageStats &stats, bool retryLastDocument) +CompactionJob::getNextDocument(const LidUsageStats &stats) { - return _scanItr->next(std::max(stats.getLowestFreeLid(), stats.getUsedLids()), retryLastDocument); + return _scanItr->next(std::max(stats.getLowestFreeLid(), stats.getUsedLids())); } bool @@ -201,7 +200,6 @@ CompactionJob::run() if (isBlocked()) { return true; // indicate work is done since no work can be done } - LidUsageStats stats = _handler->getLidStatus(); if (remove_batch_is_ongoing()) { // Note that we don't set the job as blocked as the decision to un-block it is not driven externally. 
LOG(info, "%s: Lid space compaction is disabled while remove batch (delete buckets) is ongoing", @@ -223,7 +221,13 @@ CompactionJob::run() } if (_scanItr && !_scanItr->valid()) { - if (shouldRestartScanDocuments(_handler->getLidStatus())) { + bool numPending = getLimiter().numPending(); + if (numPending > 0) { + // We must wait to decide if a rescan is necessary until all operations are completed + return false; + } + LidUsageStats stats = _handler->getLidStatus(); + if (shouldRestartScanDocuments(stats)) { _scanItr = _handler->getIterator(); } else { _scanItr = IDocumentScanIterator::UP(); @@ -232,6 +236,7 @@ CompactionJob::run() } } + LidUsageStats stats = _handler->getLidStatus(); if (_scanItr) { return scanDocuments(stats); } else if (_shouldCompactLidSpace) { diff --git a/searchcore/src/vespa/searchcore/proton/server/lid_space_compaction_job.h b/searchcore/src/vespa/searchcore/proton/server/lid_space_compaction_job.h index 917ff12be4a..fcdcc322f65 100644 --- a/searchcore/src/vespa/searchcore/proton/server/lid_space_compaction_job.h +++ b/searchcore/src/vespa/searchcore/proton/server/lid_space_compaction_job.h @@ -58,7 +58,7 @@ private: void compactLidSpace(const search::LidUsageStats &stats); bool remove_batch_is_ongoing() const; bool remove_is_ongoing() const; - search::DocumentMetaData getNextDocument(const search::LidUsageStats &stats, bool retryLastDocument); + search::DocumentMetaData getNextDocument(const search::LidUsageStats &stats); bool scanDocuments(const search::LidUsageStats &stats); static void moveDocument(std::shared_ptr<CompactionJob> job, const search::DocumentMetaData & metaThen, @@ -98,4 +98,3 @@ public: }; } // namespace proton - diff --git a/searchcore/src/vespa/searchcore/proton/server/maintenancecontroller.cpp b/searchcore/src/vespa/searchcore/proton/server/maintenancecontroller.cpp index c4826bba8ea..0d75464a161 100644 --- a/searchcore/src/vespa/searchcore/proton/server/maintenancecontroller.cpp +++ 
b/searchcore/src/vespa/searchcore/proton/server/maintenancecontroller.cpp @@ -31,15 +31,15 @@ public: }; bool -isRunningOrRunnable(const MaintenanceJobRunner & job, const Executor * master) { +isRunnable(const MaintenanceJobRunner & job, const Executor * master) { return (&job.getExecutor() == master) - ? job.isRunning() + ? false : job.isRunnable(); } } -MaintenanceController::MaintenanceController(IThreadService &masterThread, +MaintenanceController::MaintenanceController(ISyncableThreadService &masterThread, vespalib::Executor & defaultExecutor, MonitoredRefCount & refCount, const DocTypeName &docTypeName) @@ -99,14 +99,14 @@ MaintenanceController::killJobs() job->stop(); // Make sure no more tasks are added to the executor } for (auto &job : _jobs) { - while (isRunningOrRunnable(*job, &_masterThread)) { + while (isRunnable(*job, &_masterThread)) { std::this_thread::sleep_for(1ms); } } - JobList tmpJobs = _jobs; + JobList tmpJobs; { Guard guard(_jobsLock); - _jobs.clear(); + tmpJobs.swap(_jobs); } // Hold jobs until existing tasks have been drained _masterThread.execute(makeLambdaTask([this, jobs=std::move(tmpJobs)]() { @@ -140,6 +140,11 @@ MaintenanceController::stop() _masterThread.sync(); // Wait for already scheduled maintenance jobs and performHoldJobs } +searchcorespi::index::IThreadService & +MaintenanceController::masterThread() { + return _masterThread; +} + void MaintenanceController::kill() { diff --git a/searchcore/src/vespa/searchcore/proton/server/maintenancecontroller.h b/searchcore/src/vespa/searchcore/proton/server/maintenancecontroller.h index 8c8cc3e2d43..f2c425b2fd0 100644 --- a/searchcore/src/vespa/searchcore/proton/server/maintenancecontroller.h +++ b/searchcore/src/vespa/searchcore/proton/server/maintenancecontroller.h @@ -17,7 +17,10 @@ class MonitoredRefCount; class Timer; } -namespace searchcorespi::index { struct IThreadService; } +namespace searchcorespi::index { + struct IThreadService; + struct ISyncableThreadService; +} namespace 
proton { @@ -33,12 +36,13 @@ class MaintenanceController { public: using IThreadService = searchcorespi::index::IThreadService; + using ISyncableThreadService = searchcorespi::index::ISyncableThreadService; using DocumentDBMaintenanceConfigSP = std::shared_ptr<DocumentDBMaintenanceConfig>; using JobList = std::vector<std::shared_ptr<MaintenanceJobRunner>>; using UP = std::unique_ptr<MaintenanceController>; enum class State {INITIALIZING, STARTED, PAUSED, STOPPING}; - MaintenanceController(IThreadService &masterThread, vespalib::Executor & defaultExecutor, vespalib::MonitoredRefCount & refCount, const DocTypeName &docTypeName); + MaintenanceController(ISyncableThreadService &masterThread, vespalib::Executor & defaultExecutor, vespalib::MonitoredRefCount & refCount, const DocTypeName &docTypeName); ~MaintenanceController(); void registerJobInMasterThread(IMaintenanceJob::UP job); @@ -70,14 +74,14 @@ public: const MaintenanceDocumentSubDB & getReadySubDB() const { return _readySubDB; } const MaintenanceDocumentSubDB & getRemSubDB() const { return _remSubDB; } const MaintenanceDocumentSubDB & getNotReadySubDB() const { return _notReadySubDB; } - IThreadService & masterThread() { return _masterThread; } + IThreadService & masterThread(); const DocTypeName & getDocTypeName() const { return _docTypeName; } vespalib::RetainGuard retainDB() { return vespalib::RetainGuard(_refCount); } private: using Mutex = std::mutex; using Guard = std::lock_guard<Mutex>; - IThreadService &_masterThread; + ISyncableThreadService &_masterThread; vespalib::Executor &_defaultExecutor; vespalib::MonitoredRefCount &_refCount; MaintenanceDocumentSubDB _readySubDB; diff --git a/searchcore/src/vespa/searchcore/proton/server/maintenancejobrunner.cpp b/searchcore/src/vespa/searchcore/proton/server/maintenancejobrunner.cpp index 16d74479ebe..9eb0596ff1f 100644 --- a/searchcore/src/vespa/searchcore/proton/server/maintenancejobrunner.cpp +++ 
b/searchcore/src/vespa/searchcore/proton/server/maintenancejobrunner.cpp @@ -77,13 +77,6 @@ MaintenanceJobRunner::MaintenanceJobRunner(Executor &executor, IMaintenanceJob:: } bool -MaintenanceJobRunner::isRunning() const -{ - Guard guard(_lock); - return _running; -} - -bool MaintenanceJobRunner::isRunnable() const { Guard guard(_lock); diff --git a/searchcore/src/vespa/searchcore/proton/server/maintenancejobrunner.h b/searchcore/src/vespa/searchcore/proton/server/maintenancejobrunner.h index 151cc302cd3..17f244e6621 100644 --- a/searchcore/src/vespa/searchcore/proton/server/maintenancejobrunner.h +++ b/searchcore/src/vespa/searchcore/proton/server/maintenancejobrunner.h @@ -31,7 +31,6 @@ public: MaintenanceJobRunner(vespalib::Executor &executor, IMaintenanceJob::UP job); void run() override; void stop(); - bool isRunning() const; bool isRunnable() const; const vespalib::Executor & getExecutor() const { return _executor; } const IMaintenanceJob &getJob() const { return *_job; } diff --git a/searchcore/src/vespa/searchcore/proton/server/proton.cpp b/searchcore/src/vespa/searchcore/proton/server/proton.cpp index 275f9029107..0bcbbc14650 100644 --- a/searchcore/src/vespa/searchcore/proton/server/proton.cpp +++ b/searchcore/src/vespa/searchcore/proton/server/proton.cpp @@ -38,7 +38,9 @@ #include <vespa/vespalib/util/lambdatask.h> #include <vespa/vespalib/util/mmap_file_allocator_factory.h> #include <vespa/vespalib/util/random.h> +#include <vespa/vespalib/util/sequencedtaskexecutor.h> #include <vespa/vespalib/util/size_literals.h> +#include <vespa/vespalib/util/invokeserviceimpl.h> #ifdef __linux__ #include <malloc.h> #endif @@ -111,15 +113,6 @@ diskMemUsageSamplerConfig(const ProtonConfig &proton, const HwInfo &hwInfo) hwInfo); } -size_t -derive_shared_threads(const ProtonConfig &proton, const HwInfo::Cpu &cpuInfo) { - size_t scaledCores = (size_t)std::ceil(cpuInfo.cores() * proton.feeding.concurrency); - - // We need at least 1 guaranteed free worker in order to 
ensure progress so #documentsdbs + 1 should suffice, - // but we will not be cheap and give it one extra. - return std::max(scaledCores, proton.documentdb.size() + proton.flush.maxconcurrent + 1); -} - uint32_t computeRpcTransportThreads(const ProtonConfig & cfg, const HwInfo::Cpu &cpuInfo) { bool areSearchAndDocsumAsync = cfg.docsum.async && cfg.search.async; @@ -144,8 +137,6 @@ struct MetricsUpdateHook : metrics::UpdateHook const vespalib::string CUSTOM_COMPONENT_API_PATH = "/state/v1/custom/component"; -VESPA_THREAD_STACK_TAG(proton_shared_executor) -VESPA_THREAD_STACK_TAG(index_warmup_executor) VESPA_THREAD_STACK_TAG(initialize_executor) VESPA_THREAD_STACK_TAG(close_executor) @@ -240,8 +231,7 @@ Proton::Proton(const config::ConfigUri & configUri, _protonDiskLayout(), _protonConfigurer(_executor, *this, _protonDiskLayout), _protonConfigFetcher(configUri, _protonConfigurer, subscribeTimeout), - _warmupExecutor(), - _sharedExecutor(), + _shared_service(), _compile_cache_executor_binding(), _queryLimiter(), _clock(0.001), @@ -333,11 +323,8 @@ Proton::init(const BootstrapConfig::SP & configSnapshot) protonConfig.visit.ignoremaxbytes); vespalib::string fileConfigId; - _warmupExecutor = std::make_unique<vespalib::ThreadStackExecutor>(4, 128_Ki, index_warmup_executor); - - const size_t sharedThreads = derive_shared_threads(protonConfig, hwInfo.cpu()); - _sharedExecutor = std::make_shared<vespalib::BlockingThreadStackExecutor>(sharedThreads, 128_Ki, sharedThreads*16, proton_shared_executor); - _compile_cache_executor_binding = vespalib::eval::CompileCache::bind(_sharedExecutor); + _shared_service = std::make_unique<SharedThreadingService>(SharedThreadingServiceConfig::make(protonConfig, hwInfo.cpu())); + _compile_cache_executor_binding = vespalib::eval::CompileCache::bind(_shared_service->shared_raw()); InitializeThreads initializeThreads; if (protonConfig.initialize.threads > 0) { initializeThreads = 
std::make_shared<vespalib::ThreadStackExecutor>(protonConfig.initialize.threads, 128_Ki, initialize_executor); @@ -460,11 +447,8 @@ Proton::~Proton() if (_flushEngine) { _flushEngine->close(); } - if (_warmupExecutor) { - _warmupExecutor->sync(); - } - if (_sharedExecutor) { - _sharedExecutor->sync(); + if (_shared_service) { + _shared_service->sync_all_executors(); } if ( ! _documentDBMap.empty()) { @@ -483,9 +467,8 @@ Proton::~Proton() _documentDBMap.clear(); _persistenceEngine.reset(); _tls.reset(); - _warmupExecutor.reset(); _compile_cache_executor_binding.reset(); - _sharedExecutor.reset(); + _shared_service.reset(); _clock.stop(); LOG(debug, "Explicit destructor done"); } @@ -619,11 +602,23 @@ Proton::addDocumentDB(const document::DocumentType &docType, // 1 thread per document type. initializeThreads = std::make_shared<vespalib::ThreadStackExecutor>(1, 128_Ki); } - auto ret = DocumentDB::create(config.basedir + "/documents", documentDBConfig, config.tlsspec, - _queryLimiter, _clock, docTypeName, bucketSpace, config, *this, - *_warmupExecutor, *_sharedExecutor, *_persistenceEngine, *_tls->getTransLogServer(), - *_metricsEngine, _fileHeaderContext, std::move(config_store), - initializeThreads, bootstrapConfig->getHwInfo()); + auto ret = DocumentDB::create(config.basedir + "/documents", + documentDBConfig, + config.tlsspec, + _queryLimiter, + _clock, + docTypeName, + bucketSpace, + config, + *this, + *_shared_service, + *_persistenceEngine, + *_tls->getTransLogServer(), + *_metricsEngine, + _fileHeaderContext, + std::move(config_store), + initializeThreads, + bootstrapConfig->getHwInfo()); try { ret->start(); } catch (vespalib::Exception &e) { @@ -791,11 +786,12 @@ Proton::updateMetrics(const metrics::MetricLockGuard &) if (_summaryEngine) { updateExecutorMetrics(metrics.docsum, _summaryEngine->getExecutorStats()); } - if (_sharedExecutor) { - metrics.shared.update(_sharedExecutor->getStats()); - } - if (_warmupExecutor) { - 
metrics.warmup.update(_warmupExecutor->getStats()); + if (_shared_service) { + metrics.shared.update(_shared_service->shared().getStats()); + metrics.warmup.update(_shared_service->warmup().getStats()); + if (_shared_service->field_writer()) { + metrics.warmup.update(_shared_service->field_writer()->getStats()); + } } } } @@ -865,10 +861,11 @@ Proton::setClusterState(BucketSpace bucketSpace, const storage::spi::ClusterStat // forward info sent by cluster controller to persistence engine // about whether node is supposed to be up or not. Match engine // needs to know this in order to stop serving queries. - bool nodeUpInBucketSpace(calc.nodeUp()); + bool nodeUpInBucketSpace(calc.nodeUp()); // TODO rename calculator function to imply bucket space affinity bool nodeRetired(calc.nodeRetired()); bool nodeUp = updateNodeUp(bucketSpace, nodeUpInBucketSpace); _matchEngine->setNodeUp(nodeUp); + _matchEngine->setNodeMaintenance(calc.nodeMaintenance()); // Note: _all_ bucket spaces in maintenance if (_memoryFlushConfigUpdater) { _memoryFlushConfigUpdater->setNodeRetired(nodeRetired); } @@ -946,12 +943,13 @@ Proton::get_child(vespalib::stringref name) const return std::make_unique<ResourceUsageExplorer>(_diskMemUsageSampler->writeFilter(), _persistenceEngine->get_resource_usage_tracker()); } else if (name == THREAD_POOLS) { - return std::make_unique<ProtonThreadPoolsExplorer>(_sharedExecutor.get(), + return std::make_unique<ProtonThreadPoolsExplorer>((_shared_service) ? &_shared_service->shared() : nullptr, (_matchEngine) ? &_matchEngine->get_executor() : nullptr, (_summaryEngine) ? &_summaryEngine->get_executor() : nullptr, (_flushEngine) ? &_flushEngine->get_executor() : nullptr, &_executor, - _warmupExecutor.get()); + (_shared_service) ? &_shared_service->warmup() : nullptr, + (_shared_service) ? 
_shared_service->field_writer() : nullptr); } return Explorer_UP(nullptr); } diff --git a/searchcore/src/vespa/searchcore/proton/server/proton.h b/searchcore/src/vespa/searchcore/proton/server/proton.h index 6b0bef50cae..c18737f22b5 100644 --- a/searchcore/src/vespa/searchcore/proton/server/proton.h +++ b/searchcore/src/vespa/searchcore/proton/server/proton.h @@ -12,6 +12,8 @@ #include "proton_config_fetcher.h" #include "proton_configurer.h" #include "rpc_hooks.h" +#include "shared_threading_service.h" +#include <vespa/eval/eval/llvm/compile_cache.h> #include <vespa/searchcore/proton/matching/querylimiter.h> #include <vespa/searchcore/proton/metrics/metrics_engine.h> #include <vespa/searchcore/proton/persistenceengine/i_resource_write_filter.h> @@ -24,11 +26,12 @@ #include <vespa/vespalib/net/json_handler_repo.h> #include <vespa/vespalib/net/state_explorer.h> #include <vespa/vespalib/util/varholder.h> -#include <vespa/eval/eval/llvm/compile_cache.h> #include <mutex> #include <shared_mutex> -namespace vespalib { class StateServer; } +namespace vespalib { + class StateServer; +} namespace search::transactionlog { class TransLogServerApp; } namespace metrics { class MetricLockGuard; } namespace storage::spi { struct PersistenceProvider; } @@ -58,9 +61,9 @@ private: using MonitorReply = search::engine::MonitorReply; using MonitorClient = search::engine::MonitorClient; using DocumentDBMap = std::map<DocTypeName, DocumentDB::SP>; - using ProtonConfigSP = BootstrapConfig::ProtonConfigSP; - using InitializeThreads = std::shared_ptr<vespalib::SyncableThreadExecutor>; + using InitializeThreads = std::shared_ptr<vespalib::ThreadExecutor>; using BucketSpace = document::BucketSpace; + using InvokeService = vespalib::InvokeService; class ProtonFileHeaderContext : public search::common::FileHeaderContext { @@ -101,8 +104,7 @@ private: std::unique_ptr<IProtonDiskLayout> _protonDiskLayout; ProtonConfigurer _protonConfigurer; ProtonConfigFetcher _protonConfigFetcher; - 
std::unique_ptr<vespalib::ThreadStackExecutorBase> _warmupExecutor; - std::shared_ptr<vespalib::ThreadStackExecutorBase> _sharedExecutor; + std::unique_ptr<SharedThreadingService> _shared_service; vespalib::eval::CompileCache::ExecutorBinding::UP _compile_cache_executor_binding; matching::QueryLimiter _queryLimiter; vespalib::Clock _clock; @@ -133,6 +135,7 @@ private: uint32_t getDistributionKey() const override { return _distributionKey; } BootstrapConfig::SP getActiveConfigSnapshot() const; std::shared_ptr<IDocumentDBReferenceRegistry> getDocumentDBReferenceRegistry() const override; + // Returns true if the node is up in _any_ bucket space bool updateNodeUp(BucketSpace bucketSpace, bool nodeUpInBucketSpace); void closeDocumentDBs(vespalib::ThreadStackExecutorBase & executor); public: diff --git a/searchcore/src/vespa/searchcore/proton/server/proton_configurer.cpp b/searchcore/src/vespa/searchcore/proton/server/proton_configurer.cpp index 7c998ceca7c..2c891927fa3 100644 --- a/searchcore/src/vespa/searchcore/proton/server/proton_configurer.cpp +++ b/searchcore/src/vespa/searchcore/proton/server/proton_configurer.cpp @@ -12,6 +12,7 @@ #include <vespa/document/bucket/fixed_bucket_spaces.h> #include <vespa/config-bucketspaces.h> #include <vespa/vespalib/util/exceptions.h> +#include <vespa/vespalib/util/retain_guard.h> #include <vespa/vespalib/stllike/asciistream.h> #include <future> @@ -42,7 +43,7 @@ getBucketSpace(const BootstrapConfig &bootstrapConfig, const DocTypeName &name) } -ProtonConfigurer::ProtonConfigurer(vespalib::SyncableThreadExecutor &executor, +ProtonConfigurer::ProtonConfigurer(vespalib::ThreadExecutor &executor, IProtonConfigurerOwner &owner, const std::unique_ptr<IProtonDiskLayout> &diskLayout) : IProtonConfigurer(), @@ -58,9 +59,22 @@ ProtonConfigurer::ProtonConfigurer(vespalib::SyncableThreadExecutor &executor, { } -ProtonConfigurer::~ProtonConfigurer() -{ -} +class ProtonConfigurer::ReconfigureTask : public vespalib::Executor::Task { +public: + 
ReconfigureTask(ProtonConfigurer & configurer) + : _configurer(configurer), + _retainGuard(configurer._pendingReconfigureTasks) + {} + + void run() override { + _configurer.performReconfigure(); + } +private: + ProtonConfigurer & _configurer; + vespalib::RetainGuard _retainGuard; +}; + +ProtonConfigurer::~ProtonConfigurer() = default; void ProtonConfigurer::setAllowReconfig(bool allowReconfig) @@ -72,11 +86,12 @@ ProtonConfigurer::setAllowReconfig(bool allowReconfig) _allowReconfig = allowReconfig; if (allowReconfig) { // Ensure that pending config is applied - _executor.execute(makeLambdaTask([this]() { performReconfigure(); })); + _executor.execute(std::make_unique<ReconfigureTask>(*this)); } } if (!allowReconfig) { - _executor.sync(); // drain queued performReconfigure tasks + // drain queued performReconfigure tasks + _pendingReconfigureTasks.waitForZeroRefCount(); } } @@ -102,7 +117,7 @@ ProtonConfigurer::reconfigure(std::shared_ptr<ProtonConfigSnapshot> configSnapsh std::lock_guard<std::mutex> guard(_mutex); _pendingConfigSnapshot = configSnapshot; if (_allowReconfig) { - _executor.execute(makeLambdaTask([&]() { performReconfigure(); })); + _executor.execute(std::make_unique<ReconfigureTask>(*this)); } } diff --git a/searchcore/src/vespa/searchcore/proton/server/proton_configurer.h b/searchcore/src/vespa/searchcore/proton/server/proton_configurer.h index 829da0756f8..ddb9c1bed92 100644 --- a/searchcore/src/vespa/searchcore/proton/server/proton_configurer.h +++ b/searchcore/src/vespa/searchcore/proton/server/proton_configurer.h @@ -7,6 +7,7 @@ #include <vespa/document/bucket/bucketspace.h> #include <vespa/searchcore/proton/common/doctypename.h> #include <vespa/vespalib/net/simple_component_config_producer.h> +#include <vespa/vespalib/util/monitored_refcount.h> #include <map> #include <mutex> @@ -25,17 +26,19 @@ class IProtonDiskLayout; class ProtonConfigurer : public IProtonConfigurer { using DocumentDBs = std::map<DocTypeName, 
std::pair<std::weak_ptr<IDocumentDBConfigOwner>, std::weak_ptr<DocumentDBDirectoryHolder>>>; - using InitializeThreads = std::shared_ptr<vespalib::SyncableThreadExecutor>; + using InitializeThreads = std::shared_ptr<vespalib::ThreadExecutor>; + class ReconfigureTask; - ExecutorThreadService _executor; - IProtonConfigurerOwner &_owner; - DocumentDBs _documentDBs; - std::shared_ptr<ProtonConfigSnapshot> _pendingConfigSnapshot; - std::shared_ptr<ProtonConfigSnapshot> _activeConfigSnapshot; - mutable std::mutex _mutex; - bool _allowReconfig; - vespalib::SimpleComponentConfigProducer _componentConfig; + ExecutorThreadService _executor; + IProtonConfigurerOwner &_owner; + DocumentDBs _documentDBs; + std::shared_ptr<ProtonConfigSnapshot> _pendingConfigSnapshot; + std::shared_ptr<ProtonConfigSnapshot> _activeConfigSnapshot; + mutable std::mutex _mutex; + bool _allowReconfig; + vespalib::SimpleComponentConfigProducer _componentConfig; const std::unique_ptr<IProtonDiskLayout> &_diskLayout; + vespalib::MonitoredRefCount _pendingReconfigureTasks; void performReconfigure(); bool skipConfig(const ProtonConfigSnapshot *configSnapshot, bool initialConfig); @@ -48,7 +51,7 @@ class ProtonConfigurer : public IProtonConfigurer void pruneInitialDocumentDBDirs(const ProtonConfigSnapshot &configSnapshot); public: - ProtonConfigurer(vespalib::SyncableThreadExecutor &executor, + ProtonConfigurer(vespalib::ThreadExecutor &executor, IProtonConfigurerOwner &owner, const std::unique_ptr<IProtonDiskLayout> &diskLayout); diff --git a/searchcore/src/vespa/searchcore/proton/server/proton_thread_pools_explorer.cpp b/searchcore/src/vespa/searchcore/proton/server/proton_thread_pools_explorer.cpp index 73c63da622d..06a51b7e661 100644 --- a/searchcore/src/vespa/searchcore/proton/server/proton_thread_pools_explorer.cpp +++ b/searchcore/src/vespa/searchcore/proton/server/proton_thread_pools_explorer.cpp @@ -5,24 +5,26 @@ #include <vespa/vespalib/data/slime/cursor.h> #include 
<vespa/vespalib/util/threadexecutor.h> -using vespalib::SyncableThreadExecutor; +using vespalib::ThreadExecutor; namespace proton { using explorer::convert_executor_to_slime; -ProtonThreadPoolsExplorer::ProtonThreadPoolsExplorer(const SyncableThreadExecutor* shared, - const SyncableThreadExecutor* match, - const SyncableThreadExecutor* docsum, - const SyncableThreadExecutor* flush, - const SyncableThreadExecutor* proton, - const SyncableThreadExecutor* warmup) +ProtonThreadPoolsExplorer::ProtonThreadPoolsExplorer(const ThreadExecutor* shared, + const ThreadExecutor* match, + const ThreadExecutor* docsum, + const ThreadExecutor* flush, + const ThreadExecutor* proton, + const ThreadExecutor* warmup, + const vespalib::ISequencedTaskExecutor* field_writer) : _shared(shared), _match(match), _docsum(docsum), _flush(flush), _proton(proton), - _warmup(warmup) + _warmup(warmup), + _field_writer(field_writer) { } @@ -37,6 +39,7 @@ ProtonThreadPoolsExplorer::get_state(const vespalib::slime::Inserter& inserter, convert_executor_to_slime(_flush, object.setObject("flush")); convert_executor_to_slime(_proton, object.setObject("proton")); convert_executor_to_slime(_warmup, object.setObject("warmup")); + convert_executor_to_slime(_field_writer, object.setObject("field_writer")); } } diff --git a/searchcore/src/vespa/searchcore/proton/server/proton_thread_pools_explorer.h b/searchcore/src/vespa/searchcore/proton/server/proton_thread_pools_explorer.h index 7f0873a750d..2cacdd2c336 100644 --- a/searchcore/src/vespa/searchcore/proton/server/proton_thread_pools_explorer.h +++ b/searchcore/src/vespa/searchcore/proton/server/proton_thread_pools_explorer.h @@ -4,7 +4,10 @@ #include <vespa/vespalib/net/state_explorer.h> -namespace vespalib { class SyncableThreadExecutor; } +namespace vespalib { +class ISequencedTaskExecutor; +class ThreadExecutor; +} namespace proton { @@ -13,20 +16,22 @@ namespace proton { */ class ProtonThreadPoolsExplorer : public vespalib::StateExplorer { private: - 
const vespalib::SyncableThreadExecutor* _shared; - const vespalib::SyncableThreadExecutor* _match; - const vespalib::SyncableThreadExecutor* _docsum; - const vespalib::SyncableThreadExecutor* _flush; - const vespalib::SyncableThreadExecutor* _proton; - const vespalib::SyncableThreadExecutor* _warmup; + const vespalib::ThreadExecutor* _shared; + const vespalib::ThreadExecutor* _match; + const vespalib::ThreadExecutor* _docsum; + const vespalib::ThreadExecutor* _flush; + const vespalib::ThreadExecutor* _proton; + const vespalib::ThreadExecutor* _warmup; + const vespalib::ISequencedTaskExecutor* _field_writer; public: - ProtonThreadPoolsExplorer(const vespalib::SyncableThreadExecutor* shared, - const vespalib::SyncableThreadExecutor* match, - const vespalib::SyncableThreadExecutor* docsum, - const vespalib::SyncableThreadExecutor* flush, - const vespalib::SyncableThreadExecutor* proton, - const vespalib::SyncableThreadExecutor* warmup); + ProtonThreadPoolsExplorer(const vespalib::ThreadExecutor* shared, + const vespalib::ThreadExecutor* match, + const vespalib::ThreadExecutor* docsum, + const vespalib::ThreadExecutor* flush, + const vespalib::ThreadExecutor* proton, + const vespalib::ThreadExecutor* warmup, + const vespalib::ISequencedTaskExecutor* field_writer); void get_state(const vespalib::slime::Inserter& inserter, bool full) const override; }; diff --git a/searchcore/src/vespa/searchcore/proton/server/searchable_feed_view.cpp b/searchcore/src/vespa/searchcore/proton/server/searchable_feed_view.cpp index 323ca9add17..66b1ba1ae2e 100644 --- a/searchcore/src/vespa/searchcore/proton/server/searchable_feed_view.cpp +++ b/searchcore/src/vespa/searchcore/proton/server/searchable_feed_view.cpp @@ -80,9 +80,12 @@ SearchableFeedView::performIndexPut(SerialNum serialNum, search::DocumentIdT lid } void -SearchableFeedView::heartBeatIndexedFields(SerialNum serialNum) +SearchableFeedView::heartBeatIndexedFields(SerialNum serialNum, DoneCallback onDone) { - 
_writeService.index().execute(makeLambdaTask([this, serialNum] { performIndexHeartBeat(serialNum); })); + _writeService.index().execute(makeLambdaTask([this, serialNum, onDone] { + (void) onDone; + performIndexHeartBeat(serialNum); + })); } void @@ -154,9 +157,9 @@ SearchableFeedView::removeIndexedFields(SerialNum serialNum, const LidVector &li } void -SearchableFeedView::internalDeleteBucket(const DeleteBucketOperation &delOp) +SearchableFeedView::internalDeleteBucket(const DeleteBucketOperation &delOp, DoneCallback onDone) { - Parent::internalDeleteBucket(delOp); + Parent::internalDeleteBucket(delOp, onDone); } void @@ -167,9 +170,9 @@ SearchableFeedView::performIndexForceCommit(SerialNum serialNum, OnForceCommitDo } void -SearchableFeedView::handleCompactLidSpace(const CompactLidSpaceOperation &op) +SearchableFeedView::handleCompactLidSpace(const CompactLidSpaceOperation &op, DoneCallback onDone) { - Parent::handleCompactLidSpace(op); + Parent::handleCompactLidSpace(op, onDone); vespalib::Gate gate; _writeService.index().execute( makeLambdaTask([this, &op, &gate]() { diff --git a/searchcore/src/vespa/searchcore/proton/server/searchable_feed_view.h b/searchcore/src/vespa/searchcore/proton/server/searchable_feed_view.h index 5ff309446d2..70b02c4a39f 100644 --- a/searchcore/src/vespa/searchcore/proton/server/searchable_feed_view.h +++ b/searchcore/src/vespa/searchcore/proton/server/searchable_feed_view.h @@ -41,8 +41,8 @@ private: void performIndexRemove(SerialNum serialNum, const LidVector &lidsToRemove, OnWriteDoneType onWriteDone); void performIndexHeartBeat(SerialNum serialNum); - void internalDeleteBucket(const DeleteBucketOperation &delOp) override; - void heartBeatIndexedFields(SerialNum serialNum) override; + void internalDeleteBucket(const DeleteBucketOperation &delOp, DoneCallback onDone) override; + void heartBeatIndexedFields(SerialNum serialNum, DoneCallback onDone) override; void putIndexedFields(SerialNum serialNum, search::DocumentIdT lid, const 
DocumentSP &newDoc, OnOperationDoneType onWriteDone) override; void updateIndexedFields(SerialNum serialNum, search::DocumentIdT lid, FutureDoc newDoc, OnOperationDoneType onWriteDone) override; @@ -58,7 +58,7 @@ public: ~SearchableFeedView() override; const IIndexWriter::SP &getIndexWriter() const { return _indexWriter; } - void handleCompactLidSpace(const CompactLidSpaceOperation &op) override; + void handleCompactLidSpace(const CompactLidSpaceOperation &op, DoneCallback onDone) override; }; } // namespace proton diff --git a/searchcore/src/vespa/searchcore/proton/server/searchabledocsubdb.cpp b/searchcore/src/vespa/searchcore/proton/server/searchabledocsubdb.cpp index 0aaf0bfb06f..6e87f33e1c6 100644 --- a/searchcore/src/vespa/searchcore/proton/server/searchabledocsubdb.cpp +++ b/searchcore/src/vespa/searchcore/proton/server/searchabledocsubdb.cpp @@ -180,9 +180,9 @@ SearchableDocSubDB::propagateFlushConfig() } void -SearchableDocSubDB::setBucketStateCalculator(const std::shared_ptr<IBucketStateCalculator> &calc) +SearchableDocSubDB::setBucketStateCalculator(const std::shared_ptr<IBucketStateCalculator> &calc, OnDone onDone) { - FastAccessDocSubDB::setBucketStateCalculator(calc); + FastAccessDocSubDB::setBucketStateCalculator(calc, std::move(onDone)); propagateFlushConfig(); } diff --git a/searchcore/src/vespa/searchcore/proton/server/searchabledocsubdb.h b/searchcore/src/vespa/searchcore/proton/server/searchabledocsubdb.h index 376a319c7e7..e6da0e958e8 100644 --- a/searchcore/src/vespa/searchcore/proton/server/searchabledocsubdb.h +++ b/searchcore/src/vespa/searchcore/proton/server/searchabledocsubdb.h @@ -45,12 +45,12 @@ public: const FastAccessDocSubDB::Context _fastUpdCtx; matching::QueryLimiter &_queryLimiter; const vespalib::Clock &_clock; - vespalib::SyncableThreadExecutor &_warmupExecutor; + vespalib::Executor &_warmupExecutor; Context(const FastAccessDocSubDB::Context &fastUpdCtx, matching::QueryLimiter &queryLimiter, const vespalib::Clock &clock, - 
vespalib::SyncableThreadExecutor &warmupExecutor) + vespalib:: Executor &warmupExecutor) : _fastUpdCtx(fastUpdCtx), _queryLimiter(queryLimiter), _clock(clock), @@ -70,7 +70,7 @@ private: vespalib::eval::ConstantValueCache _constantValueCache; matching::ConstantValueRepo _constantValueRepo; SearchableDocSubDBConfigurer _configurer; - vespalib::SyncableThreadExecutor &_warmupExecutor; + vespalib::Executor &_warmupExecutor; std::shared_ptr<GidToLidChangeHandler> _realGidToLidChangeHandler; DocumentDBFlushConfig _flushConfig; @@ -105,7 +105,7 @@ public: IReprocessingTask::List applyConfig(const DocumentDBConfig &newConfigSnapshot, const DocumentDBConfig &oldConfigSnapshot, SerialNum serialNum, const ReconfigParams ¶ms, IDocumentDBReferenceResolver &resolver) override; - void setBucketStateCalculator(const std::shared_ptr<IBucketStateCalculator> &calc) override; + void setBucketStateCalculator(const std::shared_ptr<IBucketStateCalculator> &calc, OnDone onDone) override; void clearViews() override; diff --git a/searchcore/src/vespa/searchcore/proton/server/shared_threading_service.cpp b/searchcore/src/vespa/searchcore/proton/server/shared_threading_service.cpp new file mode 100644 index 00000000000..e32cd6f5f4e --- /dev/null +++ b/searchcore/src/vespa/searchcore/proton/server/shared_threading_service.cpp @@ -0,0 +1,52 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+ +#include "shared_threading_service.h" +#include <vespa/vespalib/util/blockingthreadstackexecutor.h> +#include <vespa/vespalib/util/isequencedtaskexecutor.h> +#include <vespa/vespalib/util/sequencedtaskexecutor.h> +#include <vespa/vespalib/util/size_literals.h> +#include <vespa/vespalib/util/invokeserviceimpl.h> + +VESPA_THREAD_STACK_TAG(proton_field_writer_executor) +VESPA_THREAD_STACK_TAG(proton_shared_executor) +VESPA_THREAD_STACK_TAG(proton_warmup_executor) + +namespace proton { + +using SharedFieldWriterExecutor = ThreadingServiceConfig::ProtonConfig::Feeding::SharedFieldWriterExecutor; + +SharedThreadingService::SharedThreadingService(const SharedThreadingServiceConfig& cfg) + : _warmup(cfg.warmup_threads(), 128_Ki, proton_warmup_executor), + _shared(std::make_shared<vespalib::BlockingThreadStackExecutor>(cfg.shared_threads(), 128_Ki, + cfg.shared_task_limit(), proton_shared_executor)), + _field_writer(), + _invokeService(cfg.field_writer_config().reactionTime()), + _invokeRegistrations() +{ + const auto& fw_cfg = cfg.field_writer_config(); + if (fw_cfg.shared_field_writer() == SharedFieldWriterExecutor::DOCUMENT_DB) { + _field_writer = vespalib::SequencedTaskExecutor::create(proton_field_writer_executor, + fw_cfg.indexingThreads() * 3, + fw_cfg.defaultTaskLimit(), + fw_cfg.optimize(), + fw_cfg.kindOfwatermark()); + if (fw_cfg.optimize() == vespalib::Executor::OptimizeFor::THROUGHPUT) { + _invokeRegistrations.push_back(_invokeService.registerInvoke([executor = _field_writer.get()]() { + executor->wakeup(); + })); + } + } +} + +SharedThreadingService::~SharedThreadingService() = default; + +void +SharedThreadingService::sync_all_executors() { + _warmup.sync(); + _shared->sync(); + if (_field_writer) { + _field_writer->sync_all(); + } +} + +} diff --git a/searchcore/src/vespa/searchcore/proton/server/shared_threading_service.h b/searchcore/src/vespa/searchcore/proton/server/shared_threading_service.h new file mode 100644 index 00000000000..cd0e6d71402 --- 
/dev/null +++ b/searchcore/src/vespa/searchcore/proton/server/shared_threading_service.h @@ -0,0 +1,38 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +#pragma once + +#include "i_shared_threading_service.h" +#include "shared_threading_service_config.h" +#include <vespa/vespalib/util/threadstackexecutor.h> +#include <vespa/vespalib/util/syncable.h> +#include <vespa/vespalib/util/invokeserviceimpl.h> +#include <memory> + +namespace proton { + +/** + * Class containing the thread executors that are shared across all document dbs. + */ +class SharedThreadingService : public ISharedThreadingService { +private: + using Registration = std::unique_ptr<vespalib::IDestructorCallback>; + vespalib::ThreadStackExecutor _warmup; + std::shared_ptr<vespalib::SyncableThreadExecutor> _shared; + std::unique_ptr<vespalib::ISequencedTaskExecutor> _field_writer; + vespalib::InvokeServiceImpl _invokeService; + std::vector<Registration> _invokeRegistrations; + +public: + SharedThreadingService(const SharedThreadingServiceConfig& cfg); + ~SharedThreadingService(); + + std::shared_ptr<vespalib::Executor> shared_raw() { return _shared; } + void sync_all_executors(); + + vespalib::ThreadExecutor& warmup() override { return _warmup; } + vespalib::ThreadExecutor& shared() override { return *_shared; } + vespalib::ISequencedTaskExecutor* field_writer() override { return _field_writer.get(); } + vespalib::InvokeService & invokeService() override { return _invokeService; } +}; + +} diff --git a/searchcore/src/vespa/searchcore/proton/server/shared_threading_service_config.cpp b/searchcore/src/vespa/searchcore/proton/server/shared_threading_service_config.cpp new file mode 100644 index 00000000000..8a81c3f4388 --- /dev/null +++ b/searchcore/src/vespa/searchcore/proton/server/shared_threading_service_config.cpp @@ -0,0 +1,44 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+ +#include "shared_threading_service_config.h" +#include <vespa/searchcore/config/config-proton.h> +#include <cmath> + +namespace proton { + +using ProtonConfig = SharedThreadingServiceConfig::ProtonConfig; + +SharedThreadingServiceConfig::SharedThreadingServiceConfig(uint32_t shared_threads_in, + uint32_t shared_task_limit_in, + uint32_t warmup_threads_in, + const ThreadingServiceConfig& field_writer_config_in) + : _shared_threads(shared_threads_in), + _shared_task_limit(shared_task_limit_in), + _warmup_threads(warmup_threads_in), + _field_writer_config(field_writer_config_in) +{ +} + +namespace { + +size_t +derive_shared_threads(const ProtonConfig& cfg, const HwInfo::Cpu& cpu_info) +{ + size_t scaled_cores = (size_t)std::ceil(cpu_info.cores() * cfg.feeding.concurrency); + + // We need at least 1 guaranteed free worker in order to ensure progress. + return std::max(scaled_cores, cfg.documentdb.size() + cfg.flush.maxconcurrent + 1); +} + +} + +SharedThreadingServiceConfig +SharedThreadingServiceConfig::make(const proton::SharedThreadingServiceConfig::ProtonConfig& cfg, + const proton::HwInfo::Cpu& cpu_info) +{ + size_t shared_threads = derive_shared_threads(cfg, cpu_info); + return proton::SharedThreadingServiceConfig(shared_threads, shared_threads * 16, 4, + ThreadingServiceConfig::make(cfg, cfg.feeding.concurrency, cpu_info)); +} + +} diff --git a/searchcore/src/vespa/searchcore/proton/server/shared_threading_service_config.h b/searchcore/src/vespa/searchcore/proton/server/shared_threading_service_config.h new file mode 100644 index 00000000000..1214bfa77fa --- /dev/null +++ b/searchcore/src/vespa/searchcore/proton/server/shared_threading_service_config.h @@ -0,0 +1,39 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+#pragma once + +#include "threading_service_config.h" +#include <vespa/searchcore/proton/common/hw_info.h> + +namespace vespa::config::search::core::internal { class InternalProtonType; } + +namespace proton { + +/** + * Config for the thread executors that are shared across all document dbs. + */ +class SharedThreadingServiceConfig { +public: + using ProtonConfig = const vespa::config::search::core::internal::InternalProtonType; + +private: + uint32_t _shared_threads; + uint32_t _shared_task_limit; + uint32_t _warmup_threads; + ThreadingServiceConfig _field_writer_config; + +public: + SharedThreadingServiceConfig(uint32_t shared_threads_in, + uint32_t shared_task_limit_in, + uint32_t warmup_threads_in, + const ThreadingServiceConfig& field_writer_config_in); + + static SharedThreadingServiceConfig make(const ProtonConfig& cfg, const HwInfo::Cpu& cpu_info); + + uint32_t shared_threads() const { return _shared_threads; } + uint32_t shared_task_limit() const { return _shared_task_limit; } + uint32_t warmup_threads() const { return _warmup_threads; } + const ThreadingServiceConfig& field_writer_config() const { return _field_writer_config; } +}; + +} + diff --git a/searchcore/src/vespa/searchcore/proton/server/storeonlydocsubdb.cpp b/searchcore/src/vespa/searchcore/proton/server/storeonlydocsubdb.cpp index 97e55c37aff..6b1356da50e 100644 --- a/searchcore/src/vespa/searchcore/proton/server/storeonlydocsubdb.cpp +++ b/searchcore/src/vespa/searchcore/proton/server/storeonlydocsubdb.cpp @@ -30,7 +30,6 @@ #include <vespa/log/log.h> LOG_SETUP(".proton.server.storeonlydocsubdb"); -using search::CompactionStrategy; using search::GrowStrategy; using vespalib::makeLambdaTask; using search::index::Schema; @@ -43,6 +42,7 @@ using vespalib::GenericHeader; using search::common::FileHeaderContext; using proton::initializer::InitializerTask; using searchcorespi::IFlushTarget; +using vespalib::datastore::CompactionStrategy; namespace proton { @@ -422,7 +422,7 @@ namespace { constexpr 
double RETIRED_DEAD_RATIO = 0.5; struct UpdateConfig : public search::attribute::IAttributeFunctor { - UpdateConfig(search::CompactionStrategy compactionStrategy) noexcept + UpdateConfig(CompactionStrategy compactionStrategy) noexcept : _compactionStrategy(compactionStrategy) {} void operator()(search::attribute::IAttributeVector &iAttributeVector) override { @@ -433,15 +433,15 @@ struct UpdateConfig : public search::attribute::IAttributeFunctor { attributeVector->update_config(cfg); } } - search::CompactionStrategy _compactionStrategy; + CompactionStrategy _compactionStrategy; }; } -search::CompactionStrategy -StoreOnlyDocSubDB::computeCompactionStrategy(search::CompactionStrategy strategy) const { +CompactionStrategy +StoreOnlyDocSubDB::computeCompactionStrategy(CompactionStrategy strategy) const { return isNodeRetired() - ? search::CompactionStrategy(RETIRED_DEAD_RATIO, RETIRED_DEAD_RATIO) + ? CompactionStrategy(RETIRED_DEAD_RATIO, RETIRED_DEAD_RATIO) : strategy; } @@ -460,24 +460,24 @@ StoreOnlyDocSubDB::reconfigure(const search::LogDocumentStore::Config & config, } void -StoreOnlyDocSubDB::setBucketStateCalculator(const std::shared_ptr<IBucketStateCalculator> & calc) { +StoreOnlyDocSubDB::setBucketStateCalculator(const std::shared_ptr<IBucketStateCalculator> & calc, OnDone onDone) { bool wasNodeRetired = isNodeRetired(); _nodeRetired = calc->nodeRetired(); if (wasNodeRetired != isNodeRetired()) { - search::CompactionStrategy compactionStrategy = computeCompactionStrategy(_lastConfiguredCompactionStrategy); + CompactionStrategy compactionStrategy = computeCompactionStrategy(_lastConfiguredCompactionStrategy); auto cfg = _dms->getConfig(); cfg.setCompactionStrategy(compactionStrategy); _dms->update_config(cfg); - reconfigureAttributesConsideringNodeState(); + reconfigureAttributesConsideringNodeState(std::move(onDone)); } } void -StoreOnlyDocSubDB::reconfigureAttributesConsideringNodeState() { - search::CompactionStrategy compactionStrategy = 
computeCompactionStrategy(_lastConfiguredCompactionStrategy); +StoreOnlyDocSubDB::reconfigureAttributesConsideringNodeState(OnDone onDone) { + CompactionStrategy compactionStrategy = computeCompactionStrategy(_lastConfiguredCompactionStrategy); auto attrMan = getAttributeManager(); if (attrMan) { - attrMan->asyncForEachAttribute(std::make_shared<UpdateConfig>(compactionStrategy)); + attrMan->asyncForEachAttribute(std::make_shared<UpdateConfig>(compactionStrategy), std::move(onDone)); } } diff --git a/searchcore/src/vespa/searchcore/proton/server/storeonlydocsubdb.h b/searchcore/src/vespa/searchcore/proton/server/storeonlydocsubdb.h index c7eaa0ad0b1..d43b865c000 100644 --- a/searchcore/src/vespa/searchcore/proton/server/storeonlydocsubdb.h +++ b/searchcore/src/vespa/searchcore/proton/server/storeonlydocsubdb.h @@ -153,7 +153,7 @@ private: std::shared_ptr<ShrinkLidSpaceFlushTarget> _dmsShrinkTarget; std::shared_ptr<PendingLidTrackerBase> _pendingLidsForCommit; bool _nodeRetired; - search::CompactionStrategy _lastConfiguredCompactionStrategy; + vespalib::datastore::CompactionStrategy _lastConfiguredCompactionStrategy; IFlushTargetList getFlushTargets() override; protected: @@ -183,7 +183,7 @@ protected: StoreOnlyFeedView::PersistentParams getFeedViewPersistentParams(); vespalib::string getSubDbName() const; void reconfigure(const search::LogDocumentStore::Config & protonConfig, const AllocStrategy& alloc_strategy); - void reconfigureAttributesConsideringNodeState(); + void reconfigureAttributesConsideringNodeState(OnDone onDone); public: StoreOnlyDocSubDB(const Config &cfg, const Context &ctx); ~StoreOnlyDocSubDB() override; @@ -203,7 +203,7 @@ public: IReprocessingTask::List applyConfig(const DocumentDBConfig &newConfigSnapshot, const DocumentDBConfig &oldConfigSnapshot, SerialNum serialNum, const ReconfigParams ¶ms, IDocumentDBReferenceResolver &resolver) override; - void setBucketStateCalculator(const std::shared_ptr<IBucketStateCalculator> &calc) override; + void 
setBucketStateCalculator(const std::shared_ptr<IBucketStateCalculator> &calc, OnDone onDone) override; ISearchHandler::SP getSearchView() const override { return _iSearchView.get(); } IFeedView::SP getFeedView() const override { return _iFeedView.get(); } @@ -234,7 +234,7 @@ public: std::shared_ptr<IDocumentDBReference> getDocumentDBReference() override; void tearDownReferences(IDocumentDBReferenceResolver &resolver) override; PendingLidTrackerBase & getUncommittedLidsTracker() override { return *_pendingLidsForCommit; } - search::CompactionStrategy computeCompactionStrategy(search::CompactionStrategy strategy) const; + vespalib::datastore::CompactionStrategy computeCompactionStrategy(vespalib::datastore::CompactionStrategy strategy) const; bool isNodeRetired() const { return _nodeRetired; } }; diff --git a/searchcore/src/vespa/searchcore/proton/server/storeonlyfeedview.cpp b/searchcore/src/vespa/searchcore/proton/server/storeonlyfeedview.cpp index 5a7afcc584d..97bd940b403 100644 --- a/searchcore/src/vespa/searchcore/proton/server/storeonlyfeedview.cpp +++ b/searchcore/src/vespa/searchcore/proton/server/storeonlyfeedview.cpp @@ -16,7 +16,6 @@ #include <vespa/searchcore/proton/reference/i_gid_to_lid_change_handler.h> #include <vespa/searchcore/proton/reference/i_pending_gid_to_lid_changes.h> #include <vespa/vespalib/util/destructor_callbacks.h> -#include <vespa/searchlib/common/scheduletaskcallback.h> #include <vespa/vespalib/util/isequencedtaskexecutor.h> #include <vespa/vespalib/util/exceptions.h> @@ -180,7 +179,7 @@ StoreOnlyFeedView::forceCommit(const CommitParam & param, DoneCallback onDone) internalForceCommit(param, std::make_shared<ForceCommitContext>(_writeService.master(), _metaStore, _pendingLidsForCommit->produceSnapshot(), _gidToLidChangeHandler.grab_pending_changes(), - std::move(onDone))); + onDone)); } void @@ -263,11 +262,11 @@ StoreOnlyFeedView::internalPut(FeedToken token, const PutOperation &putOp) } void 
-StoreOnlyFeedView::heartBeatIndexedFields(SerialNum ) {} +StoreOnlyFeedView::heartBeatIndexedFields(SerialNum, DoneCallback ) {} void -StoreOnlyFeedView::heartBeatAttributes(SerialNum ) {} +StoreOnlyFeedView::heartBeatAttributes(SerialNum, DoneCallback ) {} void StoreOnlyFeedView::updateAttributes(SerialNum, Lid, const DocumentUpdate & upd, @@ -368,9 +367,10 @@ StoreOnlyFeedView::removeSummaries(SerialNum serialNum, const LidVector & lids, } void -StoreOnlyFeedView::heartBeatSummary(SerialNum serialNum) { +StoreOnlyFeedView::heartBeatSummary(SerialNum serialNum, DoneCallback onDone) { summaryExecutor().execute( - makeLambdaTask([serialNum, this] { + makeLambdaTask([serialNum, this, onDone] { + (void) onDone; _summaryAdapter->heartBeat(serialNum); })); } @@ -417,7 +417,6 @@ StoreOnlyFeedView::internalUpdate(FeedToken token, const UpdateOperation &updOp) bool updateOk = _metaStore.updateMetaData(updOp.getLid(), updOp.getBucketId(), updOp.getTimestamp()); assert(updateOk); (void) updateOk; - _metaStore.commit(CommitParam(serialNum)); } auto onWriteDone = createUpdateDoneContext(std::move(token), get_pending_lid_token(updOp), updOp.getUpdate()); @@ -605,7 +604,6 @@ StoreOnlyFeedView::adjustMetaStore(const DocumentOperation &op, const GlobalId & gate.await(); removeMetaData(_metaStore, gid, docId, op, _params._subDbType == SubDbType::REMOVED); } - _metaStore.commit(CommitParam(serialNum)); } } @@ -616,14 +614,11 @@ void StoreOnlyFeedView::removeIndexedFields(SerialNum , const LidVector &, OnWriteDoneType ) {} size_t -StoreOnlyFeedView::removeDocuments(const RemoveDocumentsOperation &op, bool remove_index_and_attributes) +StoreOnlyFeedView::removeDocuments(const RemoveDocumentsOperation &op, bool remove_index_and_attributes, DoneCallback onWriteDone) { const SerialNum serialNum = op.getSerialNum(); const LidVectorContext::SP &ctx = op.getLidsToRemove(_params._subDbId); if (!ctx) { - if (useDocumentMetaStore(serialNum)) { - _metaStore.commit(CommitParam(serialNum)); - } 
return 0; } const LidVector &lidsToRemove(ctx->getLidVector()); @@ -634,13 +629,9 @@ StoreOnlyFeedView::removeDocuments(const RemoveDocumentsOperation &op, bool remo _gidToLidChangeHandler.notifyRemoves(std::make_shared<vespalib::GateCallback>(gate), gidsToRemove, serialNum); gate.await(); _metaStore.removeBatch(lidsToRemove, ctx->getDocIdLimit()); - _metaStore.commit(CommitParam(serialNum)); _lidReuseDelayer.delayReuse(lidsToRemove); } - std::shared_ptr<vespalib::IDestructorCallback> onWriteDone; - vespalib::Executor::Task::UP removeBatchDoneTask; - removeBatchDoneTask = makeLambdaTask([]() {}); - onWriteDone = std::make_shared<search::ScheduleTaskCallback>(_writeService.master(), std::move(removeBatchDoneTask)); + if (remove_index_and_attributes) { removeIndexedFields(serialNum, lidsToRemove, onWriteDone); removeAttributes(serialNum, lidsToRemove, onWriteDone); @@ -666,15 +657,15 @@ StoreOnlyFeedView::prepareDeleteBucket(DeleteBucketOperation &delOp) } void -StoreOnlyFeedView::handleDeleteBucket(const DeleteBucketOperation &delOp) +StoreOnlyFeedView::handleDeleteBucket(const DeleteBucketOperation &delOp, DoneCallback onDone) { - internalDeleteBucket(delOp); + internalDeleteBucket(delOp, onDone); } void -StoreOnlyFeedView::internalDeleteBucket(const DeleteBucketOperation &delOp) +StoreOnlyFeedView::internalDeleteBucket(const DeleteBucketOperation &delOp, DoneCallback onDone) { - size_t rm_count = removeDocuments(delOp, true); + size_t rm_count = removeDocuments(delOp, true, onDone); LOG(debug, "internalDeleteBucket(): docType(%s), bucket(%s), lidsToRemove(%zu)", _params._docTypeName.toString().c_str(), delOp.getBucketId().toString().c_str(), rm_count); } @@ -692,7 +683,7 @@ StoreOnlyFeedView::prepareMove(MoveOperation &moveOp) // CombiningFeedView calls this for both source and target subdb. 
void -StoreOnlyFeedView::handleMove(const MoveOperation &moveOp, IDestructorCallback::SP doneCtx) +StoreOnlyFeedView::handleMove(const MoveOperation &moveOp, DoneCallback doneCtx) { assert(moveOp.getValidDbdId()); assert(moveOp.getValidPrevDbdId()); @@ -721,29 +712,29 @@ StoreOnlyFeedView::handleMove(const MoveOperation &moveOp, IDestructorCallback:: putIndexedFields(serialNum, moveOp.getLid(), doc, onWriteDone); } if (docAlreadyExists && moveOp.changedDbdId()) { - internalRemove(std::move(doneCtx), _pendingLidsForCommit->produce(moveOp.getPrevLid()), serialNum, moveOp.getPrevLid()); + internalRemove(doneCtx, _pendingLidsForCommit->produce(moveOp.getPrevLid()), serialNum, moveOp.getPrevLid()); } } void -StoreOnlyFeedView::heartBeat(SerialNum serialNum) +StoreOnlyFeedView::heartBeat(SerialNum serialNum, DoneCallback onDone) { assert(_writeService.master().isCurrentThread()); _metaStore.removeAllOldGenerations(); _metaStore.commit(CommitParam(serialNum)); - heartBeatSummary(serialNum); - heartBeatIndexedFields(serialNum); - heartBeatAttributes(serialNum); + heartBeatSummary(serialNum, onDone); + heartBeatIndexedFields(serialNum, onDone); + heartBeatAttributes(serialNum, onDone); } // CombiningFeedView calls this only for the removed subdb. 
void StoreOnlyFeedView:: -handlePruneRemovedDocuments(const PruneRemovedDocumentsOperation &pruneOp) +handlePruneRemovedDocuments(const PruneRemovedDocumentsOperation &pruneOp, DoneCallback onDone) { assert(_params._subDbType == SubDbType::REMOVED); assert(pruneOp.getSubDbId() == _params._subDbId); - uint32_t rm_count = removeDocuments(pruneOp, false); + uint32_t rm_count = removeDocuments(pruneOp, false, onDone); LOG(debug, "MinimalFeedView::handlePruneRemovedDocuments called, doctype(%s) %u lids pruned, limit %u", _params._docTypeName.toString().c_str(), rm_count, @@ -751,7 +742,7 @@ handlePruneRemovedDocuments(const PruneRemovedDocumentsOperation &pruneOp) } void -StoreOnlyFeedView::handleCompactLidSpace(const CompactLidSpaceOperation &op) +StoreOnlyFeedView::handleCompactLidSpace(const CompactLidSpaceOperation &op, DoneCallback onDone) { assert(_params._subDbId == op.getSubDbId()); const SerialNum serialNum = op.getSerialNum(); @@ -760,7 +751,7 @@ StoreOnlyFeedView::handleCompactLidSpace(const CompactLidSpaceOperation &op) auto commitContext(std::make_shared<ForceCommitContext>(_writeService.master(), _metaStore, _pendingLidsForCommit->produceSnapshot(), _gidToLidChangeHandler.grab_pending_changes(), - DoneCallback())); + onDone)); commitContext->holdUnblockShrinkLidSpace(); internalForceCommit(CommitParam(serialNum), commitContext); } diff --git a/searchcore/src/vespa/searchcore/proton/server/storeonlyfeedview.h b/searchcore/src/vespa/searchcore/proton/server/storeonlyfeedview.h index 4e7a62548c2..c25accaf4a4 100644 --- a/searchcore/src/vespa/searchcore/proton/server/storeonlyfeedview.h +++ b/searchcore/src/vespa/searchcore/proton/server/storeonlyfeedview.h @@ -53,12 +53,11 @@ public: using LidVector = LidVectorContext::LidVector; using Document = document::Document; using DocumentUpdate = document::DocumentUpdate; - using OnWriteDoneType = const std::shared_ptr<vespalib::IDestructorCallback> &; + using OnWriteDoneType = DoneCallback; using 
OnForceCommitDoneType =const std::shared_ptr<ForceCommitContext> &; using OnOperationDoneType = const std::shared_ptr<OperationDoneContext> &; using OnPutDoneType = const std::shared_ptr<PutDoneContext> &; using OnRemoveDoneType = const std::shared_ptr<RemoveDoneContext> &; - using FeedTokenUP = std::unique_ptr<FeedToken>; using FutureDoc = std::shared_future<std::unique_ptr<const Document>>; using PromisedDoc = std::promise<std::unique_ptr<const Document>>; using FutureStream = std::future<vespalib::nbostream>; @@ -66,7 +65,6 @@ public: using DocumentSP = std::shared_ptr<Document>; using DocumentUpdateSP = std::shared_ptr<DocumentUpdate>; using LidReuseDelayer = documentmetastore::LidReuseDelayer; - using IDestructorCallbackSP = std::shared_ptr<vespalib::IDestructorCallback>; using Lid = search::DocumentIdT; @@ -153,7 +151,7 @@ protected: IGidToLidChangeHandler &_gidToLidChangeHandler; private: - searchcorespi::index::IThreadService & summaryExecutor() { + vespalib::Executor & summaryExecutor() { return _writeService.summary(); } void putSummary(SerialNum serialNum, Lid lid, FutureStream doc, OnOperationDoneType onDone); @@ -161,7 +159,7 @@ private: void putSummary(SerialNum serialNum, Lid lid, DocumentSP doc, OnOperationDoneType onDone); void removeSummary(SerialNum serialNum, Lid lid, OnWriteDoneType onDone); void removeSummaries(SerialNum serialNum, const LidVector & lids, OnWriteDoneType onDone); - void heartBeatSummary(SerialNum serialNum); + void heartBeatSummary(SerialNum serialNum, DoneCallback onDone); bool useDocumentStore(SerialNum replaySerialNum) const { return replaySerialNum > _params._flushedDocumentStoreSerialNum; @@ -180,7 +178,7 @@ private: // Removes documents from meta store and document store. // returns the number of documents removed. 
- size_t removeDocuments(const RemoveDocumentsOperation &op, bool remove_index_and_attribute_fields); + size_t removeDocuments(const RemoveDocumentsOperation &op, bool remove_index_and_attribute_fields, DoneCallback onDone); void internalRemove(IDestructorCallbackSP token, IPendingLidTracker::Token uncommitted, SerialNum serialNum, Lid lid); @@ -190,9 +188,9 @@ private: PromisedDoc promisedDoc, PromisedStream promisedStream); protected: - virtual void internalDeleteBucket(const DeleteBucketOperation &delOp); - virtual void heartBeatIndexedFields(SerialNum serialNum); - virtual void heartBeatAttributes(SerialNum serialNum); + virtual void internalDeleteBucket(const DeleteBucketOperation &delOp, DoneCallback onDone); + virtual void heartBeatIndexedFields(SerialNum serialNum, DoneCallback onDone); + virtual void heartBeatAttributes(SerialNum serialNum, DoneCallback onDone); private: virtual void putAttributes(SerialNum serialNum, Lid lid, const Document &doc, OnPutDoneType onWriteDone); @@ -232,10 +230,10 @@ public: void prepareRemove(RemoveOperation &rmOp) override; void handleRemove(FeedToken token, const RemoveOperation &rmOp) override; void prepareDeleteBucket(DeleteBucketOperation &delOp) override; - void handleDeleteBucket(const DeleteBucketOperation &delOp) override; + void handleDeleteBucket(const DeleteBucketOperation &delOp, DoneCallback onDone) override; void prepareMove(MoveOperation &putOp) override; - void handleMove(const MoveOperation &putOp, std::shared_ptr<vespalib::IDestructorCallback> doneCtx) override; - void heartBeat(search::SerialNum serialNum) override; + void handleMove(const MoveOperation &putOp, DoneCallback doneCtx) override; + void heartBeat(search::SerialNum serialNum, DoneCallback onDone) override; void forceCommit(const CommitParam & param, DoneCallback onDone) override; /** @@ -244,8 +242,8 @@ public: * * Called by writer thread. 
*/ - void handlePruneRemovedDocuments(const PruneRemovedDocumentsOperation &pruneOp) override; - void handleCompactLidSpace(const CompactLidSpaceOperation &op) override; + void handlePruneRemovedDocuments(const PruneRemovedDocumentsOperation &pruneOp, DoneCallback onDone) override; + void handleCompactLidSpace(const CompactLidSpaceOperation &op, DoneCallback onDone) override; std::shared_ptr<PendingLidTrackerBase> getUncommittedLidTracker() { return _pendingLidsForCommit; } }; diff --git a/searchcore/src/vespa/searchcore/proton/summaryengine/summaryengine.h b/searchcore/src/vespa/searchcore/proton/summaryengine/summaryengine.h index 7f6d9328491..c49649de1e3 100644 --- a/searchcore/src/vespa/searchcore/proton/summaryengine/summaryengine.h +++ b/searchcore/src/vespa/searchcore/proton/summaryengine/summaryengine.h @@ -72,7 +72,7 @@ public: /** * Returns the underlying executor. Only used for state explorers. */ - const vespalib::SyncableThreadExecutor& get_executor() const { return _executor; } + const vespalib::ThreadExecutor& get_executor() const { return _executor; } /** * Starts the underlying threads. 
This will throw a vespalib::Exception if diff --git a/searchcore/src/vespa/searchcore/proton/test/bucketstatecalculator.h b/searchcore/src/vespa/searchcore/proton/test/bucketstatecalculator.h index a5a0185d787..e218058f01e 100644 --- a/searchcore/src/vespa/searchcore/proton/test/bucketstatecalculator.h +++ b/searchcore/src/vespa/searchcore/proton/test/bucketstatecalculator.h @@ -20,6 +20,7 @@ private: bool _clusterUp; bool _nodeUp; bool _nodeRetired; + bool _nodeMaintenance; public: typedef std::shared_ptr<BucketStateCalculator> SP; @@ -28,7 +29,8 @@ public: _asked(), _clusterUp(true), _nodeUp(true), - _nodeRetired(false) + _nodeRetired(false), + _nodeMaintenance(false) { } BucketStateCalculator &addReady(const document::BucketId &bucket) { @@ -54,6 +56,15 @@ public: return *this; } + BucketStateCalculator& setNodeMaintenance(bool maintenance) noexcept { + _nodeMaintenance = maintenance; + if (maintenance) { + _nodeUp = false; + _nodeRetired = false; + } + return *this; + } + const BucketIdVector &asked() const noexcept { return _asked; } void resetAsked() { _asked.clear(); } @@ -67,6 +78,7 @@ public: bool nodeUp() const override { return _nodeUp; } bool nodeRetired() const override { return _nodeRetired; } bool nodeInitializing() const override { return false; } + bool nodeMaintenance() const noexcept override { return _nodeMaintenance; } }; } diff --git a/searchcore/src/vespa/searchcore/proton/test/documentdb_config_builder.cpp b/searchcore/src/vespa/searchcore/proton/test/documentdb_config_builder.cpp index b147667202a..fbd3dbd2402 100644 --- a/searchcore/src/vespa/searchcore/proton/test/documentdb_config_builder.cpp +++ b/searchcore/src/vespa/searchcore/proton/test/documentdb_config_builder.cpp @@ -6,6 +6,7 @@ #include <vespa/config-rank-profiles.h> #include <vespa/config-attributes.h> #include <vespa/config-indexschema.h> +#include <vespa/document/config/documenttypes_config_fwd.h> #include <vespa/document/repo/documenttyperepo.h> #include 
<vespa/searchsummary/config/config-juniperrc.h> #include <vespa/document/config/config-documenttypes.h> @@ -13,7 +14,6 @@ #include <vespa/searchcore/proton/common/alloc_config.h> #include <vespa/searchcore/proton/server/threading_service_config.h> -using document::DocumenttypesConfig; using search::TuneFileDocumentDB; using search::index::Schema; using vespa::config::search::RankProfilesConfig; diff --git a/searchcore/src/vespa/searchcore/proton/test/dummy_document_store.h b/searchcore/src/vespa/searchcore/proton/test/dummy_document_store.h index d9b83bfc3a8..7194cc4d403 100644 --- a/searchcore/src/vespa/searchcore/proton/test/dummy_document_store.h +++ b/searchcore/src/vespa/searchcore/proton/test/dummy_document_store.h @@ -10,13 +10,11 @@ struct DummyDocumentStore : public search::IDocumentStore { vespalib::string _baseDir; - DummyDocumentStore() - : _baseDir("") - {} + DummyDocumentStore() = default; DummyDocumentStore(const vespalib::string &baseDir) : _baseDir(baseDir) {} - ~DummyDocumentStore() {} + ~DummyDocumentStore() = default; DocumentUP read(search::DocumentIdT, const document::DocumentTypeRepo &) const override { return DocumentUP(); } @@ -25,7 +23,8 @@ struct DummyDocumentStore : public search::IDocumentStore void remove(uint64_t, search::DocumentIdT) override {} void flush(uint64_t) override {} uint64_t initFlush(uint64_t) override { return 0; } - void compact(uint64_t) override {} + void compactBloat(uint64_t) override {} + void compactSpread(uint64_t) override {} uint64_t lastSyncToken() const override { return 0; } uint64_t tentativeLastSyncToken() const override { return 0; } vespalib::system_time getLastFlushTime() const override { return vespalib::system_time(); } @@ -34,7 +33,7 @@ struct DummyDocumentStore : public search::IDocumentStore size_t memoryMeta() const override { return 0; } size_t getDiskFootprint() const override { return 0; } size_t getDiskBloat() const override { return 0; } - size_t getMaxCompactGain() const override { return 
getDiskBloat(); } + size_t getMaxSpreadAsBloat() const override { return getDiskBloat(); } search::CacheStats getCacheStats() const override { return search::CacheStats(); } const vespalib::string &getBaseDir() const override { return _baseDir; } void accept(search::IDocumentStoreReadVisitor &, diff --git a/searchcore/src/vespa/searchcore/proton/test/dummy_document_sub_db.h b/searchcore/src/vespa/searchcore/proton/test/dummy_document_sub_db.h index 5cc0ac5a186..03ddcf3605b 100644 --- a/searchcore/src/vespa/searchcore/proton/test/dummy_document_sub_db.h +++ b/searchcore/src/vespa/searchcore/proton/test/dummy_document_sub_db.h @@ -58,7 +58,7 @@ struct DummyDocumentSubDb : public IDocumentSubDB { return IReprocessingTask::List(); } - void setBucketStateCalculator(const std::shared_ptr<IBucketStateCalculator> &) override { } + void setBucketStateCalculator(const std::shared_ptr<IBucketStateCalculator> &, OnDone) override { } ISearchHandler::SP getSearchView() const override { return ISearchHandler::SP(); } IFeedView::SP getFeedView() const override { return IFeedView::SP(); } void clearViews() override {} diff --git a/searchcore/src/vespa/searchcore/proton/test/dummy_feed_view.h b/searchcore/src/vespa/searchcore/proton/test/dummy_feed_view.h index af88f7fa6b7..51bb3ebc807 100644 --- a/searchcore/src/vespa/searchcore/proton/test/dummy_feed_view.h +++ b/searchcore/src/vespa/searchcore/proton/test/dummy_feed_view.h @@ -25,12 +25,12 @@ struct DummyFeedView : public IFeedView void prepareRemove(RemoveOperation &) override {} void handleRemove(FeedToken, const RemoveOperation &) override {} void prepareDeleteBucket(DeleteBucketOperation &) override {} - void handleDeleteBucket(const DeleteBucketOperation &) override {} + void handleDeleteBucket(const DeleteBucketOperation &, DoneCallback) override {} void prepareMove(MoveOperation &) override {} - void handleMove(const MoveOperation &, std::shared_ptr<vespalib::IDestructorCallback>) override {} - void 
heartBeat(search::SerialNum) override {} - void handlePruneRemovedDocuments(const PruneRemovedDocumentsOperation &) override {} - void handleCompactLidSpace(const CompactLidSpaceOperation &) override {} + void handleMove(const MoveOperation &, DoneCallback) override {} + void heartBeat(search::SerialNum, DoneCallback) override {} + void handlePruneRemovedDocuments(const PruneRemovedDocumentsOperation &, DoneCallback) override {} + void handleCompactLidSpace(const CompactLidSpaceOperation &, DoneCallback) override {} void forceCommit(const CommitParam &, DoneCallback) override { } }; diff --git a/searchcore/src/vespa/searchcore/proton/test/mock_attribute_manager.h b/searchcore/src/vespa/searchcore/proton/test/mock_attribute_manager.h index abc8eb679dd..743cd9af8fc 100644 --- a/searchcore/src/vespa/searchcore/proton/test/mock_attribute_manager.h +++ b/searchcore/src/vespa/searchcore/proton/test/mock_attribute_manager.h @@ -87,7 +87,7 @@ public: return _writables; } void asyncForEachAttribute(std::shared_ptr<IConstAttributeFunctor>) const override { } - void asyncForEachAttribute(std::shared_ptr<IAttributeFunctor>) const override { } + void asyncForEachAttribute(std::shared_ptr<IAttributeFunctor>, OnDone) const override { } ExclusiveAttributeReadAccessor::UP getExclusiveReadAccessor(const vespalib::string &) const override { return ExclusiveAttributeReadAccessor::UP(); diff --git a/searchcore/src/vespa/searchcore/proton/test/mock_shared_threading_service.h b/searchcore/src/vespa/searchcore/proton/test/mock_shared_threading_service.h new file mode 100644 index 00000000000..74965c15cd4 --- /dev/null +++ b/searchcore/src/vespa/searchcore/proton/test/mock_shared_threading_service.h @@ -0,0 +1,28 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+#pragma once + +#include <vespa/searchcore/proton/server/i_shared_threading_service.h> +#include <vespa/vespalib/util/invokeserviceimpl.h> + +namespace proton { + +class MockSharedThreadingService : public ISharedThreadingService { +private: + vespalib::ThreadExecutor& _warmup; + vespalib::ThreadExecutor& _shared; + vespalib::InvokeServiceImpl _invokeService; + +public: + MockSharedThreadingService(vespalib::ThreadExecutor& warmup_in, + vespalib::ThreadExecutor& shared_in) + : _warmup(warmup_in), + _shared(shared_in), + _invokeService(10ms) + {} + vespalib::ThreadExecutor& warmup() override { return _warmup; } + vespalib::ThreadExecutor& shared() override { return _shared; } + vespalib::ISequencedTaskExecutor* field_writer() override { return nullptr; } + vespalib::InvokeService & invokeService() override { return _invokeService; } +}; + +} diff --git a/searchcore/src/vespa/searchcore/proton/test/test.h b/searchcore/src/vespa/searchcore/proton/test/test.h index 1494823e899..4231d5e7717 100644 --- a/searchcore/src/vespa/searchcore/proton/test/test.h +++ b/searchcore/src/vespa/searchcore/proton/test/test.h @@ -5,7 +5,6 @@ #include "bucketdocuments.h" #include "bucketstatecalculator.h" #include "document.h" -#include "dummy_document_store.h" #include "dummy_feed_view.h" #include "dummy_summary_manager.h" #include "resulthandler.h" diff --git a/searchcore/src/vespa/searchcore/proton/test/thread_service_observer.h b/searchcore/src/vespa/searchcore/proton/test/thread_service_observer.h index 26a92841999..0f199e10cb1 100644 --- a/searchcore/src/vespa/searchcore/proton/test/thread_service_observer.h +++ b/searchcore/src/vespa/searchcore/proton/test/thread_service_observer.h @@ -5,6 +5,45 @@ namespace proton::test { +class ThreadExecutorObserver : public vespalib::ThreadExecutor +{ +private: + vespalib::ThreadExecutor &_service; + uint32_t _executeCnt; + +public: + ThreadExecutorObserver(vespalib::ThreadExecutor &service) + : _service(service), + _executeCnt(0) + { + } + + 
uint32_t getExecuteCnt() const { return _executeCnt; } + + vespalib::Executor::Task::UP execute(vespalib::Executor::Task::UP task) override { + ++_executeCnt; + return _service.execute(std::move(task)); + } + + size_t getNumThreads() const override { return _service.getNumThreads(); } + + vespalib::ExecutorStats getStats() override { + return _service.getStats(); + } + + void setTaskLimit(uint32_t taskLimit) override { + _service.setTaskLimit(taskLimit); + } + + uint32_t getTaskLimit() const override { + return _service.getTaskLimit(); + } + + void wakeup() override { + _service.wakeup(); + } +}; + class ThreadServiceObserver : public searchcorespi::index::IThreadService { private: @@ -27,14 +66,56 @@ public: void run(vespalib::Runnable &runnable) override { _service.run(runnable); } + + bool isCurrentThread() const override { + return _service.isCurrentThread(); + } + size_t getNumThreads() const override { return _service.getNumThreads(); } + + vespalib::ExecutorStats getStats() override { + return _service.getStats(); + } + + void setTaskLimit(uint32_t taskLimit) override { + _service.setTaskLimit(taskLimit); + } + + uint32_t getTaskLimit() const override { + return _service.getTaskLimit(); + } + + void wakeup() override { + _service.wakeup(); + } +}; + +class SyncableThreadServiceObserver : public searchcorespi::index::ISyncableThreadService +{ +private: + searchcorespi::index::ISyncableThreadService &_service; + uint32_t _executeCnt; + +public: + SyncableThreadServiceObserver(searchcorespi::index::ISyncableThreadService &service) + : _service(service), + _executeCnt(0) + { + } + + uint32_t getExecuteCnt() const { return _executeCnt; } + + vespalib::Executor::Task::UP execute(vespalib::Executor::Task::UP task) override { + ++_executeCnt; + return _service.execute(std::move(task)); + } + void run(vespalib::Runnable &runnable) override { + _service.run(runnable); + } vespalib::Syncable &sync() override { _service.sync(); return *this; } - ThreadServiceObserver 
&shutdown() override { - _service.shutdown(); - return *this; - } + bool isCurrentThread() const override { return _service.isCurrentThread(); } @@ -55,7 +136,6 @@ public: void wakeup() override { _service.wakeup(); } - }; } diff --git a/searchcore/src/vespa/searchcore/proton/test/thread_utils.h b/searchcore/src/vespa/searchcore/proton/test/thread_utils.h index 60a4a95ea8e..d193a9555c3 100644 --- a/searchcore/src/vespa/searchcore/proton/test/thread_utils.h +++ b/searchcore/src/vespa/searchcore/proton/test/thread_utils.h @@ -14,7 +14,17 @@ void runInMasterAndSync(searchcorespi::index::IThreadingService &writeService, FunctionType func) { writeService.master().execute(vespalib::makeLambdaTask(std::move(func))); - writeService.sync_all_executors(); + writeService.master().sync(); +} + +/** + * Run the given function in the master thread. + */ +template <typename FunctionType> +void +runInMaster(searchcorespi::index::IThreadingService &writeService, FunctionType func) +{ + writeService.master().execute(vespalib::makeLambdaTask(std::move(func))); } } diff --git a/searchcore/src/vespa/searchcore/proton/test/threading_service_observer.h b/searchcore/src/vespa/searchcore/proton/test/threading_service_observer.h index 46527362091..e93b1632b3f 100644 --- a/searchcore/src/vespa/searchcore/proton/test/threading_service_observer.h +++ b/searchcore/src/vespa/searchcore/proton/test/threading_service_observer.h @@ -12,10 +12,10 @@ class ThreadingServiceObserver : public searchcorespi::index::IThreadingService { private: searchcorespi::index::IThreadingService &_service; - ThreadServiceObserver _master; - ThreadServiceObserver _index; - ThreadServiceObserver _summary; - vespalib::ThreadExecutor & _shared; + SyncableThreadServiceObserver _master; + ThreadServiceObserver _index; + ThreadExecutorObserver _summary; + vespalib::ThreadExecutor & _shared; vespalib::SequencedTaskExecutorObserver _indexFieldInverter; vespalib::SequencedTaskExecutorObserver _indexFieldWriter; 
vespalib::SequencedTaskExecutorObserver _attributeFieldWriter; @@ -23,31 +23,27 @@ private: public: ThreadingServiceObserver(searchcorespi::index::IThreadingService &service); ~ThreadingServiceObserver() override; - const ThreadServiceObserver &masterObserver() const { + const SyncableThreadServiceObserver &masterObserver() const { return _master; } const ThreadServiceObserver &indexObserver() const { return _index; } - const ThreadServiceObserver &summaryObserver() const { + const ThreadExecutorObserver &summaryObserver() const { return _summary; } - void sync_all_executors() override { - _service.sync_all_executors(); - } - void blocking_master_execute(vespalib::Executor::Task::UP task) override { _service.blocking_master_execute(std::move(task)); } - searchcorespi::index::IThreadService &master() override { + searchcorespi::index::ISyncableThreadService &master() override { return _master; } searchcorespi::index::IThreadService &index() override { return _index; } - searchcorespi::index::IThreadService &summary() override { + vespalib::ThreadExecutor &summary() override { return _summary; } vespalib::ThreadExecutor &shared() override { diff --git a/searchcorespi/src/vespa/searchcorespi/index/i_thread_service.h b/searchcorespi/src/vespa/searchcorespi/index/i_thread_service.h index b4e51e2dd1b..f973908b62d 100644 --- a/searchcorespi/src/vespa/searchcorespi/index/i_thread_service.h +++ b/searchcorespi/src/vespa/searchcorespi/index/i_thread_service.h @@ -9,7 +9,7 @@ namespace searchcorespi::index { /** * Interface for a single thread used for write tasks. */ -struct IThreadService : public vespalib::SyncableThreadExecutor +struct IThreadService : public vespalib::ThreadExecutor { IThreadService(const IThreadService &) = delete; IThreadService & operator = (const IThreadService &) = delete; @@ -25,6 +25,9 @@ struct IThreadService : public vespalib::SyncableThreadExecutor * Returns whether the current thread is the underlying thread. 
*/ virtual bool isCurrentThread() const = 0; +}; + +struct ISyncableThreadService : public IThreadService, vespalib::Syncable { }; diff --git a/searchcorespi/src/vespa/searchcorespi/index/indexmaintainer.cpp b/searchcorespi/src/vespa/searchcorespi/index/indexmaintainer.cpp index 19469d59d1b..a2bd19c3d29 100644 --- a/searchcorespi/src/vespa/searchcorespi/index/indexmaintainer.cpp +++ b/searchcorespi/src/vespa/searchcorespi/index/indexmaintainer.cpp @@ -328,10 +328,8 @@ IndexMaintainer::flushMemoryIndex(IMemoryIndex &memoryIndex, updateDiskIndexSchema(flushDir, *prunedSchema, noSerialNumHigh); } IndexWriteUtilities::writeSourceSelector(saveInfo, indexId, getAttrTune(), - _ctx.getFileHeaderContext(), - serialNum); - IndexWriteUtilities::writeSerialNum(serialNum, flushDir, - _ctx.getFileHeaderContext()); + _ctx.getFileHeaderContext(), serialNum); + IndexWriteUtilities::writeSerialNum(serialNum, flushDir, _ctx.getFileHeaderContext()); return loadDiskIndex(flushDir); } @@ -696,7 +694,7 @@ IndexMaintainer::doneFusion(FusionArgs *args, IDiskIndex::SP *new_index) } bool -IndexMaintainer::makeSureAllRemainingWarmupIsDone(ISearchableIndexCollection::SP keepAlive) +IndexMaintainer::makeSureAllRemainingWarmupIsDone(std::shared_ptr<WarmupIndexCollection> keepAlive) { // called by warmupDone via reconfigurer, warmupDone() doesn't wait for us assert(_ctx.getThreadingService().master().isCurrentThread()); @@ -713,13 +711,13 @@ IndexMaintainer::makeSureAllRemainingWarmupIsDone(ISearchableIndexCollection::SP LOG(info, "New index warmed up and switched in : %s", warmIndex->toString().c_str()); } LOG(info, "Sync warmupExecutor."); - _ctx.getWarmupExecutor().sync(); + keepAlive->drainPending(); LOG(info, "Now the keep alive of the warmupindexcollection should be gone."); return true; } void -IndexMaintainer::warmupDone(ISearchableIndexCollection::SP current) +IndexMaintainer::warmupDone(std::shared_ptr<WarmupIndexCollection> current) { // Called by a search thread LockGuard 
lock(_new_search_lock); diff --git a/searchcorespi/src/vespa/searchcorespi/index/indexmaintainer.h b/searchcorespi/src/vespa/searchcorespi/index/indexmaintainer.h index 6e4eb32ee50..8213c02b90c 100644 --- a/searchcorespi/src/vespa/searchcorespi/index/indexmaintainer.h +++ b/searchcorespi/src/vespa/searchcorespi/index/indexmaintainer.h @@ -257,8 +257,8 @@ class IndexMaintainer : public IIndexManager, * result. */ bool reconfigure(std::unique_ptr<Configure> configure); - void warmupDone(ISearchableIndexCollection::SP current) override; - bool makeSureAllRemainingWarmupIsDone(ISearchableIndexCollection::SP keepAlive); + void warmupDone(std::shared_ptr<WarmupIndexCollection> current) override; + bool makeSureAllRemainingWarmupIsDone(std::shared_ptr<WarmupIndexCollection> keepAlive); void commit_and_wait(); void commit(vespalib::Gate& gate); void pruneRemovedFields(const Schema &schema, SerialNum serialNum); diff --git a/searchcorespi/src/vespa/searchcorespi/index/indexmaintainercontext.cpp b/searchcorespi/src/vespa/searchcorespi/index/indexmaintainercontext.cpp index 522789e7fe8..efd7827fc3d 100644 --- a/searchcorespi/src/vespa/searchcorespi/index/indexmaintainercontext.cpp +++ b/searchcorespi/src/vespa/searchcorespi/index/indexmaintainercontext.cpp @@ -11,7 +11,7 @@ namespace searchcorespi::index { IndexMaintainerContext::IndexMaintainerContext(IThreadingService &threadingService, IIndexManager::Reconfigurer &reconfigurer, const FileHeaderContext &fileHeaderContext, - vespalib::SyncableThreadExecutor & warmupExecutor) + vespalib::Executor & warmupExecutor) : _threadingService(threadingService), _reconfigurer(reconfigurer), _fileHeaderContext(fileHeaderContext), diff --git a/searchcorespi/src/vespa/searchcorespi/index/indexmaintainercontext.h b/searchcorespi/src/vespa/searchcorespi/index/indexmaintainercontext.h index c90659c55bf..2c7aa4af48e 100644 --- a/searchcorespi/src/vespa/searchcorespi/index/indexmaintainercontext.h +++ 
b/searchcorespi/src/vespa/searchcorespi/index/indexmaintainercontext.h @@ -17,13 +17,13 @@ private: IThreadingService &_threadingService; IIndexManager::Reconfigurer &_reconfigurer; const search::common::FileHeaderContext &_fileHeaderContext; - vespalib::SyncableThreadExecutor & _warmupExecutor; + vespalib::Executor & _warmupExecutor; public: IndexMaintainerContext(IThreadingService &threadingService, IIndexManager::Reconfigurer &reconfigurer, const search::common::FileHeaderContext &fileHeaderContext, - vespalib::SyncableThreadExecutor & warmupExecutor); + vespalib::Executor & warmupExecutor); /** * Returns the treading service that encapsulates the thread model used for writing. @@ -49,7 +49,7 @@ public: /** * @return The executor that should be used for warmup. */ - vespalib::SyncableThreadExecutor & getWarmupExecutor() const { return _warmupExecutor; } + vespalib::Executor & getWarmupExecutor() const { return _warmupExecutor; } }; } diff --git a/searchcorespi/src/vespa/searchcorespi/index/ithreadingservice.h b/searchcorespi/src/vespa/searchcorespi/index/ithreadingservice.h index 0660f3ab495..c95a42f601b 100644 --- a/searchcorespi/src/vespa/searchcorespi/index/ithreadingservice.h +++ b/searchcorespi/src/vespa/searchcorespi/index/ithreadingservice.h @@ -63,17 +63,15 @@ struct IThreadingService IThreadingService() = default; virtual ~IThreadingService() = default; - virtual void sync_all_executors() = 0; - /** * Block the calling thread until the master thread has capacity to handle more tasks, * and then execute the given task in the master thread. 
*/ virtual void blocking_master_execute(vespalib::Executor::Task::UP task) = 0; - virtual IThreadService &master() = 0; + virtual ISyncableThreadService &master() = 0; virtual IThreadService &index() = 0; - virtual IThreadService &summary() = 0; + virtual vespalib::ThreadExecutor &summary() = 0; virtual vespalib::ThreadExecutor &shared() = 0; virtual vespalib::ISequencedTaskExecutor &indexFieldInverter() = 0; virtual vespalib::ISequencedTaskExecutor &indexFieldWriter() = 0; diff --git a/searchcorespi/src/vespa/searchcorespi/index/warmupindexcollection.cpp b/searchcorespi/src/vespa/searchcorespi/index/warmupindexcollection.cpp index d6aba7c6ff1..b9cbdab1c0a 100644 --- a/searchcorespi/src/vespa/searchcorespi/index/warmupindexcollection.cpp +++ b/searchcorespi/src/vespa/searchcorespi/index/warmupindexcollection.cpp @@ -5,6 +5,7 @@ #include <vespa/searchlib/query/tree/termnodes.h> #include <vespa/vespalib/stllike/hash_map.hpp> #include <vespa/vespalib/stllike/hash_set.h> +#include <thread> #include <vespa/log/log.h> LOG_SETUP(".searchcorespi.index.warmupindexcollection"); @@ -29,7 +30,7 @@ WarmupIndexCollection::WarmupIndexCollection(const WarmupConfig & warmupConfig, ISearchableIndexCollection::SP prev, ISearchableIndexCollection::SP next, IndexSearchable & warmup, - vespalib::SyncableThreadExecutor & executor, + vespalib::Executor & executor, IWarmupDone & warmupDone) : _warmupConfig(warmupConfig), _prev(std::move(prev)), @@ -38,7 +39,8 @@ WarmupIndexCollection::WarmupIndexCollection(const WarmupConfig & warmupConfig, _executor(executor), _warmupDone(warmupDone), _warmupEndTime(vespalib::steady_clock::now() + warmupConfig.getDuration()), - _handledTerms(std::make_unique<FieldTermMap>()) + _handledTerms(std::make_unique<FieldTermMap>()), + _pendingTasks() { if (_next->valid()) { setCurrentIndex(_next->getCurrentIndex()); @@ -79,7 +81,7 @@ WarmupIndexCollection::~WarmupIndexCollection() if (_warmupEndTime != vespalib::steady_time()) { LOG(info, "Warmup aborted due to 
new state change or application shutdown"); } - _executor.sync(); + assert(_pendingTasks.has_zero_ref_count()); } const ISourceSelector & @@ -164,7 +166,7 @@ WarmupIndexCollection::createBlueprint(const IRequestContext & requestContext, needWarmUp = needWarmUp || ! handledBefore(fs.getFieldId(), term); } if (needWarmUp) { - auto task = std::make_unique<WarmupTask>(mdl.createMatchData(), *this); + auto task = std::make_unique<WarmupTask>(mdl.createMatchData(), shared_from_this()); task->createBlueprint(fsl, term); fireWarmup(std::move(task)); } @@ -216,25 +218,32 @@ WarmupIndexCollection::getSearchableSP(uint32_t i) const return _next->getSearchableSP(i); } -WarmupIndexCollection::WarmupTask::WarmupTask(std::unique_ptr<MatchData> md, WarmupIndexCollection & warmup) - : _warmup(warmup), +void +WarmupIndexCollection::drainPending() { + _pendingTasks.waitForZeroRefCount(); +} + +WarmupIndexCollection::WarmupTask::WarmupTask(std::unique_ptr<MatchData> md, std::shared_ptr<WarmupIndexCollection> warmup) + : _warmup(std::move(warmup)), + _retainGuard(_warmup->_pendingTasks), _matchData(std::move(md)), _bluePrint(), _requestContext() -{ } +{ +} WarmupIndexCollection::WarmupTask::~WarmupTask() = default; void WarmupIndexCollection::WarmupTask::run() { - if (_warmup._warmupEndTime != vespalib::steady_time()) { + if (_warmup->_warmupEndTime != vespalib::steady_time()) { LOG(debug, "Warming up %s", _bluePrint->asString().c_str()); _bluePrint->fetchPostings(search::queryeval::ExecuteInfo::TRUE); SearchIterator::UP it(_bluePrint->createSearch(*_matchData, true)); it->initFullRange(); for (uint32_t docId = it->seekFirst(1); !it->isAtEnd(); docId = it->seekNext(docId+1)) { - if (_warmup.doUnpack()) { + if (_warmup->doUnpack()) { it->unpack(docId); } } diff --git a/searchcorespi/src/vespa/searchcorespi/index/warmupindexcollection.h b/searchcorespi/src/vespa/searchcorespi/index/warmupindexcollection.h index d18e43b56a7..b0b2952bee8 100644 --- 
a/searchcorespi/src/vespa/searchcorespi/index/warmupindexcollection.h +++ b/searchcorespi/src/vespa/searchcorespi/index/warmupindexcollection.h @@ -5,16 +5,19 @@ #include "isearchableindexcollection.h" #include "warmupconfig.h" #include <vespa/vespalib/util/threadexecutor.h> +#include <vespa/vespalib/util/monitored_refcount.h> +#include <vespa/vespalib/util/retain_guard.h> #include <vespa/searchlib/queryeval/fake_requestcontext.h> namespace searchcorespi { class FieldTermMap; +class WarmupIndexCollection; class IWarmupDone { public: virtual ~IWarmupDone() { } - virtual void warmupDone(ISearchableIndexCollection::SP current) = 0; + virtual void warmupDone(std::shared_ptr<WarmupIndexCollection> current) = 0; }; /** * Index collection that holds a reference to the active one and a new one that @@ -30,7 +33,7 @@ public: ISearchableIndexCollection::SP prev, ISearchableIndexCollection::SP next, IndexSearchable & warmup, - vespalib::SyncableThreadExecutor & executor, + vespalib::Executor & executor, IWarmupDone & warmupDone); ~WarmupIndexCollection() override; // Implements IIndexCollection @@ -64,28 +67,30 @@ public: const ISearchableIndexCollection::SP & getNextIndexCollection() const { return _next; } vespalib::string toString() const override; bool doUnpack() const { return _warmupConfig.getUnpack(); } + void drainPending(); private: typedef search::fef::MatchData MatchData; typedef search::queryeval::FakeRequestContext FakeRequestContext; typedef vespalib::Executor::Task Task; class WarmupTask : public Task { public: - WarmupTask(std::unique_ptr<MatchData> md, WarmupIndexCollection & warmup); + WarmupTask(std::unique_ptr<MatchData> md, std::shared_ptr<WarmupIndexCollection> warmup); ~WarmupTask() override; WarmupTask &createBlueprint(const FieldSpec &field, const Node &term) { - _bluePrint = _warmup.createBlueprint(_requestContext, field, term); + _bluePrint = _warmup->createBlueprint(_requestContext, field, term); return *this; } WarmupTask &createBlueprint(const 
FieldSpecList &fields, const Node &term) { - _bluePrint = _warmup.createBlueprint(_requestContext, fields, term); + _bluePrint = _warmup->createBlueprint(_requestContext, fields, term); return *this; } private: void run() override; - WarmupIndexCollection & _warmup; - std::unique_ptr<MatchData> _matchData; - Blueprint::UP _bluePrint; - FakeRequestContext _requestContext; + std::shared_ptr<WarmupIndexCollection> _warmup; + vespalib::RetainGuard _retainGuard; + std::unique_ptr<MatchData> _matchData; + Blueprint::UP _bluePrint; + FakeRequestContext _requestContext; }; void fireWarmup(Task::UP task); @@ -95,11 +100,12 @@ private: ISearchableIndexCollection::SP _prev; ISearchableIndexCollection::SP _next; IndexSearchable & _warmup; - vespalib::SyncableThreadExecutor & _executor; + vespalib::Executor & _executor; IWarmupDone & _warmupDone; vespalib::steady_time _warmupEndTime; std::mutex _lock; std::unique_ptr<FieldTermMap> _handledTerms; + vespalib::MonitoredRefCount _pendingTasks; }; } // namespace searchcorespi diff --git a/searchlib/abi-spec.json b/searchlib/abi-spec.json index e5611324254..2d7daf2300e 100644 --- a/searchlib/abi-spec.json +++ b/searchlib/abi-spec.json @@ -1457,6 +1457,7 @@ "protected void <init>(com.google.common.collect.ImmutableMap, java.util.Map)", "public com.yahoo.searchlib.rankingexpression.ExpressionFunction getFunction(java.lang.String)", "protected com.google.common.collect.ImmutableMap functions()", + "protected java.util.Map getFunctions()", "public java.lang.String getBinding(java.lang.String)", "public com.yahoo.searchlib.rankingexpression.rule.FunctionReferenceContext withBindings(java.util.Map)", "public com.yahoo.searchlib.rankingexpression.rule.FunctionReferenceContext withoutBindings()" @@ -1611,6 +1612,7 @@ "public void <init>(java.util.Map)", "public void <init>(java.util.Collection, java.util.Map)", "public void <init>(java.util.Collection, java.util.Map, java.util.Map)", + "public void <init>(java.util.Map, java.util.Map, 
java.util.Map)", "public void <init>(com.google.common.collect.ImmutableMap, java.util.Map, java.util.Map)", "public void addFunctionSerialization(java.lang.String, java.lang.String)", "public void addArgumentTypeSerialization(java.lang.String, java.lang.String, com.yahoo.tensor.TensorType)", diff --git a/searchlib/src/apps/tests/memoryindexstress_test.cpp b/searchlib/src/apps/tests/memoryindexstress_test.cpp index 54864702a47..fcac0be8f85 100644 --- a/searchlib/src/apps/tests/memoryindexstress_test.cpp +++ b/searchlib/src/apps/tests/memoryindexstress_test.cpp @@ -73,7 +73,7 @@ makeSchema() return schema; } -document::DocumenttypesConfig +document::config::DocumenttypesConfig makeDocTypeRepoConfig() { const int32_t doc_type_id = 787121340; diff --git a/searchlib/src/main/java/com/yahoo/searchlib/ranking/features/ElementCompleteness.java b/searchlib/src/main/java/com/yahoo/searchlib/ranking/features/ElementCompleteness.java index b86cc62d3d1..af26a423906 100644 --- a/searchlib/src/main/java/com/yahoo/searchlib/ranking/features/ElementCompleteness.java +++ b/searchlib/src/main/java/com/yahoo/searchlib/ranking/features/ElementCompleteness.java @@ -1,7 +1,7 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.searchlib.ranking.features; -import com.google.common.annotations.Beta; +import com.yahoo.api.annotations.Beta; import com.yahoo.searchlib.rankingexpression.evaluation.DoubleValue; import com.yahoo.searchlib.rankingexpression.evaluation.Value; diff --git a/searchlib/src/main/java/com/yahoo/searchlib/ranking/features/Features.java b/searchlib/src/main/java/com/yahoo/searchlib/ranking/features/Features.java index e004564d385..ee8f22b2e36 100644 --- a/searchlib/src/main/java/com/yahoo/searchlib/ranking/features/Features.java +++ b/searchlib/src/main/java/com/yahoo/searchlib/ranking/features/Features.java @@ -1,7 +1,7 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. 
See LICENSE in the project root. package com.yahoo.searchlib.ranking.features; -import com.google.common.annotations.Beta; +import com.yahoo.api.annotations.Beta; import com.yahoo.searchlib.rankingexpression.evaluation.Value; import java.util.Collections; diff --git a/searchlib/src/main/java/com/yahoo/searchlib/ranking/features/FieldTermMatch.java b/searchlib/src/main/java/com/yahoo/searchlib/ranking/features/FieldTermMatch.java index 9f86dadc378..752ef137ead 100644 --- a/searchlib/src/main/java/com/yahoo/searchlib/ranking/features/FieldTermMatch.java +++ b/searchlib/src/main/java/com/yahoo/searchlib/ranking/features/FieldTermMatch.java @@ -1,7 +1,7 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.searchlib.ranking.features; -import com.google.common.annotations.Beta; +import com.yahoo.api.annotations.Beta; import com.yahoo.searchlib.rankingexpression.evaluation.DoubleValue; import com.yahoo.searchlib.rankingexpression.evaluation.Value; diff --git a/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/FeatureList.java b/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/FeatureList.java index 6e752e4a168..ad24e89c1f7 100755 --- a/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/FeatureList.java +++ b/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/FeatureList.java @@ -1,7 +1,7 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.searchlib.rankingexpression; -import com.google.common.annotations.Beta; +import com.yahoo.api.annotations.Beta; import com.yahoo.searchlib.rankingexpression.parser.ParseException; import com.yahoo.searchlib.rankingexpression.parser.RankingExpressionParser; import com.yahoo.searchlib.rankingexpression.parser.TokenMgrException; diff --git a/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/evaluation/TensorValue.java b/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/evaluation/TensorValue.java index fb195d14040..b37bbb543eb 100644 --- a/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/evaluation/TensorValue.java +++ b/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/evaluation/TensorValue.java @@ -1,7 +1,7 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.searchlib.rankingexpression.evaluation; -import com.google.common.annotations.Beta; +import com.yahoo.api.annotations.Beta; import com.yahoo.searchlib.rankingexpression.rule.Function; import com.yahoo.searchlib.rankingexpression.rule.TruthOperator; import com.yahoo.tensor.Tensor; diff --git a/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/rule/FunctionReferenceContext.java b/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/rule/FunctionReferenceContext.java index f0586297b0d..287bc2655f5 100644 --- a/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/rule/FunctionReferenceContext.java +++ b/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/rule/FunctionReferenceContext.java @@ -17,7 +17,7 @@ import java.util.Map; public class FunctionReferenceContext { /** Expression functions indexed by name */ - private final ImmutableMap<String, ExpressionFunction> functions; + private final Map<String, ExpressionFunction> functions; /** Mapping from argument names to the expressions they resolve to */ private final Map<String, String> 
bindings = new HashMap<>(); @@ -43,26 +43,32 @@ public class FunctionReferenceContext { /** Create a context for a single serialization task */ public FunctionReferenceContext(Map<String, ExpressionFunction> functions, Map<String, String> bindings) { - this(ImmutableMap.copyOf(functions), bindings); + this.functions = Map.copyOf(functions); + if (bindings != null) + this.bindings.putAll(bindings); } + /** @deprecated Use {@link #FunctionReferenceContext(Map, Map)} instead */ + @Deprecated(forRemoval = true, since = "7") protected FunctionReferenceContext(ImmutableMap<String, ExpressionFunction> functions, Map<String, String> bindings) { - this.functions = functions; - if (bindings != null) - this.bindings.putAll(bindings); + this((Map<String, ExpressionFunction>)functions, bindings); } - private static ImmutableMap<String, ExpressionFunction> toMap(Collection<ExpressionFunction> list) { - ImmutableMap.Builder<String,ExpressionFunction> mapBuilder = new ImmutableMap.Builder<>(); + private static Map<String, ExpressionFunction> toMap(Collection<ExpressionFunction> list) { + Map<String, ExpressionFunction> mapBuilder = new HashMap<>(); for (ExpressionFunction function : list) mapBuilder.put(function.getName(), function); - return mapBuilder.build(); + return Map.copyOf(mapBuilder); } /** Returns a function or null if it isn't defined in this context */ public ExpressionFunction getFunction(String name) { return functions.get(name); } - protected ImmutableMap<String, ExpressionFunction> functions() { return functions; } + /** @deprecated Use {@link #getFunctions()} instead */ + @Deprecated(forRemoval = true, since = "7") + protected ImmutableMap<String, ExpressionFunction> functions() { return ImmutableMap.copyOf(functions); } + + protected Map<String, ExpressionFunction> getFunctions() { return functions; } /** Returns the resolution of an identifier, or null if it isn't defined in this context */ public String getBinding(String name) { return bindings.get(name); } 
diff --git a/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/rule/SerializationContext.java b/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/rule/SerializationContext.java index cd2f966cc22..535ad013caf 100644 --- a/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/rule/SerializationContext.java +++ b/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/rule/SerializationContext.java @@ -8,6 +8,7 @@ import com.yahoo.tensor.TensorType; import java.util.Collection; import java.util.Collections; +import java.util.HashMap; import java.util.LinkedHashMap; import java.util.Map; @@ -54,11 +55,11 @@ public class SerializationContext extends FunctionReferenceContext { this(toMap(functions), bindings, serializedFunctions); } - private static ImmutableMap<String, ExpressionFunction> toMap(Collection<ExpressionFunction> list) { - ImmutableMap.Builder<String,ExpressionFunction> mapBuilder = new ImmutableMap.Builder<>(); + private static Map<String, ExpressionFunction> toMap(Collection<ExpressionFunction> list) { + Map<String,ExpressionFunction> mapBuilder = new HashMap<>(); for (ExpressionFunction function : list) mapBuilder.put(function.getName(), function); - return mapBuilder.build(); + return Map.copyOf(mapBuilder); } /** @@ -69,12 +70,19 @@ public class SerializationContext extends FunctionReferenceContext { * @param serializedFunctions a cache of serializedFunctions - the ownership of this map * is <b>transferred</b> to this and will be modified in it */ - public SerializationContext(ImmutableMap<String,ExpressionFunction> functions, Map<String, String> bindings, + public SerializationContext(Map<String,ExpressionFunction> functions, Map<String, String> bindings, Map<String, String> serializedFunctions) { super(functions, bindings); this.serializedFunctions = serializedFunctions; } + /** @deprecated Use {@link #SerializationContext(Map, Map, Map) instead}*/ + @Deprecated(forRemoval = true, since = "7") + public 
SerializationContext(ImmutableMap<String,ExpressionFunction> functions, Map<String, String> bindings, + Map<String, String> serializedFunctions) { + this((Map<String, ExpressionFunction>)functions, bindings, serializedFunctions); + } + /** Adds the serialization of a function */ public void addFunctionSerialization(String name, String expressionString) { serializedFunctions.put(name, expressionString); @@ -93,13 +101,13 @@ public class SerializationContext extends FunctionReferenceContext { @Override public SerializationContext withBindings(Map<String, String> bindings) { - return new SerializationContext(functions(), bindings, this.serializedFunctions); + return new SerializationContext(getFunctions(), bindings, this.serializedFunctions); } /** Returns a fresh context without bindings */ @Override public SerializationContext withoutBindings() { - return new SerializationContext(functions(), null, this.serializedFunctions); + return new SerializationContext(getFunctions(), null, this.serializedFunctions); } public Map<String, String> serializedFunctions() { return serializedFunctions; } diff --git a/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/rule/TensorFunctionNode.java b/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/rule/TensorFunctionNode.java index 2d06e171097..d873963bb6e 100644 --- a/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/rule/TensorFunctionNode.java +++ b/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/rule/TensorFunctionNode.java @@ -1,7 +1,7 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.searchlib.rankingexpression.rule; -import com.google.common.annotations.Beta; +import com.yahoo.api.annotations.Beta; import com.google.common.collect.ImmutableMap; import com.yahoo.searchlib.rankingexpression.ExpressionFunction; import com.yahoo.searchlib.rankingexpression.Reference; @@ -328,7 +328,14 @@ public class TensorFunctionNode extends CompositeNode { /** Returns a function or null if it isn't defined in this context */ public ExpressionFunction getFunction(String name) { return wrappedSerializationContext.getFunction(name); } - protected ImmutableMap<String, ExpressionFunction> functions() { return wrappedSerializationContext.functions(); } + /** @deprecated Use {@link #getFunctions()} instead */ + @SuppressWarnings("removal") + @Deprecated(forRemoval = true, since = "7") + protected ImmutableMap<String, ExpressionFunction> functions() { + return ImmutableMap.copyOf(wrappedSerializationContext.getFunctions()); + } + + @Override protected Map<String, ExpressionFunction> getFunctions() { return wrappedSerializationContext.getFunctions(); } public ToStringContext parent() { return wrappedToStringContext; } @@ -344,14 +351,14 @@ public class TensorFunctionNode extends CompositeNode { /** Returns a new context with the bindings replaced by the given bindings */ @Override public ExpressionToStringContext withBindings(Map<String, String> bindings) { - SerializationContext serializationContext = new SerializationContext(functions(), bindings, serializedFunctions()); + SerializationContext serializationContext = new SerializationContext(getFunctions(), bindings, serializedFunctions()); return new ExpressionToStringContext(serializationContext, wrappedToStringContext, path, parent); } /** Returns a fresh context without bindings */ @Override public SerializationContext withoutBindings() { - SerializationContext serializationContext = new SerializationContext(functions(), null, serializedFunctions()); + SerializationContext serializationContext = 
new SerializationContext(getFunctions(), null, serializedFunctions()); return new ExpressionToStringContext(serializationContext, null, path, parent); } } diff --git a/searchlib/src/tests/attribute/attribute_header/attribute_header_test.cpp b/searchlib/src/tests/attribute/attribute_header/attribute_header_test.cpp index 3c8c9ff17e0..16a04a746f3 100644 --- a/searchlib/src/tests/attribute/attribute_header/attribute_header_test.cpp +++ b/searchlib/src/tests/attribute/attribute_header/attribute_header_test.cpp @@ -49,7 +49,7 @@ void verify_roundtrip_serialization(const HnswIPO& hnsw_params_in) { auto gen_header = populate_header(hnsw_params_in); - auto attr_header = AttributeHeader::extractTags(gen_header); + auto attr_header = AttributeHeader::extractTags(gen_header, file_name); EXPECT_EQ(tensor_cfg.basicType(), attr_header.getBasicType()); EXPECT_EQ(tensor_cfg.collectionType(), attr_header.getCollectionType()); diff --git a/searchlib/src/tests/attribute/compaction/attribute_compaction_test.cpp b/searchlib/src/tests/attribute/compaction/attribute_compaction_test.cpp index 801aa9341fb..fbec89d27eb 100644 --- a/searchlib/src/tests/attribute/compaction/attribute_compaction_test.cpp +++ b/searchlib/src/tests/attribute/compaction/attribute_compaction_test.cpp @@ -16,6 +16,7 @@ using search::attribute::Config; using search::attribute::BasicType; using search::attribute::CollectionType; using vespalib::AddressSpace; +using vespalib::datastore::CompactionStrategy; using AttributePtr = AttributeVector::SP; using AttributeStatus = search::attribute::Status; @@ -237,7 +238,7 @@ TEST_F("Compaction limits address space usage (dead) when free lists are NOT use { populate_and_hammer(f, true); AddressSpace afterSpace = f.getMultiValueAddressSpaceUsage("after"); - EXPECT_GREATER(search::CompactionStrategy::DEAD_ADDRESS_SPACE_SLACK, afterSpace.dead()); + EXPECT_GREATER(CompactionStrategy::DEAD_ADDRESS_SPACE_SLACK, afterSpace.dead()); } TEST_F("Compaction is not executed when free lists 
are used", @@ -266,7 +267,7 @@ TEST_F("Compaction is peformed when compaction strategy is changed to enable com f._v->commit(); // new commit might trigger further compaction after2 = f.getMultiValueAddressSpaceUsage("after2"); } - EXPECT_GREATER(search::CompactionStrategy::DEAD_ADDRESS_SPACE_SLACK, after2.dead()); + EXPECT_GREATER(CompactionStrategy::DEAD_ADDRESS_SPACE_SLACK, after2.dead()); } TEST_MAIN() { TEST_RUN_ALL(); } diff --git a/searchlib/src/tests/attribute/enum_attribute_compaction/enum_attribute_compaction_test.cpp b/searchlib/src/tests/attribute/enum_attribute_compaction/enum_attribute_compaction_test.cpp index 2c8fc2966b0..b30b3e4eb71 100644 --- a/searchlib/src/tests/attribute/enum_attribute_compaction/enum_attribute_compaction_test.cpp +++ b/searchlib/src/tests/attribute/enum_attribute_compaction/enum_attribute_compaction_test.cpp @@ -175,7 +175,7 @@ void CompactionTest<VectorType>::test_enum_store_compaction() { constexpr uint32_t canary_stride = 256; - uint32_t dead_limit = search::CompactionStrategy::DEAD_BYTES_SLACK / 8; + uint32_t dead_limit = vespalib::datastore::CompactionStrategy::DEAD_BYTES_SLACK / 8; uint32_t doc_count = dead_limit * 3; if (_v->hasMultiValue() || std::is_same_v<VectorType,StringAttribute>) { doc_count /= 2; diff --git a/searchlib/src/tests/attribute/enumstore/enumstore_test.cpp b/searchlib/src/tests/attribute/enumstore/enumstore_test.cpp index 9c25429932b..5346cc7f764 100644 --- a/searchlib/src/tests/attribute/enumstore/enumstore_test.cpp +++ b/searchlib/src/tests/attribute/enumstore/enumstore_test.cpp @@ -7,7 +7,23 @@ LOG_SETUP("enumstore_test"); using Type = search::DictionaryConfig::Type; +using vespalib::datastore::CompactionStrategy; using vespalib::datastore::EntryRef; +using vespalib::datastore::EntryRefFilter; +using RefT = vespalib::datastore::EntryRefT<22>; + +namespace vespalib::datastore { + +/* + * Print EntryRef as RefT which is used by test_normalize_posting_lists and + * test_foreach_posting_list to 
differentiate between buffers + */ +void PrintTo(const EntryRef &ref, std::ostream* os) { + RefT iref(ref); + *os << "RefT(" << iref.offset() << "," << iref.bufferId() << ")"; +} + +} namespace search { @@ -346,16 +362,16 @@ TEST(EnumStoreTest, address_space_usage_is_reported) NumericEnumStore store(false, DictionaryConfig::Type::BTREE); using vespalib::AddressSpace; - EXPECT_EQ(AddressSpace(1, 1, ADDRESS_LIMIT), store.get_address_space_usage()); + EXPECT_EQ(AddressSpace(1, 1, ADDRESS_LIMIT), store.get_values_address_space_usage()); EnumIndex idx1 = store.insert(10); - EXPECT_EQ(AddressSpace(2, 1, ADDRESS_LIMIT), store.get_address_space_usage()); + EXPECT_EQ(AddressSpace(2, 1, ADDRESS_LIMIT), store.get_values_address_space_usage()); EnumIndex idx2 = store.insert(20); // Address limit increases because buffer is re-sized. - EXPECT_EQ(AddressSpace(3, 1, ADDRESS_LIMIT + 2), store.get_address_space_usage()); + EXPECT_EQ(AddressSpace(3, 1, ADDRESS_LIMIT + 2), store.get_values_address_space_usage()); dec_ref_count(store, idx1); - EXPECT_EQ(AddressSpace(3, 2, ADDRESS_LIMIT + 2), store.get_address_space_usage()); + EXPECT_EQ(AddressSpace(3, 2, ADDRESS_LIMIT + 2), store.get_values_address_space_usage()); dec_ref_count(store, idx2); - EXPECT_EQ(AddressSpace(3, 3, ADDRESS_LIMIT + 2), store.get_address_space_usage()); + EXPECT_EQ(AddressSpace(3, 3, ADDRESS_LIMIT + 2), store.get_values_address_space_usage()); } class BatchUpdaterTest : public ::testing::Test { @@ -597,6 +613,11 @@ public: void update_posting_idx(EnumIndex enum_idx, EntryRef old_posting_idx, EntryRef new_posting_idx); EnumIndex insert_value(size_t value_idx); + void populate_sample_data(uint32_t cnt); + std::vector<EntryRef> get_sample_values(uint32_t cnt); + void clear_sample_values(uint32_t cnt); + void test_normalize_posting_lists(bool use_filter, bool one_filter); + void test_foreach_posting_list(bool one_filter); static EntryRef fake_pidx() { return EntryRef(42); } }; @@ -620,6 +641,149 @@ 
EnumStoreDictionaryTest<EnumStoreTypeAndDictionaryType>::insert_value(size_t val return enum_idx; } +namespace { +/* + * large_population should trigger multiple callbacks from normalize_values + * and foreach_value + */ +constexpr uint32_t large_population = 1200; + +uint32_t select_buffer(uint32_t i) { + if ((i % 2) == 0) { + return 0; + } + if ((i % 3) == 0) { + return 1; + } + if ((i % 5) == 0) { + return 2; + } + return 3; +} + +EntryRef make_fake_pidx(uint32_t i) { return RefT(i + 200, select_buffer(i)); } +EntryRef make_fake_adjusted_pidx(uint32_t i) { return RefT(i + 500, select_buffer(i)); } +EntryRef adjust_fake_pidx(EntryRef ref) { RefT iref(ref); return RefT(iref.offset() + 300, iref.bufferId()); } + +} + + +template <typename EnumStoreTypeAndDictionaryType> +void +EnumStoreDictionaryTest<EnumStoreTypeAndDictionaryType>::populate_sample_data(uint32_t cnt) +{ + auto& dict = store.get_dictionary(); + for (uint32_t i = 0; i < cnt; ++i) { + auto enum_idx = store.insert(i); + EXPECT_TRUE(enum_idx.valid()); + EntryRef posting_idx(make_fake_pidx(i)); + dict.update_posting_list(enum_idx, store.get_comparator(), [posting_idx](EntryRef) noexcept -> EntryRef { return posting_idx; }); + } +} + +template <typename EnumStoreTypeAndDictionaryType> +std::vector<EntryRef> +EnumStoreDictionaryTest<EnumStoreTypeAndDictionaryType>::get_sample_values(uint32_t cnt) +{ + std::vector<EntryRef> result; + result.reserve(cnt); + store.freeze_dictionary(); + auto& dict = store.get_dictionary(); + for (uint32_t i = 0; i < cnt; ++i) { + auto compare = store.make_comparator(i); + auto enum_idx = dict.find(compare); + EXPECT_TRUE(enum_idx.valid()); + EntryRef posting_idx; + dict.update_posting_list(enum_idx, compare, [&posting_idx](EntryRef ref) noexcept { posting_idx = ref; return ref; });; + auto find_result = dict.find_posting_list(compare, dict.get_frozen_root()); + EXPECT_EQ(enum_idx, find_result.first); + EXPECT_EQ(posting_idx, find_result.second); + 
result.emplace_back(find_result.second); + } + return result; +} + +template <typename EnumStoreTypeAndDictionaryType> +void +EnumStoreDictionaryTest<EnumStoreTypeAndDictionaryType>::clear_sample_values(uint32_t cnt) +{ + auto& dict = store.get_dictionary(); + for (uint32_t i = 0; i < cnt; ++i) { + auto comparator = store.make_comparator(i); + auto enum_idx = dict.find(comparator); + EXPECT_TRUE(enum_idx.valid()); + dict.update_posting_list(enum_idx, comparator, [](EntryRef) noexcept -> EntryRef { return EntryRef(); }); + } +} + +namespace { + +EntryRefFilter make_entry_ref_filter(bool one_filter) +{ + if (one_filter) { + EntryRefFilter filter(RefT::numBuffers(), RefT::offset_bits); + filter.add_buffer(3); + return filter; + } + return EntryRefFilter::create_all_filter(RefT::numBuffers(), RefT::offset_bits); +} + +} + +template <typename EnumStoreTypeAndDictionaryType> +void +EnumStoreDictionaryTest<EnumStoreTypeAndDictionaryType>::test_normalize_posting_lists(bool use_filter, bool one_filter) +{ + populate_sample_data(large_population); + auto& dict = store.get_dictionary(); + std::vector<EntryRef> exp_refs; + std::vector<EntryRef> exp_adjusted_refs; + exp_refs.reserve(large_population); + exp_adjusted_refs.reserve(large_population); + for (uint32_t i = 0; i < large_population; ++i) { + exp_refs.emplace_back(make_fake_pidx(i)); + if (!use_filter || !one_filter || select_buffer(i) == 3) { + exp_adjusted_refs.emplace_back(make_fake_adjusted_pidx(i)); + } else { + exp_adjusted_refs.emplace_back(make_fake_pidx(i)); + } + } + EXPECT_EQ(exp_refs, get_sample_values(large_population)); + if (use_filter) { + auto filter = make_entry_ref_filter(one_filter); + auto dummy = [](std::vector<EntryRef>&) noexcept { }; + auto adjust_refs = [](std::vector<EntryRef> &refs) noexcept { for (auto &ref : refs) { ref = adjust_fake_pidx(ref); } }; + EXPECT_FALSE(dict.normalize_posting_lists(dummy, filter)); + EXPECT_EQ(exp_refs, get_sample_values(large_population)); + 
EXPECT_TRUE(dict.normalize_posting_lists(adjust_refs, filter)); + } else { + auto dummy = [](EntryRef posting_idx) noexcept { return posting_idx; }; + auto adjust_refs = [](EntryRef ref) noexcept { return adjust_fake_pidx(ref); }; + EXPECT_FALSE(dict.normalize_posting_lists(dummy)); + EXPECT_EQ(exp_refs, get_sample_values(large_population)); + EXPECT_TRUE(dict.normalize_posting_lists(adjust_refs)); + } + EXPECT_EQ(exp_adjusted_refs, get_sample_values(large_population)); + clear_sample_values(large_population); +} + +template <typename EnumStoreTypeAndDictionaryType> +void +EnumStoreDictionaryTest<EnumStoreTypeAndDictionaryType>::test_foreach_posting_list(bool one_filter) +{ + auto filter = make_entry_ref_filter(one_filter); + populate_sample_data(large_population); + auto& dict = store.get_dictionary(); + std::vector<EntryRef> exp_refs; + auto save_exp_refs = [&exp_refs](std::vector<EntryRef>& refs) { exp_refs.insert(exp_refs.end(), refs.begin(), refs.end()); }; + EXPECT_FALSE(dict.normalize_posting_lists(save_exp_refs, filter)); + std::vector<EntryRef> act_refs; + auto save_act_refs = [&act_refs](const std::vector<EntryRef>& refs) { act_refs.insert(act_refs.end(), refs.begin(), refs.end()); }; + dict.foreach_posting_list(save_act_refs, filter); + EXPECT_EQ(exp_refs, act_refs); + clear_sample_values(large_population); +} + // Disable warnings emitted by gtest generated files when using typed tests #pragma GCC diagnostic push #ifndef __clang__ @@ -678,26 +842,27 @@ TYPED_TEST(EnumStoreDictionaryTest, find_posting_list_works) TYPED_TEST(EnumStoreDictionaryTest, normalize_posting_lists_works) { - auto value_0_idx = this->insert_value(0); - this->update_posting_idx(value_0_idx, EntryRef(), this->fake_pidx()); - this->store.freeze_dictionary(); - auto& dict = this->store.get_dictionary(); - auto root = dict.get_frozen_root(); - auto find_result = dict.find_posting_list(this->make_bound_comparator(0), root); - EXPECT_EQ(value_0_idx, find_result.first); - 
EXPECT_EQ(this->fake_pidx(), find_result.second); - auto dummy = [](EntryRef posting_idx) noexcept { return posting_idx; }; - std::vector<EntryRef> saved_refs; - auto save_refs_and_clear = [&saved_refs](EntryRef posting_idx) { saved_refs.push_back(posting_idx); return EntryRef(); }; - EXPECT_FALSE(dict.normalize_posting_lists(dummy)); - EXPECT_TRUE(dict.normalize_posting_lists(save_refs_and_clear)); - EXPECT_FALSE(dict.normalize_posting_lists(save_refs_and_clear)); - EXPECT_EQ((std::vector<EntryRef>{ this->fake_pidx(), EntryRef() }), saved_refs); - this->store.freeze_dictionary(); - root = dict.get_frozen_root(); - find_result = dict.find_posting_list(this->make_bound_comparator(0), root); - EXPECT_EQ(value_0_idx, find_result.first); - EXPECT_EQ(EntryRef(), find_result.second); + this->test_normalize_posting_lists(false, false); +} + +TYPED_TEST(EnumStoreDictionaryTest, normalize_posting_lists_with_all_filter_works) +{ + this->test_normalize_posting_lists(true, false); +} + +TYPED_TEST(EnumStoreDictionaryTest, normalize_posting_lists_with_one_filter_works) +{ + this->test_normalize_posting_lists(true, true); +} + +TYPED_TEST(EnumStoreDictionaryTest, foreach_posting_list_with_all_filter_works) +{ + this->test_foreach_posting_list(false); +} + +TYPED_TEST(EnumStoreDictionaryTest, foreach_posting_list_with_one_filter_works) +{ + this->test_foreach_posting_list(true); } namespace { @@ -714,7 +879,7 @@ void inc_generation(generation_t &gen, NumericEnumStore &store) TYPED_TEST(EnumStoreDictionaryTest, compact_worst_works) { - size_t entry_count = (search::CompactionStrategy::DEAD_BYTES_SLACK / 8) + 40; + size_t entry_count = (CompactionStrategy::DEAD_BYTES_SLACK / 8) + 40; auto updater = this->store.make_batch_updater(); for (int32_t i = 0; (size_t) i < entry_count; ++i) { auto idx = updater.insert(i); @@ -727,15 +892,15 @@ TYPED_TEST(EnumStoreDictionaryTest, compact_worst_works) inc_generation(gen, this->store); auto& dict = this->store.get_dictionary(); if 
(dict.get_has_btree_dictionary()) { - EXPECT_LT(search::CompactionStrategy::DEAD_BYTES_SLACK, dict.get_btree_memory_usage().deadBytes()); + EXPECT_LT(CompactionStrategy::DEAD_BYTES_SLACK, dict.get_btree_memory_usage().deadBytes()); } if (dict.get_has_hash_dictionary()) { - EXPECT_LT(search::CompactionStrategy::DEAD_BYTES_SLACK, dict.get_hash_memory_usage().deadBytes()); + EXPECT_LT(CompactionStrategy::DEAD_BYTES_SLACK, dict.get_hash_memory_usage().deadBytes()); } int compact_count = 0; - search::CompactionStrategy compaction_strategy; + CompactionStrategy compaction_strategy; for (uint32_t i = 0; i < 15; ++i) { - this->store.update_stat(); + this->store.update_stat(compaction_strategy); if (this->store.consider_compact_dictionary(compaction_strategy)) { ++compact_count; } else { @@ -747,10 +912,10 @@ TYPED_TEST(EnumStoreDictionaryTest, compact_worst_works) EXPECT_LT((TypeParam::type == Type::BTREE_AND_HASH) ? 1 : 0, compact_count); EXPECT_GT(15, compact_count); if (dict.get_has_btree_dictionary()) { - EXPECT_GT(search::CompactionStrategy::DEAD_BYTES_SLACK, dict.get_btree_memory_usage().deadBytes()); + EXPECT_GT(CompactionStrategy::DEAD_BYTES_SLACK, dict.get_btree_memory_usage().deadBytes()); } if (dict.get_has_hash_dictionary()) { - EXPECT_GT(search::CompactionStrategy::DEAD_BYTES_SLACK, dict.get_hash_memory_usage().deadBytes()); + EXPECT_GT(CompactionStrategy::DEAD_BYTES_SLACK, dict.get_hash_memory_usage().deadBytes()); } std::vector<int32_t> exp_values; std::vector<int32_t> values; diff --git a/searchlib/src/tests/attribute/multi_value_mapping/multi_value_mapping_test.cpp b/searchlib/src/tests/attribute/multi_value_mapping/multi_value_mapping_test.cpp index 8b1906573d4..bddaa4f4e31 100644 --- a/searchlib/src/tests/attribute/multi_value_mapping/multi_value_mapping_test.cpp +++ b/searchlib/src/tests/attribute/multi_value_mapping/multi_value_mapping_test.cpp @@ -14,6 +14,8 @@ LOG_SETUP("multivaluemapping_test"); using vespalib::datastore::ArrayStoreConfig; +using 
vespalib::datastore::CompactionSpec; +using vespalib::datastore::CompactionStrategy; template <typename EntryT> void @@ -142,7 +144,9 @@ public: } void compactWorst() { - _mvMapping->compactWorst(true, false); + CompactionSpec compaction_spec(true, false); + CompactionStrategy compaction_strategy; + _mvMapping->compactWorst(compaction_spec, compaction_strategy); _attr->commit(); _attr->incGeneration(); } diff --git a/searchlib/src/tests/attribute/posting_store/posting_store_test.cpp b/searchlib/src/tests/attribute/posting_store/posting_store_test.cpp index cd78332cacd..10cc14012dd 100644 --- a/searchlib/src/tests/attribute/posting_store/posting_store_test.cpp +++ b/searchlib/src/tests/attribute/posting_store/posting_store_test.cpp @@ -13,6 +13,7 @@ #include <ostream> using vespalib::GenerationHandler; +using vespalib::datastore::CompactionStrategy; using vespalib::datastore::EntryRef; namespace search::attribute { @@ -160,9 +161,9 @@ PostingStoreTest::test_compact_sequence(uint32_t sequence_length) EntryRef old_ref2 = get_posting_ref(2); auto usage_before = store.getMemoryUsage(); bool compaction_done = false; - search::CompactionStrategy compaction_strategy(0.05, 0.2); + CompactionStrategy compaction_strategy(0.05, 0.2); for (uint32_t pass = 0; pass < 45; ++pass) { - store.update_stat(); + store.update_stat(compaction_strategy); auto guard = _gen_handler.takeGuard(); if (!store.consider_compact_worst_buffers(compaction_strategy)) { compaction_done = true; @@ -193,9 +194,9 @@ PostingStoreTest::test_compact_btree_nodes(uint32_t sequence_length) EntryRef old_ref2 = get_posting_ref(2); auto usage_before = store.getMemoryUsage(); bool compaction_done = false; - search::CompactionStrategy compaction_strategy(0.05, 0.2); + CompactionStrategy compaction_strategy(0.05, 0.2); for (uint32_t pass = 0; pass < 55; ++pass) { - store.update_stat(); + store.update_stat(compaction_strategy); auto guard = _gen_handler.takeGuard(); if 
(!store.consider_compact_worst_btree_nodes(compaction_strategy)) { compaction_done = true; diff --git a/searchlib/src/tests/attribute/reference_attribute/reference_attribute_test.cpp b/searchlib/src/tests/attribute/reference_attribute/reference_attribute_test.cpp index c077ab83a6e..1a8eda40f52 100644 --- a/searchlib/src/tests/attribute/reference_attribute/reference_attribute_test.cpp +++ b/searchlib/src/tests/attribute/reference_attribute/reference_attribute_test.cpp @@ -176,7 +176,7 @@ struct ReferenceAttributeTest : public ::testing::Test { search::attribute::Status newStatus = oldStatus; uint64_t iter = 0; AttributeGuard guard(_attr); - uint64_t dropCount = search::CompactionStrategy::DEAD_BYTES_SLACK / sizeof(Reference); + uint64_t dropCount = vespalib::datastore::CompactionStrategy::DEAD_BYTES_SLACK / sizeof(Reference); for (; iter < iterLimit; ++iter) { clear(2); set(2, toGid(doc2)); diff --git a/searchlib/src/tests/attribute/tensorattribute/tensorattribute_test.cpp b/searchlib/src/tests/attribute/tensorattribute/tensorattribute_test.cpp index 922b0d4fb3e..8a6f1e08fa6 100644 --- a/searchlib/src/tests/attribute/tensorattribute/tensorattribute_test.cpp +++ b/searchlib/src/tests/attribute/tensorattribute/tensorattribute_test.cpp @@ -38,7 +38,6 @@ using document::WrongTensorTypeException; using search::AddressSpaceUsage; using search::AttributeGuard; using search::AttributeVector; -using search::CompactionStrategy; using search::attribute::DistanceMetric; using search::attribute::HnswIndexParams; using search::queryeval::GlobalFilter; @@ -56,6 +55,7 @@ using search::tensor::NearestNeighborIndexLoader; using search::tensor::NearestNeighborIndexSaver; using search::tensor::PrepareResult; using search::tensor::TensorAttribute; +using vespalib::datastore::CompactionStrategy; using vespalib::eval::TensorSpec; using vespalib::eval::CellType; using vespalib::eval::ValueType; @@ -222,7 +222,8 @@ public: bool consider_compact(const CompactionStrategy&) override { return 
false; } - vespalib::MemoryUsage update_stat() override { + vespalib::MemoryUsage update_stat(const CompactionStrategy&) override { + ++_memory_usage_cnt; return vespalib::MemoryUsage(); } vespalib::MemoryUsage memory_usage() const override { diff --git a/searchlib/src/tests/docstore/document_store/document_store_test.cpp b/searchlib/src/tests/docstore/document_store/document_store_test.cpp index dec7b911f65..f2bec30a349 100644 --- a/searchlib/src/tests/docstore/document_store/document_store_test.cpp +++ b/searchlib/src/tests/docstore/document_store/document_store_test.cpp @@ -25,6 +25,7 @@ struct NullDataStore : IDataStore { size_t memoryMeta() const override { return 0; } size_t getDiskFootprint() const override { return 0; } size_t getDiskBloat() const override { return 0; } + size_t getMaxSpreadAsBloat() const override { return 0; } uint64_t lastSyncToken() const override { return 0; } uint64_t tentativeLastSyncToken() const override { return 0; } vespalib::system_time getLastFlushTime() const override { return vespalib::system_time(); } diff --git a/searchlib/src/tests/docstore/document_store_visitor/document_store_visitor_test.cpp b/searchlib/src/tests/docstore/document_store_visitor/document_store_visitor_test.cpp index 792cc48c338..072efb06a07 100644 --- a/searchlib/src/tests/docstore/document_store_visitor/document_store_visitor_test.cpp +++ b/searchlib/src/tests/docstore/document_store_visitor/document_store_visitor_test.cpp @@ -36,7 +36,7 @@ const string doc_type_name = "test"; const string header_name = doc_type_name + ".header"; const string body_name = doc_type_name + ".body"; -document::DocumenttypesConfig +document::config::DocumenttypesConfig makeDocTypeRepoConfig() { const int32_t doc_type_id = 787121340; diff --git a/searchlib/src/tests/docstore/logdatastore/logdatastore_test.cpp b/searchlib/src/tests/docstore/logdatastore/logdatastore_test.cpp index 242a3c31663..378babb6ee1 100644 --- 
a/searchlib/src/tests/docstore/logdatastore/logdatastore_test.cpp +++ b/searchlib/src/tests/docstore/logdatastore/logdatastore_test.cpp @@ -236,7 +236,7 @@ void verifyGrowing(const LogDataStore::Config & config, uint32_t minFiles, uint3 datastore.remove(i + 20000, i); } datastore.flush(datastore.initFlush(lastSyncToken)); - datastore.compact(30000); + datastore.compactBloat(30000); datastore.remove(31000, 0); checkStats(datastore, 31000, 30000); EXPECT_LESS_EQUAL(minFiles, datastore.getAllActiveFiles().size()); @@ -252,7 +252,7 @@ void verifyGrowing(const LogDataStore::Config & config, uint32_t minFiles, uint3 } TEST("testGrowingChunkedBySize") { LogDataStore::Config config; - config.setMaxFileSize(100000).setMaxDiskBloatFactor(0.1).setMaxBucketSpread(3.0).setMinFileSizeFactor(0.2) + config.setMaxFileSize(100000).setMaxBucketSpread(3.0).setMinFileSizeFactor(0.2) .compactCompression({CompressionConfig::LZ4}) .setFileConfig({{CompressionConfig::LZ4, 9, 60}, 1000}); verifyGrowing(config, 40, 120); @@ -260,7 +260,7 @@ TEST("testGrowingChunkedBySize") { TEST("testGrowingChunkedByNumLids") { LogDataStore::Config config; - config.setMaxNumLids(1000).setMaxDiskBloatFactor(0.1).setMaxBucketSpread(3.0).setMinFileSizeFactor(0.2) + config.setMaxNumLids(1000).setMaxBucketSpread(3.0).setMinFileSizeFactor(0.2) .compactCompression({CompressionConfig::LZ4}) .setFileConfig({{CompressionConfig::LZ4, 9, 60}, 1000}); verifyGrowing(config,10, 10); @@ -399,7 +399,7 @@ const string doc_type_name = "test"; const string header_name = doc_type_name + ".header"; const string body_name = doc_type_name + ".body"; -document::DocumenttypesConfig +document::config::DocumenttypesConfig makeDocTypeRepoConfig() { const int32_t doc_type_id = 787121340; @@ -679,7 +679,7 @@ TEST("testWriteRead") { EXPECT_LESS(0u, headerFootprint); EXPECT_EQUAL(datastore.getDiskFootprint(), headerFootprint); EXPECT_EQUAL(datastore.getDiskBloat(), 0ul); - EXPECT_EQUAL(datastore.getMaxCompactGain(), 0ul); + 
EXPECT_EQUAL(datastore.getMaxSpreadAsBloat(), 0ul); datastore.write(1, 0, a[0].c_str(), a[0].size()); fetchAndTest(datastore, 0, a[0].c_str(), a[0].size()); datastore.write(2, 0, a[1].c_str(), a[1].size()); @@ -701,7 +701,7 @@ TEST("testWriteRead") { EXPECT_EQUAL(datastore.getDiskFootprint(), 2711ul + headerFootprint); EXPECT_EQUAL(datastore.getDiskBloat(), 0ul); - EXPECT_EQUAL(datastore.getMaxCompactGain(), 0ul); + EXPECT_EQUAL(datastore.getMaxSpreadAsBloat(), 0ul); datastore.flush(datastore.initFlush(lastSyncToken)); } { @@ -715,7 +715,7 @@ TEST("testWriteRead") { EXPECT_LESS(0u, headerFootprint); EXPECT_EQUAL(4944ul + headerFootprint, datastore.getDiskFootprint()); EXPECT_EQUAL(0ul, datastore.getDiskBloat()); - EXPECT_EQUAL(0ul, datastore.getMaxCompactGain()); + EXPECT_EQUAL(0ul, datastore.getMaxSpreadAsBloat()); for(size_t i=0; i < 100; i++) { fetchAndTest(datastore, i, a[i%2].c_str(), a[i%2].size()); @@ -730,7 +730,7 @@ TEST("testWriteRead") { EXPECT_EQUAL(7594ul + headerFootprint, datastore.getDiskFootprint()); EXPECT_EQUAL(0ul, datastore.getDiskBloat()); - EXPECT_EQUAL(0ul, datastore.getMaxCompactGain()); + EXPECT_EQUAL(0ul, datastore.getMaxSpreadAsBloat()); } FastOS_File::EmptyAndRemoveDirectory("empty"); } @@ -1050,7 +1050,6 @@ TEST("require that config equality operator detects inequality") { using C = LogDataStore::Config; EXPECT_TRUE(C() == C()); EXPECT_FALSE(C() == C().setMaxFileSize(1)); - EXPECT_FALSE(C() == C().setMaxDiskBloatFactor(0.3)); EXPECT_FALSE(C() == C().setMaxBucketSpread(0.3)); EXPECT_FALSE(C() == C().setMinFileSizeFactor(0.3)); EXPECT_FALSE(C() == C().setFileConfig(WriteableFileChunk::Config({}, 70))); diff --git a/searchlib/src/tests/index/doctypebuilder/doctypebuilder_test.cpp b/searchlib/src/tests/index/doctypebuilder/doctypebuilder_test.cpp index 4b7062be4f8..f8a07270292 100644 --- a/searchlib/src/tests/index/doctypebuilder/doctypebuilder_test.cpp +++ b/searchlib/src/tests/index/doctypebuilder/doctypebuilder_test.cpp @@ -26,7 +26,7 
@@ TEST("testSearchDocType") { s.addSummaryField(Schema::SummaryField("sa", DataType::STRING)); DocTypeBuilder docTypeBuilder(s); - document::DocumenttypesConfig config = docTypeBuilder.makeConfig(); + document::config::DocumenttypesConfig config = docTypeBuilder.makeConfig(); DocumentTypeRepo repo(config); const DocumentType *docType = repo.getDocumentType("searchdocument"); ASSERT_TRUE(docType); @@ -57,7 +57,7 @@ TEST("require that multiple fields can have the same type") { s.addIndexField(Schema::IndexField("array1", DataType::STRING, CollectionType::ARRAY)); s.addIndexField(Schema::IndexField("array2", DataType::STRING, CollectionType::ARRAY)); DocTypeBuilder docTypeBuilder(s); - document::DocumenttypesConfig config = docTypeBuilder.makeConfig(); + document::config::DocumenttypesConfig config = docTypeBuilder.makeConfig(); DocumentTypeRepo repo(config); const DocumentType *docType = repo.getDocumentType("searchdocument"); ASSERT_TRUE(docType); diff --git a/searchlib/src/tests/tensor/dense_tensor_store/dense_tensor_store_test.cpp b/searchlib/src/tests/tensor/dense_tensor_store/dense_tensor_store_test.cpp index 032960c3799..149662cd266 100644 --- a/searchlib/src/tests/tensor/dense_tensor_store/dense_tensor_store_test.cpp +++ b/searchlib/src/tests/tensor/dense_tensor_store/dense_tensor_store_test.cpp @@ -75,10 +75,19 @@ assertArraySize(const vespalib::string &tensorType, uint32_t expArraySize) { TEST("require that array size is calculated correctly") { - TEST_DO(assertArraySize("tensor(x[1])", 32)); + TEST_DO(assertArraySize("tensor(x[1])", 8)); TEST_DO(assertArraySize("tensor(x[10])", 96)); TEST_DO(assertArraySize("tensor(x[3])", 32)); TEST_DO(assertArraySize("tensor(x[10],y[10])", 800)); + TEST_DO(assertArraySize("tensor<int8>(x[1])", 8)); + TEST_DO(assertArraySize("tensor<int8>(x[8])", 8)); + TEST_DO(assertArraySize("tensor<int8>(x[9])", 16)); + TEST_DO(assertArraySize("tensor<int8>(x[16])", 16)); + TEST_DO(assertArraySize("tensor<int8>(x[17])", 32)); + 
TEST_DO(assertArraySize("tensor<int8>(x[32])", 32)); + TEST_DO(assertArraySize("tensor<int8>(x[33])", 64)); + TEST_DO(assertArraySize("tensor<int8>(x[64])", 64)); + TEST_DO(assertArraySize("tensor<int8>(x[65])", 96)); } TEST_MAIN() { TEST_RUN_ALL(); } diff --git a/searchlib/src/tests/tensor/distance_functions/distance_functions_test.cpp b/searchlib/src/tests/tensor/distance_functions/distance_functions_test.cpp index a54f981352b..f0e156a96ed 100644 --- a/searchlib/src/tests/tensor/distance_functions/distance_functions_test.cpp +++ b/searchlib/src/tests/tensor/distance_functions/distance_functions_test.cpp @@ -71,7 +71,6 @@ TEST(DistanceFunctionsTest, euclidean_int8_smoketest) auto euclid = make_distance_function(DistanceMetric::Euclidean, ct); - std::vector<double> p00{0.0, 0.0, 0.0}; std::vector<Int8Float> p0{0.0, 0.0, 0.0}; std::vector<Int8Float> p1{1.0, 0.0, 0.0}; std::vector<Int8Float> p5{0.0,-1.0, 0.0}; @@ -85,9 +84,6 @@ TEST(DistanceFunctionsTest, euclidean_int8_smoketest) EXPECT_DOUBLE_EQ(12.0, euclid->calc(t(p1), t(p7))); EXPECT_DOUBLE_EQ(14.0, euclid->calc(t(p5), t(p7))); - EXPECT_DOUBLE_EQ(1.0, euclid->calc(t(p00), t(p1))); - EXPECT_DOUBLE_EQ(1.0, euclid->calc(t(p00), t(p5))); - EXPECT_DOUBLE_EQ(9.0, euclid->calc(t(p00), t(p7))); } TEST(DistanceFunctionsTest, angular_gives_expected_score) diff --git a/searchlib/src/tests/tensor/hnsw_index/hnsw_index_test.cpp b/searchlib/src/tests/tensor/hnsw_index/hnsw_index_test.cpp index 7acd3cf8b57..6054d473c1f 100644 --- a/searchlib/src/tests/tensor/hnsw_index/hnsw_index_test.cpp +++ b/searchlib/src/tests/tensor/hnsw_index/hnsw_index_test.cpp @@ -1,12 +1,13 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
-#include <vespa/searchcommon/common/compaction_strategy.h> #include <vespa/searchlib/common/bitvector.h> #include <vespa/searchlib/tensor/distance_functions.h> #include <vespa/searchlib/tensor/doc_vector_access.h> #include <vespa/searchlib/tensor/hnsw_index.h> #include <vespa/searchlib/tensor/random_level_generator.h> #include <vespa/searchlib/tensor/inv_log_level_generator.h> +#include <vespa/vespalib/datastore/compaction_spec.h> +#include <vespa/vespalib/datastore/compaction_strategy.h> #include <vespa/vespalib/gtest/gtest.h> #include <vespa/vespalib/util/generationhandler.h> #include <vespa/vespalib/data/slime/slime.h> @@ -21,7 +22,8 @@ using namespace search::tensor; using namespace vespalib::slime; using vespalib::Slime; using search::BitVector; -using search::CompactionStrategy; +using vespalib::datastore::CompactionSpec; +using vespalib::datastore::CompactionStrategy; template <typename FloatType> class MyDocVectorAccess : public DocVectorAccess { @@ -116,7 +118,8 @@ public: } MemoryUsage commit_and_update_stat() { commit(); - return index->update_stat(); + CompactionStrategy compaction_strategy; + return index->update_stat(compaction_strategy); } void expect_entry_point(uint32_t exp_docid, uint32_t exp_level) { EXPECT_EQ(exp_docid, index->get_entry_docid()); @@ -628,10 +631,12 @@ TEST_F(HnswIndexTest, hnsw_graph_is_compacted) for (uint32_t i = 0; i < 10; ++i) { mem_1 = mem_2; // Forced compaction to move things around - index->compact_link_arrays(true, false); - index->compact_level_arrays(true, false); + CompactionSpec compaction_spec(true, false); + CompactionStrategy compaction_strategy; + index->compact_link_arrays(compaction_spec, compaction_strategy); + index->compact_level_arrays(compaction_spec, compaction_strategy); commit(); - index->update_stat(); + index->update_stat(compaction_strategy); mem_2 = commit_and_update_stat(); EXPECT_LE(mem_2.usedBytes(), mem_1.usedBytes()); if (mem_2.usedBytes() == mem_1.usedBytes()) { diff --git 
a/searchlib/src/tests/transactionlog/CMakeLists.txt b/searchlib/src/tests/transactionlog/CMakeLists.txt index b09271eefe2..0904dc3ee36 100644 --- a/searchlib/src/tests/transactionlog/CMakeLists.txt +++ b/searchlib/src/tests/transactionlog/CMakeLists.txt @@ -5,8 +5,7 @@ vespa_add_executable(searchlib_translogclient_test_app TEST DEPENDS searchlib ) -vespa_add_test(NAME searchlib_translogclient_test_app COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/translogclient_test.sh - DEPENDS searchlib_translogclient_test_app COST 100) +vespa_add_test(NAME searchlib_translogclient_test_app COMMAND searchlib_translogclient_test_app) vespa_add_executable(searchlib_translog_chunks_test_app TEST SOURCES diff --git a/searchlib/src/tests/transactionlog/translogclient_test.cpp b/searchlib/src/tests/transactionlog/translogclient_test.cpp index ab5d432ddfb..d3c3af3a9ca 100644 --- a/searchlib/src/tests/transactionlog/translogclient_test.cpp +++ b/searchlib/src/tests/transactionlog/translogclient_test.cpp @@ -1,10 +1,13 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
#include <vespa/searchlib/transactionlog/translogclient.h> #include <vespa/searchlib/transactionlog/translogserver.h> +#include <vespa/searchlib/test/directory_handler.h> #include <vespa/vespalib/testkit/testapp.h> #include <vespa/vespalib/objects/identifiable.h> #include <vespa/searchlib/index/dummyfileheadercontext.h> #include <vespa/document/util/bytebuffer.h> +#include <vespa/vespalib/util/exceptions.h> +#include <vespa/vespalib/util/destructor_callbacks.h> #include <vespa/fastos/file.h> #include <thread> @@ -33,8 +36,8 @@ void fillDomainTest(Session * s1, size_t numPackets, size_t numEntries, size_t e uint32_t countFiles(const vespalib::string &dir); void checkFilledDomainTest(Session &s1, size_t numEntries); bool visitDomainTest(TransLogClient & tls, Session * s1, const vespalib::string & name); -void createAndFillDomain(const vespalib::string & name, Encoding encoding, size_t preExistingDomains); -void verifyDomain(const vespalib::string & name); +void createAndFillDomain(const vespalib::string & dir, const vespalib::string & name, Encoding encoding, size_t preExistingDomains); +void verifyDomain(const vespalib::string & dir, const vespalib::string & name); vespalib::string myhex(const void * b, size_t sz) @@ -50,6 +53,12 @@ myhex(const void * b, size_t sz) return s; } +DomainConfig +createDomainConfig(uint32_t partSizeLimit) { + return DomainConfig().setPartSizeLimit(partSizeLimit) + .setEncoding(Encoding(Encoding::xxh64, Encoding::none_multi)); +} + class CallBackTest : public Callback { private: @@ -308,49 +317,39 @@ fillDomainTest(Session * s1, size_t numPackets, size_t numEntries) } } -using Counter = std::atomic<size_t>; - -class CountDone : public IDestructorCallback { -public: - explicit CountDone(Counter & inFlight) noexcept : _inFlight(inFlight) { ++_inFlight; } - ~CountDone() override { --_inFlight; } -private: - Counter & _inFlight; -}; - void -fillDomainTest(TransLogServer & s1, const vespalib::string & domain, size_t numPackets, size_t 
numEntries) +fillDomainTest(IDestructorCallback::SP onDone, TransLogServer & tls, const vespalib::string & domain, size_t numPackets, size_t numEntries) { size_t value(0); - Counter inFlight(0); - auto domainWriter = s1.getWriter(domain); - for(size_t i=0; i < numPackets; i++) { - std::unique_ptr<Packet> p(new Packet(DEFAULT_PACKET_SIZE)); - for(size_t j=0; j < numEntries; j++, value++) { - Packet::Entry e(value+1, j+1, vespalib::ConstBufferRef((const char *)&value, sizeof(value))); + auto domainWriter = tls.getWriter(domain); + + for (size_t i = 0; i < numPackets; i++) { + auto p = std::make_unique<Packet>(DEFAULT_PACKET_SIZE); + for (size_t j = 0; j < numEntries; j++, value++) { + Packet::Entry e(value + 1, j + 1, vespalib::ConstBufferRef((const char *) &value, sizeof(value))); p->add(e); - if ( p->sizeBytes() > DEFAULT_PACKET_SIZE ) { - domainWriter->append(*p, std::make_shared<CountDone>(inFlight)); + if (p->sizeBytes() > DEFAULT_PACKET_SIZE) { + domainWriter->append(*p, onDone); p = std::make_unique<Packet>(DEFAULT_PACKET_SIZE); } } - domainWriter->append(*p, std::make_shared<CountDone>(inFlight)); - auto keep = domainWriter->startCommit(Writer::DoneCallback()); - LOG(info, "Inflight %ld", inFlight.load()); + domainWriter->append(*p, onDone); + auto keep = domainWriter->startCommit(onDone); } - while (inFlight.load() != 0) { - std::this_thread::sleep_for(10ms); - LOG(info, "Waiting for inflight %ld to reach zero", inFlight.load()); - } - } +void +fillDomainTest(TransLogServer & tls, const vespalib::string & domain, size_t numPackets, size_t numEntries) { + vespalib::Gate gate; + fillDomainTest(std::make_shared<vespalib::GateCallback>(gate), tls, domain, numPackets, numEntries); + gate.await(); +} void fillDomainTest(Session * s1, size_t numPackets, size_t numEntries, size_t entrySize) { size_t value(0); - std::vector<char> entryBuffer(entrySize); + std::vector<char> entryBuffer(entrySize); for(size_t i=0; i < numPackets; i++) { std::unique_ptr<Packet> p(new 
Packet(DEFAULT_PACKET_SIZE)); for(size_t j=0; j < numEntries; j++, value++) { @@ -457,11 +456,12 @@ getMaxSessionRunTime(TransLogServer &tls, const vespalib::string &domain) return tls.getDomainStats()[domain].maxSessionRunTime.count(); } -void createAndFillDomain(const vespalib::string & name, Encoding encoding, size_t preExistingDomains) +void +createAndFillDomain(const vespalib::string & dir, const vespalib::string & name, Encoding encoding, size_t preExistingDomains) { DummyFileHeaderContext fileHeaderContext; - TransLogServer tlss("test13", 18377, ".", fileHeaderContext, - DomainConfig().setPartSizeLimit(0x1000000).setEncoding(encoding), 4); + TransLogServer tlss(dir, 18377, ".", fileHeaderContext, + createDomainConfig(0x1000000).setEncoding(encoding), 4); TransLogClient tls("tcp/localhost:18377"); createDomainTest(tls, name, preExistingDomains); @@ -469,19 +469,21 @@ void createAndFillDomain(const vespalib::string & name, Encoding encoding, size_ fillDomainTest(s1.get(), name); } -void verifyDomain(const vespalib::string & name) { +void +verifyDomain(const vespalib::string & dir, const vespalib::string & name) { DummyFileHeaderContext fileHeaderContext; - TransLogServer tlss("test13", 18377, ".", fileHeaderContext, DomainConfig().setPartSizeLimit(0x1000000)); + TransLogServer tlss(dir, 18377, ".", fileHeaderContext, createDomainConfig(0x1000000)); TransLogClient tls("tcp/localhost:18377"); auto s1 = openDomainTest(tls, name); visitDomainTest(tls, s1.get(), name); } -} -TEST("testVisitOverGeneratedDomain") { + +void +testVisitOverGeneratedDomain(const vespalib::string & testDir) { DummyFileHeaderContext fileHeaderContext; - TransLogServer tlss("test7", 18377, ".", fileHeaderContext, DomainConfig().setPartSizeLimit(0x10000)); + TransLogServer tlss(testDir, 18377, ".", fileHeaderContext, createDomainConfig(0x10000)); TransLogClient tls("tcp/localhost:18377"); vespalib::string name("test1"); @@ -495,10 +497,11 @@ TEST("testVisitOverGeneratedDomain") { 
EXPECT_GREATER(maxSessionRunTime, 0); } -TEST("testVisitOverPreExistingDomain") { +void +testVisitOverPreExistingDomain(const vespalib::string & testDir) { // Depends on Test::testVisitOverGeneratedDomain() DummyFileHeaderContext fileHeaderContext; - TransLogServer tlss("test7", 18377, ".", fileHeaderContext, DomainConfig().setPartSizeLimit(0x10000)); + TransLogServer tlss(testDir, 18377, ".", fileHeaderContext, createDomainConfig(0x10000)); TransLogClient tls("tcp/localhost:18377"); vespalib::string name("test1"); @@ -506,9 +509,10 @@ TEST("testVisitOverPreExistingDomain") { visitDomainTest(tls, s1.get(), name); } -TEST("partialUpdateTest") { +void +partialUpdateTest(const vespalib::string & testDir) { DummyFileHeaderContext fileHeaderContext; - TransLogServer tlss("test7", 18377, ".", fileHeaderContext, DomainConfig().setPartSizeLimit(0x10000)); + TransLogServer tlss(testDir, 18377, ".", fileHeaderContext, createDomainConfig(0x10000)); TransLogClient tls("tcp/localhost:18377"); auto s1 = openDomainTest(tls, "test1"); @@ -532,7 +536,7 @@ TEST("partialUpdateTest") { ASSERT_TRUE( visitor->visit(5, 7) ); for (size_t i(0); ! 
ca._eof && (i < 1000); i++ ) { std::this_thread::sleep_for(10ms); } ASSERT_TRUE( ca._eof ); - ASSERT_TRUE( ca.map().size() == 1); + ASSERT_EQUAL(1u, ca.map().size()); ASSERT_TRUE( ca.hasSerial(7) ); CallBackUpdate ca1; @@ -561,17 +565,33 @@ TEST("partialUpdateTest") { ASSERT_TRUE( ca3.hasSerial(7) ); } +} + +TEST("testVisitAndUpdates") { + test::DirectoryHandler testDir("test7"); + testVisitOverGeneratedDomain(testDir.getDir()); + testVisitOverPreExistingDomain(testDir.getDir()); + partialUpdateTest(testDir.getDir()); +} + + TEST("testCrcVersions") { - createAndFillDomain("ccitt_crc32", Encoding(Encoding::Crc::ccitt_crc32, Encoding::Compression::none), 0); - createAndFillDomain("xxh64", Encoding(Encoding::Crc::xxh64, Encoding::Compression::none), 1); + test::DirectoryHandler testDir("test13"); + try { + createAndFillDomain(testDir.getDir(),"ccitt_crc32", Encoding(Encoding::Crc::ccitt_crc32, Encoding::Compression::none), 0); + ASSERT_TRUE(false); + } catch (vespalib::IllegalArgumentException & e) { + EXPECT_TRUE(e.getMessage().find("Compression:none is not allowed for the tls") != vespalib::string::npos); + } + createAndFillDomain(testDir.getDir(), "xxh64", Encoding(Encoding::Crc::xxh64, Encoding::Compression::zstd), 0); - verifyDomain("ccitt_crc32"); - verifyDomain("xxh64"); + verifyDomain(testDir.getDir(), "xxh64"); } TEST("testRemove") { + test::DirectoryHandler testDir("testremove"); DummyFileHeaderContext fileHeaderContext; - TransLogServer tlss("testremove", 18377, ".", fileHeaderContext, DomainConfig().setPartSizeLimit(0x10000)); + TransLogServer tlss(testDir.getDir(), 18377, ".", fileHeaderContext, createDomainConfig(0x10000)); TransLogClient tls("tcp/localhost:18377"); vespalib::string name("test-delete"); @@ -618,14 +638,15 @@ assertStatus(Session &s, SerialNum expFirstSerial, SerialNum expLastSerial, uint } -TEST("test sending a lot of data") { +void + testSendingAlotOfDataSync(const vespalib::string & testDir) { const unsigned int NUM_PACKETS = 1000; 
const unsigned int NUM_ENTRIES = 100; const unsigned int TOTAL_NUM_ENTRIES = NUM_PACKETS * NUM_ENTRIES; const vespalib::string MANY("many"); { DummyFileHeaderContext fileHeaderContext; - TransLogServer tlss("test8", 18377, ".", fileHeaderContext, DomainConfig().setPartSizeLimit(0x80000)); + TransLogServer tlss(testDir, 18377, ".", fileHeaderContext, createDomainConfig(0x80000)); TransLogClient tls("tcp/localhost:18377"); createDomainTest(tls, MANY, 0); @@ -648,7 +669,7 @@ TEST("test sending a lot of data") { } { DummyFileHeaderContext fileHeaderContext; - TransLogServer tlss("test8", 18377, ".", fileHeaderContext, DomainConfig().setPartSizeLimit(0x1000000)); + TransLogServer tlss(testDir, 18377, ".", fileHeaderContext, createDomainConfig(0x1000000)); TransLogClient tls("tcp/localhost:18377"); auto s1 = openDomainTest(tls, "many"); @@ -669,7 +690,7 @@ TEST("test sending a lot of data") { } { DummyFileHeaderContext fileHeaderContext; - TransLogServer tlss("test8", 18377, ".", fileHeaderContext, DomainConfig().setPartSizeLimit(0x1000000)); + TransLogServer tlss(testDir, 18377, ".", fileHeaderContext, createDomainConfig(0x1000000)); TransLogClient tls("tcp/localhost:18377"); auto s1 = openDomainTest(tls, MANY); @@ -690,14 +711,14 @@ TEST("test sending a lot of data") { } } -TEST("test sending a lot of data async") { +void testSendingAlotOfDataAsync(const vespalib::string & testDir) { const unsigned int NUM_PACKETS = 1000; const unsigned int NUM_ENTRIES = 100; const unsigned int TOTAL_NUM_ENTRIES = NUM_PACKETS * NUM_ENTRIES; const vespalib::string MANY("many-async"); { DummyFileHeaderContext fileHeaderContext; - TransLogServer tlss("test8", 18377, ".", fileHeaderContext, DomainConfig().setPartSizeLimit(0x80000)); + TransLogServer tlss(testDir, 18377, ".", fileHeaderContext, createDomainConfig(0x80000)); TransLogClient tls("tcp/localhost:18377"); createDomainTest(tls, MANY, 1); auto s1 = openDomainTest(tls, MANY); @@ -719,7 +740,7 @@ TEST("test sending a lot of data 
async") { } { DummyFileHeaderContext fileHeaderContext; - TransLogServer tlss("test8", 18377, ".", fileHeaderContext, DomainConfig().setPartSizeLimit(0x1000000)); + TransLogServer tlss(testDir, 18377, ".", fileHeaderContext, createDomainConfig(0x1000000)); TransLogClient tls("tcp/localhost:18377"); auto s1 = openDomainTest(tls, MANY); @@ -740,16 +761,21 @@ TEST("test sending a lot of data async") { } } - +TEST("test sending a lot of data both sync and async") { + test::DirectoryHandler testDir("test8"); + testSendingAlotOfDataSync(testDir.getDir()); + testSendingAlotOfDataAsync(testDir.getDir()); +} TEST("testErase") { const unsigned int NUM_PACKETS = 1000; const unsigned int NUM_ENTRIES = 100; const unsigned int TOTAL_NUM_ENTRIES = NUM_PACKETS * NUM_ENTRIES; + test::DirectoryHandler testDir("test12"); { DummyFileHeaderContext fileHeaderContext; - TransLogServer tlss("test12", 18377, ".", fileHeaderContext, DomainConfig().setPartSizeLimit(0x80000)); + TransLogServer tlss(testDir.getDir(), 18377, ".", fileHeaderContext, createDomainConfig(0x80000)); TransLogClient tls("tcp/localhost:18377"); createDomainTest(tls, "erase", 0); @@ -758,7 +784,7 @@ TEST("testErase") { } { DummyFileHeaderContext fileHeaderContext; - TransLogServer tlss("test12", 18377, ".", fileHeaderContext, DomainConfig().setPartSizeLimit(0x1000000)); + TransLogServer tlss(testDir.getDir(), 18377, ".", fileHeaderContext, createDomainConfig(0x1000000)); TransLogClient tls("tcp/localhost:18377"); auto s1 = openDomainTest(tls, "erase"); @@ -845,7 +871,8 @@ TEST("testSync") { const unsigned int TOTAL_NUM_ENTRIES = NUM_PACKETS * NUM_ENTRIES; DummyFileHeaderContext fileHeaderContext; - TransLogServer tlss("test9", 18377, ".", fileHeaderContext, DomainConfig().setPartSizeLimit(0x1000000)); + test::DirectoryHandler testDir("test9"); + TransLogServer tlss(testDir.getDir(), 18377, ".", fileHeaderContext, createDomainConfig(0x1000000)); TransLogClient tls("tcp/localhost:18377"); createDomainTest(tls, "sync", 0); 
@@ -866,8 +893,9 @@ TEST("test truncate on version mismatch") { uint64_t fromOld(0), toOld(0); size_t countOld(0); DummyFileHeaderContext fileHeaderContext; + test::DirectoryHandler testDir("test11"); { - TransLogServer tlss("test11", 18377, ".", fileHeaderContext, DomainConfig().setPartSizeLimit(0x1000000)); + TransLogServer tlss(testDir.getDir(), 18377, ".", fileHeaderContext, createDomainConfig(0x1000000)); TransLogClient tls("tcp/localhost:18377"); createDomainTest(tls, "sync", 0); @@ -879,7 +907,7 @@ TEST("test truncate on version mismatch") { EXPECT_TRUE(s1->sync(2, syncedTo)); EXPECT_EQUAL(syncedTo, TOTAL_NUM_ENTRIES); } - FastOS_File f("test11/sync/sync-0000000000000000"); + FastOS_File f((testDir.getDir() + "/sync/sync-0000000000000000").c_str()); EXPECT_TRUE(f.OpenWriteOnlyExisting()); EXPECT_TRUE(f.SetPosition(f.GetSize())); @@ -888,7 +916,7 @@ TEST("test truncate on version mismatch") { EXPECT_EQUAL(static_cast<ssize_t>(sizeof(tmp)), f.Write2(tmp, sizeof(tmp))); EXPECT_TRUE(f.Close()); { - TransLogServer tlss("test11", 18377, ".", fileHeaderContext, DomainConfig().setPartSizeLimit(0x10000)); + TransLogServer tlss(testDir.getDir(), 18377, ".", fileHeaderContext, createDomainConfig(0x10000)); TransLogClient tls("tcp/localhost:18377"); auto s1 = openDomainTest(tls, "sync"); uint64_t from(0), to(0); @@ -905,14 +933,16 @@ TEST("test truncation after short read") { const unsigned int NUM_ENTRIES = 1; const unsigned int TOTAL_NUM_ENTRIES = NUM_PACKETS * NUM_ENTRIES; const unsigned int ENTRYSIZE = 4080; - vespalib::string topdir("test10"); + test::DirectoryHandler topdir("test10"); vespalib::string domain("truncate"); - vespalib::string dir(topdir + "/" + domain); + vespalib::string dir(topdir.getDir() + "/" + domain); vespalib::string tlsspec("tcp/localhost:18377"); + + DomainConfig domainConfig = createDomainConfig(0x10000); DummyFileHeaderContext fileHeaderContext; { - TransLogServer tlss(topdir, 18377, ".", fileHeaderContext, 
DomainConfig().setPartSizeLimit(0x10000)); + TransLogServer tlss(topdir.getDir(), 18377, ".", fileHeaderContext, domainConfig); TransLogClient tls(tlsspec); createDomainTest(tls, domain, 0); @@ -924,18 +954,14 @@ TEST("test truncation after short read") { EXPECT_TRUE(s1->sync(TOTAL_NUM_ENTRIES, syncedTo)); EXPECT_EQUAL(syncedTo, TOTAL_NUM_ENTRIES); } + EXPECT_EQUAL(2u, countFiles(dir)); { - EXPECT_EQUAL(2u, countFiles(dir)); - } - { - TransLogServer tlss(topdir, 18377, ".", fileHeaderContext, DomainConfig().setPartSizeLimit(0x10000)); + TransLogServer tlss(topdir.getDir(), 18377, ".", fileHeaderContext, domainConfig); TransLogClient tls(tlsspec); auto s1 = openDomainTest(tls, domain); checkFilledDomainTest(*s1, TOTAL_NUM_ENTRIES); } - { - EXPECT_EQUAL(2u, countFiles(dir)); - } + EXPECT_EQUAL(2u, countFiles(dir)); { vespalib::string filename(dir + "/truncate-0000000000000017"); FastOS_File trfile(filename.c_str()); @@ -944,14 +970,12 @@ TEST("test truncation after short read") { trfile.Close(); } { - TransLogServer tlss(topdir, 18377, ".", fileHeaderContext, DomainConfig().setPartSizeLimit(0x10000)); + TransLogServer tlss(topdir.getDir(), 18377, ".", fileHeaderContext, domainConfig); TransLogClient tls(tlsspec); auto s1 = openDomainTest(tls, domain); checkFilledDomainTest(*s1, TOTAL_NUM_ENTRIES - 1); } - { - EXPECT_EQUAL(2u, countFiles(dir)); - } + EXPECT_EQUAL(2u, countFiles(dir)); } TEST_MAIN() { TEST_RUN_ALL(); } diff --git a/searchlib/src/tests/transactionlog/translogclient_test.sh b/searchlib/src/tests/transactionlog/translogclient_test.sh deleted file mode 100755 index 50d7c73fd6a..00000000000 --- a/searchlib/src/tests/transactionlog/translogclient_test.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash -# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
-set -e -rm -rf test7 test8 test9 test10 test11 test12 test13 testremove -$VALGRIND ./searchlib_translogclient_test_app -rm -rf test7 test8 test9 test10 test11 test12 test13 testremove diff --git a/searchlib/src/vespa/searchlib/aggregation/fs4hit.h b/searchlib/src/vespa/searchlib/aggregation/fs4hit.h index 135bbe44887..7cb078fe7e8 100644 --- a/searchlib/src/vespa/searchlib/aggregation/fs4hit.h +++ b/searchlib/src/vespa/searchlib/aggregation/fs4hit.h @@ -19,7 +19,7 @@ private: public: DECLARE_IDENTIFIABLE_NS2(search, aggregation, FS4Hit); DECLARE_NBO_SERIALIZE; - FS4Hit() : Hit(), _path(0), _docId(0), _globalId(), _distributionKey(-1) {} + FS4Hit() noexcept : Hit(), _path(0), _docId(0), _globalId(), _distributionKey(-1) {} FS4Hit(DocId docId, HitRank rank) : Hit(rank), _path(0), _docId(docId), _globalId(), _distributionKey(-1) {} FS4Hit *clone() const override { return new FS4Hit(*this); } diff --git a/searchlib/src/vespa/searchlib/attribute/CMakeLists.txt b/searchlib/src/vespa/searchlib/attribute/CMakeLists.txt index 4f46c279565..9e5a8d4dfbb 100644 --- a/searchlib/src/vespa/searchlib/attribute/CMakeLists.txt +++ b/searchlib/src/vespa/searchlib/attribute/CMakeLists.txt @@ -41,6 +41,7 @@ vespa_add_library(searchlib_attribute OBJECT enumattributesaver.cpp enumcomparator.cpp enumhintsearchcontext.cpp + enum_store_compaction_spec.cpp enum_store_dictionary.cpp enum_store_loaders.cpp enumstore.cpp diff --git a/searchlib/src/vespa/searchlib/attribute/attribute_header.cpp b/searchlib/src/vespa/searchlib/attribute/attribute_header.cpp index b68923b90bf..e40717e6375 100644 --- a/searchlib/src/vespa/searchlib/attribute/attribute_header.cpp +++ b/searchlib/src/vespa/searchlib/attribute/attribute_header.cpp @@ -191,9 +191,9 @@ AttributeHeader::internalExtractTags(const vespalib::GenericHeader &header) } AttributeHeader -AttributeHeader::extractTags(const vespalib::GenericHeader &header) +AttributeHeader::extractTags(const vespalib::GenericHeader &header, const vespalib::string 
&file_name) { - AttributeHeader result; + AttributeHeader result(file_name); result.internalExtractTags(header); return result; } diff --git a/searchlib/src/vespa/searchlib/attribute/attribute_header.h b/searchlib/src/vespa/searchlib/attribute/attribute_header.h index 00da28baf80..7c0b8f3084b 100644 --- a/searchlib/src/vespa/searchlib/attribute/attribute_header.h +++ b/searchlib/src/vespa/searchlib/attribute/attribute_header.h @@ -69,7 +69,7 @@ public: bool getPredicateParamsSet() const { return _predicateParamsSet; } bool getCollectionTypeParamsSet() const { return _collectionTypeParamsSet; } const std::optional<HnswIndexParams>& get_hnsw_index_params() const { return _hnsw_index_params; } - static AttributeHeader extractTags(const vespalib::GenericHeader &header); + static AttributeHeader extractTags(const vespalib::GenericHeader &header, const vespalib::string &file_name); void addTags(vespalib::GenericHeader &header) const; }; diff --git a/searchlib/src/vespa/searchlib/attribute/attributevector.cpp b/searchlib/src/vespa/searchlib/attribute/attributevector.cpp index 3bc1e5ec25f..a2ac482ebf3 100644 --- a/searchlib/src/vespa/searchlib/attribute/attributevector.cpp +++ b/searchlib/src/vespa/searchlib/attribute/attributevector.cpp @@ -800,6 +800,7 @@ AttributeVector::update_config(const Config& cfg) } drain_hold(1_Mi); // Wait until 1MiB or less on hold _config.setCompactionStrategy(cfg.getCompactionStrategy()); + updateStat(true); commit(); // might trigger compaction drain_hold(1_Mi); // Wait until 1MiB or less on hold } diff --git a/searchlib/src/vespa/searchlib/attribute/enum_store_compaction_spec.cpp b/searchlib/src/vespa/searchlib/attribute/enum_store_compaction_spec.cpp new file mode 100644 index 00000000000..43f599346f4 --- /dev/null +++ b/searchlib/src/vespa/searchlib/attribute/enum_store_compaction_spec.cpp @@ -0,0 +1,30 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+ +#include "enum_store_compaction_spec.h" +#include "i_enum_store.h" +#include "i_enum_store_dictionary.h" +#include <vespa/vespalib/datastore/compaction_strategy.h> +#include <vespa/vespalib/util/address_space.h> + +namespace search::enumstore { + +using vespalib::datastore::CompactionStrategy; + +vespalib::MemoryUsage +EnumStoreCompactionSpec::update_stat(IEnumStore& enum_store, const CompactionStrategy& compaction_strategy) +{ + auto values_memory_usage = enum_store.get_values_memory_usage(); + auto values_address_space_usage = enum_store.get_values_address_space_usage(); + _values = compaction_strategy.should_compact(values_memory_usage, values_address_space_usage); + auto& dict = enum_store.get_dictionary(); + auto dictionary_btree_usage = dict.get_btree_memory_usage(); + _btree_dictionary = compaction_strategy.should_compact_memory(dictionary_btree_usage); + auto dictionary_hash_usage = dict.get_hash_memory_usage(); + _hash_dictionary = compaction_strategy.should_compact_memory(dictionary_hash_usage); + auto retval = values_memory_usage; + retval.merge(dictionary_btree_usage); + retval.merge(dictionary_hash_usage); + return retval; +} + +} diff --git a/searchlib/src/vespa/searchlib/attribute/enum_store_compaction_spec.h b/searchlib/src/vespa/searchlib/attribute/enum_store_compaction_spec.h new file mode 100644 index 00000000000..11ecb4e93ef --- /dev/null +++ b/searchlib/src/vespa/searchlib/attribute/enum_store_compaction_spec.h @@ -0,0 +1,35 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+ +#pragma once + +#include <vespa/vespalib/datastore/compaction_spec.h> + +namespace search { class IEnumStore; } +namespace vespalib { class MemoryUsage; } +namespace vespalib::datastore { class CompactionStrategy; } + +namespace search::enumstore { + +/* + * Class describing how to compact an enum store + */ +class EnumStoreCompactionSpec { + using CompactionSpec = vespalib::datastore::CompactionSpec; + CompactionSpec _values; + bool _btree_dictionary; + bool _hash_dictionary; +public: + EnumStoreCompactionSpec() noexcept + : _values(), + _btree_dictionary(false), + _hash_dictionary(false) + { + } + + CompactionSpec get_values() const noexcept { return _values; } + bool btree_dictionary() const noexcept { return _btree_dictionary; } + bool hash_dictionary() const noexcept { return _hash_dictionary; } + vespalib::MemoryUsage update_stat(IEnumStore& enum_store, const vespalib::datastore::CompactionStrategy &compaction_strategy); +}; + +} diff --git a/searchlib/src/vespa/searchlib/attribute/enum_store_dictionary.cpp b/searchlib/src/vespa/searchlib/attribute/enum_store_dictionary.cpp index 6c929ad5981..8bc28abc238 100644 --- a/searchlib/src/vespa/searchlib/attribute/enum_store_dictionary.cpp +++ b/searchlib/src/vespa/searchlib/attribute/enum_store_dictionary.cpp @@ -311,6 +311,165 @@ EnumStoreDictionary<BTreeDictionaryT, HashDictionaryT>::normalize_posting_lists( } template <> +bool +EnumStoreDictionary<EnumTree>::normalize_posting_lists(std::function<void(std::vector<EntryRef>&)>, const EntryRefFilter&) +{ + LOG_ABORT("should not be reached"); +} + +namespace { + +template <typename HashDictionaryT> +class ChangeWriterBase +{ +protected: + HashDictionaryT* _hash_dict; + static constexpr bool has_hash_dictionary = true; + ChangeWriterBase() + : _hash_dict(nullptr) + { + } +public: + void set_hash_dict(HashDictionaryT &hash_dict) { _hash_dict = &hash_dict; } +}; + +template <> +class ChangeWriterBase<vespalib::datastore::NoHashDictionary> +{ +protected: + static 
constexpr bool has_hash_dictionary = false; + ChangeWriterBase() = default; +}; + +template <typename HashDictionaryT> +class ChangeWriter : public ChangeWriterBase<HashDictionaryT> { + using Parent = ChangeWriterBase<HashDictionaryT>; + using Parent::has_hash_dictionary; + std::vector<std::pair<EntryRef,uint32_t*>> _tree_refs; +public: + ChangeWriter(uint32_t capacity); + ~ChangeWriter(); + bool write(const std::vector<EntryRef>& refs); + void emplace_back(EntryRef key, uint32_t& tree_ref) { _tree_refs.emplace_back(std::make_pair(key, &tree_ref)); } +}; + +template <typename HashDictionaryT> +ChangeWriter<HashDictionaryT>::ChangeWriter(uint32_t capacity) + : ChangeWriterBase<HashDictionaryT>(), + _tree_refs() +{ + _tree_refs.reserve(capacity); +} + +template <typename HashDictionaryT> +ChangeWriter<HashDictionaryT>::~ChangeWriter() = default; + +template <typename HashDictionaryT> +bool +ChangeWriter<HashDictionaryT>::write(const std::vector<EntryRef> &refs) +{ + bool changed = false; + assert(refs.size() == _tree_refs.size()); + auto tree_ref = _tree_refs.begin(); + for (auto ref : refs) { + EntryRef old_ref(*tree_ref->second); + if (ref != old_ref) { + if (!changed) { + // Note: Needs review when porting to other platforms + // Assumes that other CPUs observes stores from this CPU in order + std::atomic_thread_fence(std::memory_order_release); + changed = true; + } + *tree_ref->second = ref.ref(); + if constexpr (has_hash_dictionary) { + auto find_result = this->_hash_dict->find(this->_hash_dict->get_default_comparator(), tree_ref->first); + assert(find_result != nullptr && find_result->first.load_relaxed() == tree_ref->first); + assert(find_result->second.load_relaxed() == old_ref); + find_result->second.store_release(ref); + } + } + ++tree_ref; + } + assert(tree_ref == _tree_refs.end()); + _tree_refs.clear(); + return changed; +} + +} + +template <typename BTreeDictionaryT, typename HashDictionaryT> +bool +EnumStoreDictionary<BTreeDictionaryT, 
HashDictionaryT>::normalize_posting_lists(std::function<void(std::vector<EntryRef>&)> normalize, const EntryRefFilter& filter) +{ + if constexpr (has_btree_dictionary) { + std::vector<EntryRef> refs; + refs.reserve(1024); + bool changed = false; + ChangeWriter<HashDictionaryT> change_writer(refs.capacity()); + if constexpr (has_hash_dictionary) { + change_writer.set_hash_dict(this->_hash_dict); + } + auto& dict = this->_btree_dict; + for (auto itr = dict.begin(); itr.valid(); ++itr) { + EntryRef ref(itr.getData()); + if (ref.valid()) { + if (filter.has(ref)) { + refs.emplace_back(ref); + change_writer.emplace_back(itr.getKey(), itr.getWData()); + if (refs.size() >= refs.capacity()) { + normalize(refs); + changed |= change_writer.write(refs); + refs.clear(); + } + } + } + } + if (!refs.empty()) { + normalize(refs); + changed |= change_writer.write(refs); + } + return changed; + } else { + return this->_hash_dict.normalize_values(normalize, filter); + } +} + +template <> +void +EnumStoreDictionary<EnumTree>::foreach_posting_list(std::function<void(const std::vector<EntryRef>&)>, const EntryRefFilter&) +{ + LOG_ABORT("should not be reached"); +} + +template <typename BTreeDictionaryT, typename HashDictionaryT> +void +EnumStoreDictionary<BTreeDictionaryT, HashDictionaryT>::foreach_posting_list(std::function<void(const std::vector<EntryRef>&)> callback, const EntryRefFilter& filter) +{ + if constexpr (has_btree_dictionary) { + std::vector<EntryRef> refs; + refs.reserve(1024); + auto& dict = this->_btree_dict; + for (auto itr = dict.begin(); itr.valid(); ++itr) { + EntryRef ref(itr.getData()); + if (ref.valid()) { + if (filter.has(ref)) { + refs.emplace_back(ref); + if (refs.size() >= refs.capacity()) { + callback(refs); + refs.clear(); + } + } + } + } + if (!refs.empty()) { + callback(refs); + } + } else { + this->_hash_dict.foreach_value(callback, filter); + } +} + +template <> const EnumPostingTree & EnumStoreDictionary<EnumTree>::get_posting_dictionary() const { diff 
--git a/searchlib/src/vespa/searchlib/attribute/enum_store_dictionary.h b/searchlib/src/vespa/searchlib/attribute/enum_store_dictionary.h index 4d0509c0eb1..db1176c5484 100644 --- a/searchlib/src/vespa/searchlib/attribute/enum_store_dictionary.h +++ b/searchlib/src/vespa/searchlib/attribute/enum_store_dictionary.h @@ -16,6 +16,7 @@ template <typename BTreeDictionaryT, typename HashDictionaryT = vespalib::datast class EnumStoreDictionary : public vespalib::datastore::UniqueStoreDictionary<BTreeDictionaryT, IEnumStoreDictionary, HashDictionaryT> { protected: using EntryRef = IEnumStoreDictionary::EntryRef; + using EntryRefFilter = IEnumStoreDictionary::EntryRefFilter; using Index = IEnumStoreDictionary::Index; using BTreeDictionaryType = BTreeDictionaryT; using EntryComparator = IEnumStoreDictionary::EntryComparator; @@ -54,6 +55,8 @@ public: void clear_all_posting_lists(std::function<void(EntryRef)> clearer) override; void update_posting_list(Index idx, const EntryComparator& cmp, std::function<EntryRef(EntryRef)> updater) override; bool normalize_posting_lists(std::function<EntryRef(EntryRef)> normalize) override; + bool normalize_posting_lists(std::function<void(std::vector<EntryRef>&)> normalize, const EntryRefFilter& filter) override; + void foreach_posting_list(std::function<void(const std::vector<EntryRef>&)> callback, const EntryRefFilter& filter) override; const EnumPostingTree& get_posting_dictionary() const override; }; diff --git a/searchlib/src/vespa/searchlib/attribute/enumattribute.hpp b/searchlib/src/vespa/searchlib/attribute/enumattribute.hpp index 2c6ac521b30..3e578856c2b 100644 --- a/searchlib/src/vespa/searchlib/attribute/enumattribute.hpp +++ b/searchlib/src/vespa/searchlib/attribute/enumattribute.hpp @@ -81,7 +81,7 @@ void EnumAttribute<B>::populate_address_space_usage(AddressSpaceUsage& usage) const { B::populate_address_space_usage(usage); - usage.set(AddressSpaceComponents::enum_store, _enumStore.get_address_space_usage()); + 
usage.set(AddressSpaceComponents::enum_store, _enumStore.get_values_address_space_usage()); } } // namespace search diff --git a/searchlib/src/vespa/searchlib/attribute/enumstore.h b/searchlib/src/vespa/searchlib/attribute/enumstore.h index a140a529c7d..7fe586b8ccc 100644 --- a/searchlib/src/vespa/searchlib/attribute/enumstore.h +++ b/searchlib/src/vespa/searchlib/attribute/enumstore.h @@ -2,6 +2,7 @@ #pragma once +#include "enum_store_compaction_spec.h" #include "enum_store_dictionary.h" #include "enum_store_loaders.h" #include "enumcomparator.h" @@ -55,10 +56,7 @@ private: bool _is_folded; ComparatorType _comparator; ComparatorType _foldedComparator; - vespalib::MemoryUsage _cached_values_memory_usage; - vespalib::AddressSpace _cached_values_address_space_usage; - vespalib::MemoryUsage _cached_dictionary_btree_usage; - vespalib::MemoryUsage _cached_dictionary_hash_usage; + enumstore::EnumStoreCompactionSpec _compaction_spec; EnumStoreT(const EnumStoreT & rhs) = delete; EnumStoreT & operator=(const EnumStoreT & rhs) = delete; @@ -96,7 +94,7 @@ public: vespalib::MemoryUsage get_values_memory_usage() const override { return _store.get_allocator().get_data_store().getMemoryUsage(); } vespalib::MemoryUsage get_dictionary_memory_usage() const override { return _dict->get_memory_usage(); } - vespalib::AddressSpace get_address_space_usage() const; + vespalib::AddressSpace get_values_address_space_usage() const override; void transfer_hold_lists(generation_t generation); void trim_hold_lists(generation_t first_used); @@ -199,9 +197,9 @@ public: bool find_index(EntryType value, Index& idx) const; void free_unused_values() override; void free_unused_values(IndexList to_remove); - vespalib::MemoryUsage update_stat() override; + vespalib::MemoryUsage update_stat(const CompactionStrategy& compaction_strategy) override; std::unique_ptr<EnumIndexRemapper> consider_compact_values(const CompactionStrategy& compaction_strategy) override; - std::unique_ptr<EnumIndexRemapper> 
compact_worst_values(bool compact_memory, bool compact_address_space) override; + std::unique_ptr<EnumIndexRemapper> compact_worst_values(CompactionSpec compaction_spec, const CompactionStrategy& compaction_strategy) override; bool consider_compact_dictionary(const CompactionStrategy& compaction_strategy) override; uint64_t get_compaction_count() const override { return _store.get_data_store().get_compaction_count(); diff --git a/searchlib/src/vespa/searchlib/attribute/enumstore.hpp b/searchlib/src/vespa/searchlib/attribute/enumstore.hpp index c202d780659..e1adca2b89a 100644 --- a/searchlib/src/vespa/searchlib/attribute/enumstore.hpp +++ b/searchlib/src/vespa/searchlib/attribute/enumstore.hpp @@ -18,10 +18,11 @@ #include <vespa/vespalib/datastore/unique_store_string_allocator.hpp> #include <vespa/vespalib/util/array.hpp> #include <vespa/searchlib/util/bufferwriter.h> -#include <vespa/searchcommon/common/compaction_strategy.h> +#include <vespa/vespalib/datastore/compaction_strategy.h> namespace search { +using vespalib::datastore::CompactionStrategy; using vespalib::datastore::EntryComparator; std::unique_ptr<vespalib::datastore::IUniqueStoreDictionary> @@ -77,8 +78,7 @@ EnumStoreT<EntryT>::EnumStoreT(bool has_postings, const DictionaryConfig & dict_ _is_folded(dict_cfg.getMatch() == DictionaryConfig::Match::UNCASED), _comparator(_store.get_data_store()), _foldedComparator(make_optionally_folded_comparator(is_folded())), - _cached_values_memory_usage(), - _cached_values_address_space_usage(0, 0, (1ull << 32)) + _compaction_spec() { _store.set_dictionary(make_enum_store_dictionary(*this, has_postings, dict_cfg, allocate_comparator(), @@ -91,9 +91,9 @@ EnumStoreT<EntryT>::~EnumStoreT() = default; template <typename EntryT> vespalib::AddressSpace -EnumStoreT<EntryT>::get_address_space_usage() const +EnumStoreT<EntryT>::get_values_address_space_usage() const { - return _store.get_address_space_usage(); + return _store.get_values_address_space_usage(); } template 
<typename EntryT> @@ -211,40 +211,26 @@ EnumStoreT<EntryT>::insert(EntryType value) template <typename EntryT> vespalib::MemoryUsage -EnumStoreT<EntryT>::update_stat() +EnumStoreT<EntryT>::update_stat(const CompactionStrategy& compaction_strategy) { - auto &store = _store.get_data_store(); - _cached_values_memory_usage = store.getMemoryUsage(); - _cached_values_address_space_usage = store.getAddressSpaceUsage(); - _cached_dictionary_btree_usage = _dict->get_btree_memory_usage(); - _cached_dictionary_hash_usage = _dict->get_hash_memory_usage(); - auto retval = _cached_values_memory_usage; - retval.merge(_cached_dictionary_btree_usage); - retval.merge(_cached_dictionary_hash_usage); - return retval; + return _compaction_spec.update_stat(*this, compaction_strategy); } template <typename EntryT> std::unique_ptr<IEnumStore::EnumIndexRemapper> EnumStoreT<EntryT>::consider_compact_values(const CompactionStrategy& compaction_strategy) { - size_t used_bytes = _cached_values_memory_usage.usedBytes(); - size_t dead_bytes = _cached_values_memory_usage.deadBytes(); - size_t used_address_space = _cached_values_address_space_usage.used(); - size_t dead_address_space = _cached_values_address_space_usage.dead(); - bool compact_memory = compaction_strategy.should_compact_memory(used_bytes, dead_bytes); - bool compact_address_space = compaction_strategy.should_compact_address_space(used_address_space, dead_address_space); - if (compact_memory || compact_address_space) { - return compact_worst_values(compact_memory, compact_address_space); + if (_compaction_spec.get_values().compact()) { + return compact_worst_values(_compaction_spec.get_values(), compaction_strategy); } return std::unique_ptr<IEnumStore::EnumIndexRemapper>(); } template <typename EntryT> std::unique_ptr<IEnumStore::EnumIndexRemapper> -EnumStoreT<EntryT>::compact_worst_values(bool compact_memory, bool compact_address_space) +EnumStoreT<EntryT>::compact_worst_values(CompactionSpec compaction_spec, const 
CompactionStrategy& compaction_strategy) { - return _store.compact_worst(compact_memory, compact_address_space); + return _store.compact_worst(compaction_spec, compaction_strategy); } template <typename EntryT> @@ -254,16 +240,12 @@ EnumStoreT<EntryT>::consider_compact_dictionary(const CompactionStrategy& compac if (_dict->has_held_buffers()) { return false; } - if (compaction_strategy.should_compact_memory(_cached_dictionary_btree_usage.usedBytes(), - _cached_dictionary_btree_usage.deadBytes())) - { - _dict->compact_worst(true, false); + if (_compaction_spec.btree_dictionary()) { + _dict->compact_worst(true, false, compaction_strategy); return true; } - if (compaction_strategy.should_compact_memory(_cached_dictionary_hash_usage.usedBytes(), - _cached_dictionary_hash_usage.deadBytes())) - { - _dict->compact_worst(false, true); + if (_compaction_spec.hash_dictionary()) { + _dict->compact_worst(false, true, compaction_strategy); return true; } return false; diff --git a/searchlib/src/vespa/searchlib/attribute/i_enum_store.h b/searchlib/src/vespa/searchlib/attribute/i_enum_store.h index 1f3165828bc..e3782514530 100644 --- a/searchlib/src/vespa/searchlib/attribute/i_enum_store.h +++ b/searchlib/src/vespa/searchlib/attribute/i_enum_store.h @@ -6,10 +6,18 @@ #include "enum_store_types.h" #include <vespa/vespalib/datastore/entryref.h> #include <vespa/vespalib/datastore/unique_store_enumerator.h> -#include <vespa/vespalib/util/memoryusage.h> + +namespace vespalib { + +class AddressSpace; +class MemoryUsage; + +} namespace vespalib::datastore { +class CompactionSpec; +class CompactionStrategy; class DataStoreBase; template <typename> class UniqueStoreRemapper; @@ -19,7 +27,6 @@ template <typename> class UniqueStoreRemapper; namespace search { class BufferWriter; -class CompactionStrategy; class IEnumStoreDictionary; /** @@ -30,6 +37,8 @@ public: using Index = enumstore::Index; using InternalIndex = enumstore::InternalIndex; using IndexVector = enumstore::IndexVector; + 
using CompactionSpec = vespalib::datastore::CompactionSpec; + using CompactionStrategy = vespalib::datastore::CompactionStrategy; using EnumHandle = enumstore::EnumHandle; using EnumVector = enumstore::EnumVector; using EnumIndexRemapper = vespalib::datastore::UniqueStoreRemapper<InternalIndex>; @@ -49,10 +58,11 @@ public: virtual const IEnumStoreDictionary& get_dictionary() const = 0; virtual uint32_t get_num_uniques() const = 0; virtual vespalib::MemoryUsage get_values_memory_usage() const = 0; + virtual vespalib::AddressSpace get_values_address_space_usage() const = 0; virtual vespalib::MemoryUsage get_dictionary_memory_usage() const = 0; - virtual vespalib::MemoryUsage update_stat() = 0; + virtual vespalib::MemoryUsage update_stat(const CompactionStrategy& compaction_strategy) = 0; virtual std::unique_ptr<EnumIndexRemapper> consider_compact_values(const CompactionStrategy& compaction_strategy) = 0; - virtual std::unique_ptr<EnumIndexRemapper> compact_worst_values(bool compact_memory, bool compact_address_space) = 0; + virtual std::unique_ptr<EnumIndexRemapper> compact_worst_values(CompactionSpec compaction_spec, const CompactionStrategy& compaction_strategy) = 0; virtual bool consider_compact_dictionary(const CompactionStrategy& compaction_strategy) = 0; virtual uint64_t get_compaction_count() const = 0; // Should only be used by unit tests. 
diff --git a/searchlib/src/vespa/searchlib/attribute/i_enum_store_dictionary.h b/searchlib/src/vespa/searchlib/attribute/i_enum_store_dictionary.h index a8cf6881b86..a9716ec5d05 100644 --- a/searchlib/src/vespa/searchlib/attribute/i_enum_store_dictionary.h +++ b/searchlib/src/vespa/searchlib/attribute/i_enum_store_dictionary.h @@ -30,6 +30,7 @@ class IEnumStoreDictionary : public vespalib::datastore::IUniqueStoreDictionary public: using EntryRef = vespalib::datastore::EntryRef; using EntryComparator = vespalib::datastore::EntryComparator; + using EntryRefFilter = vespalib::datastore::EntryRefFilter; using EnumVector = IEnumStore::EnumVector; using Index = IEnumStore::Index; using IndexList = IEnumStore::IndexList; @@ -52,7 +53,25 @@ public: virtual Index remap_index(Index idx) = 0; virtual void clear_all_posting_lists(std::function<void(EntryRef)> clearer) = 0; virtual void update_posting_list(Index idx, const EntryComparator& cmp, std::function<EntryRef(EntryRef)> updater) = 0; + /* + * Scan dictionary and call normalize function for each value. If + * returned value is different then write back the modified value to + * the dictionary. Only used by unit tests. + */ virtual bool normalize_posting_lists(std::function<EntryRef(EntryRef)> normalize) = 0; + /* + * Scan dictionary and call normalize function for batches of values + * that pass the filter. Write back modified values to the dictionary. + * Used by compaction of posting lists when moving short arrays, + * bitvectors or btree roots. + */ + virtual bool normalize_posting_lists(std::function<void(std::vector<EntryRef>&)> normalize, const EntryRefFilter& filter) = 0; + /* + * Scan dictionary and call callback function for batches of values + * that pass the filter. Used by compaction of posting lists when + * moving btree nodes. 
+ */ + virtual void foreach_posting_list(std::function<void(const std::vector<EntryRef>&)> callback, const EntryRefFilter& filter) = 0; virtual const EnumPostingTree& get_posting_dictionary() const = 0; }; diff --git a/searchlib/src/vespa/searchlib/attribute/ipostinglistattributebase.h b/searchlib/src/vespa/searchlib/attribute/ipostinglistattributebase.h index 135870e29a5..20cec9a31c2 100644 --- a/searchlib/src/vespa/searchlib/attribute/ipostinglistattributebase.h +++ b/searchlib/src/vespa/searchlib/attribute/ipostinglistattributebase.h @@ -4,7 +4,7 @@ #include <vespa/searchcommon/attribute/iattributevector.h> -namespace search { class CompactionStrategy; } +namespace vespalib::datastore { class CompactionStrategy; } namespace vespalib { class MemoryUsage; } @@ -13,16 +13,9 @@ namespace search::attribute { class IPostingListAttributeBase { public: - virtual - ~IPostingListAttributeBase() - { - } - - virtual void - clearPostings(IAttributeVector::EnumHandle eidx, - uint32_t fromLid, - uint32_t toLid) = 0; - + using CompactionStrategy = vespalib::datastore::CompactionStrategy; + virtual ~IPostingListAttributeBase() = default; + virtual void clearPostings(IAttributeVector::EnumHandle eidx, uint32_t fromLid, uint32_t toLid) = 0; virtual void forwardedShrinkLidSpace(uint32_t newSize) = 0; virtual vespalib::MemoryUsage getMemoryUsage() const = 0; virtual bool consider_compact_worst_btree_nodes(const CompactionStrategy& compaction_strategy) = 0; diff --git a/searchlib/src/vespa/searchlib/attribute/multi_value_mapping.h b/searchlib/src/vespa/searchlib/attribute/multi_value_mapping.h index 9720e88543d..81abaa05a45 100644 --- a/searchlib/src/vespa/searchlib/attribute/multi_value_mapping.h +++ b/searchlib/src/vespa/searchlib/attribute/multi_value_mapping.h @@ -44,7 +44,7 @@ public: void doneLoadFromMultiValue() { _store.setInitializing(false); } - void compactWorst(bool compactMemory, bool compactAddressSpace) override; + void compactWorst(CompactionSpec compactionSpec, const 
CompactionStrategy& compaction_strategy) override; vespalib::AddressSpace getAddressSpaceUsage() const override; vespalib::MemoryUsage getArrayStoreMemoryUsage() const override; diff --git a/searchlib/src/vespa/searchlib/attribute/multi_value_mapping.hpp b/searchlib/src/vespa/searchlib/attribute/multi_value_mapping.hpp index 25065a200e9..fb81a60cb13 100644 --- a/searchlib/src/vespa/searchlib/attribute/multi_value_mapping.hpp +++ b/searchlib/src/vespa/searchlib/attribute/multi_value_mapping.hpp @@ -53,9 +53,9 @@ MultiValueMapping<EntryT,RefT>::replace(uint32_t docId, ConstArrayRef values) template <typename EntryT, typename RefT> void -MultiValueMapping<EntryT,RefT>::compactWorst(bool compactMemory, bool compactAddressSpace) +MultiValueMapping<EntryT,RefT>::compactWorst(CompactionSpec compaction_spec, const CompactionStrategy& compaction_strategy) { - vespalib::datastore::ICompactionContext::UP compactionContext(_store.compactWorst(compactMemory, compactAddressSpace)); + vespalib::datastore::ICompactionContext::UP compactionContext(_store.compactWorst(compaction_spec, compaction_strategy)); if (compactionContext) { compactionContext->compact(vespalib::ArrayRef<EntryRef>(&_indices[0], _indices.size())); } diff --git a/searchlib/src/vespa/searchlib/attribute/multi_value_mapping_base.cpp b/searchlib/src/vespa/searchlib/attribute/multi_value_mapping_base.cpp index 2edc30cc2c4..b0d50c129c6 100644 --- a/searchlib/src/vespa/searchlib/attribute/multi_value_mapping_base.cpp +++ b/searchlib/src/vespa/searchlib/attribute/multi_value_mapping_base.cpp @@ -1,17 +1,19 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
#include "multi_value_mapping_base.h" -#include <vespa/searchcommon/common/compaction_strategy.h> +#include <vespa/vespalib/datastore/compaction_spec.h> +#include <vespa/vespalib/datastore/compaction_strategy.h> #include <cassert> namespace search::attribute { +using vespalib::datastore::CompactionStrategy; + MultiValueMappingBase::MultiValueMappingBase(const vespalib::GrowStrategy &gs, vespalib::GenerationHolder &genHolder) : _indices(gs, genHolder), _totalValues(0u), - _cachedArrayStoreMemoryUsage(), - _cachedArrayStoreAddressSpaceUsage(0, 0, (1ull << 32)) + _compaction_spec() { } @@ -65,11 +67,12 @@ MultiValueMappingBase::getMemoryUsage() const } vespalib::MemoryUsage -MultiValueMappingBase::updateStat() +MultiValueMappingBase::updateStat(const CompactionStrategy& compaction_strategy) { - _cachedArrayStoreAddressSpaceUsage = getAddressSpaceUsage(); - vespalib::MemoryUsage retval = getArrayStoreMemoryUsage(); - _cachedArrayStoreMemoryUsage = retval; + auto array_store_address_space_usage = getAddressSpaceUsage(); + auto array_store_memory_usage = getArrayStoreMemoryUsage(); + _compaction_spec = compaction_strategy.should_compact(array_store_memory_usage, array_store_address_space_usage); + auto retval = array_store_memory_usage; retval.merge(_indices.getMemoryUsage()); return retval; } @@ -77,14 +80,8 @@ MultiValueMappingBase::updateStat() bool MultiValueMappingBase::considerCompact(const CompactionStrategy &compactionStrategy) { - size_t usedBytes = _cachedArrayStoreMemoryUsage.usedBytes(); - size_t deadBytes = _cachedArrayStoreMemoryUsage.deadBytes(); - size_t usedArrays = _cachedArrayStoreAddressSpaceUsage.used(); - size_t deadArrays = _cachedArrayStoreAddressSpaceUsage.dead(); - bool compactMemory = compactionStrategy.should_compact_memory(usedBytes, deadBytes); - bool compactAddressSpace = compactionStrategy.should_compact_address_space(usedArrays, deadArrays); - if (compactMemory || compactAddressSpace) { - compactWorst(compactMemory, compactAddressSpace); 
+ if (_compaction_spec.compact()) { + compactWorst(_compaction_spec, compactionStrategy); return true; } return false; diff --git a/searchlib/src/vespa/searchlib/attribute/multi_value_mapping_base.h b/searchlib/src/vespa/searchlib/attribute/multi_value_mapping_base.h index 952e9dbbe56..f27a9f1667c 100644 --- a/searchlib/src/vespa/searchlib/attribute/multi_value_mapping_base.h +++ b/searchlib/src/vespa/searchlib/attribute/multi_value_mapping_base.h @@ -2,12 +2,16 @@ #pragma once +#include <vespa/vespalib/datastore/compaction_spec.h> #include <vespa/vespalib/datastore/entryref.h> #include <vespa/vespalib/util/address_space.h> #include <vespa/vespalib/util/rcuvector.h> #include <functional> -namespace search { class CompactionStrategy; } +namespace vespalib::datastore { +class CompactionSpec; +class CompactionStrategy; +} namespace search::attribute { @@ -17,14 +21,15 @@ namespace search::attribute { class MultiValueMappingBase { public: + using CompactionSpec = vespalib::datastore::CompactionSpec; + using CompactionStrategy = vespalib::datastore::CompactionStrategy; using EntryRef = vespalib::datastore::EntryRef; using RefVector = vespalib::RcuVectorBase<EntryRef>; protected: RefVector _indices; size_t _totalValues; - vespalib::MemoryUsage _cachedArrayStoreMemoryUsage; - vespalib::AddressSpace _cachedArrayStoreAddressSpaceUsage; + CompactionSpec _compaction_spec; MultiValueMappingBase(const vespalib::GrowStrategy &gs, vespalib::GenerationHolder &genHolder); virtual ~MultiValueMappingBase(); @@ -38,7 +43,7 @@ public: virtual vespalib::MemoryUsage getArrayStoreMemoryUsage() const = 0; virtual vespalib::AddressSpace getAddressSpaceUsage() const = 0; vespalib::MemoryUsage getMemoryUsage() const; - vespalib::MemoryUsage updateStat(); + vespalib::MemoryUsage updateStat(const CompactionStrategy& compaction_strategy); size_t getTotalValueCnt() const { return _totalValues; } RefCopyVector getRefCopy(uint32_t size) const; @@ -51,7 +56,7 @@ public: uint32_t getNumKeys() const { 
return _indices.size(); } uint32_t getCapacityKeys() const { return _indices.capacity(); } - virtual void compactWorst(bool compatMemory, bool compactAddressSpace) = 0; + virtual void compactWorst(CompactionSpec compaction_spec, const CompactionStrategy& compaction_strategy) = 0; bool considerCompact(const CompactionStrategy &compactionStrategy); }; diff --git a/searchlib/src/vespa/searchlib/attribute/multienumattribute.cpp b/searchlib/src/vespa/searchlib/attribute/multienumattribute.cpp index b114a355bb4..8790bdd9885 100644 --- a/searchlib/src/vespa/searchlib/attribute/multienumattribute.cpp +++ b/searchlib/src/vespa/searchlib/attribute/multienumattribute.cpp @@ -30,13 +30,17 @@ remap_enum_store_refs(const EnumIndexRemapper& remapper, AttributeVector& v, att v.logEnumStoreEvent("compactfixup", "drain"); { AttributeVector::EnumModifier enum_guard(v.getEnumModifier()); + auto& filter = remapper.get_entry_ref_filter(); v.logEnumStoreEvent("compactfixup", "start"); for (uint32_t doc = 0; doc < v.getNumDocs(); ++doc) { vespalib::ConstArrayRef<WeightedIndex> indicesRef(multi_value_mapping.get(doc)); WeightedIndexVector indices(indicesRef.cbegin(), indicesRef.cend()); for (uint32_t i = 0; i < indices.size(); ++i) { - EnumIndex oldIndex = indices[i].value(); - indices[i] = WeightedIndex(remapper.remap(oldIndex), indices[i].weight()); + EnumIndex ref = indices[i].value(); + if (ref.valid() && filter.has(ref)) { + ref = remapper.remap(ref); + } + indices[i] = WeightedIndex(ref, indices[i].weight()); } std::atomic_thread_fence(std::memory_order_release); multi_value_mapping.replace(doc, indices); diff --git a/searchlib/src/vespa/searchlib/attribute/multienumattribute.hpp b/searchlib/src/vespa/searchlib/attribute/multienumattribute.hpp index acd03a37497..251bbd7c8a7 100644 --- a/searchlib/src/vespa/searchlib/attribute/multienumattribute.hpp +++ b/searchlib/src/vespa/searchlib/attribute/multienumattribute.hpp @@ -207,8 +207,9 @@ MultiValueEnumAttribute<B, M>::onUpdateStat() { 
// update statistics vespalib::MemoryUsage total; - total.merge(this->_enumStore.update_stat()); - total.merge(this->_mvMapping.updateStat()); + auto& compaction_strategy = this->getConfig().getCompactionStrategy(); + total.merge(this->_enumStore.update_stat(compaction_strategy)); + total.merge(this->_mvMapping.updateStat(compaction_strategy)); total.merge(this->getChangeVectorMemoryUsage()); mergeMemoryStats(total); this->updateStatistics(this->_mvMapping.getTotalValueCnt(), this->_enumStore.get_num_uniques(), total.allocatedBytes(), diff --git a/searchlib/src/vespa/searchlib/attribute/multinumericattribute.hpp b/searchlib/src/vespa/searchlib/attribute/multinumericattribute.hpp index 454eddeb6d4..10f837ec1ab 100644 --- a/searchlib/src/vespa/searchlib/attribute/multinumericattribute.hpp +++ b/searchlib/src/vespa/searchlib/attribute/multinumericattribute.hpp @@ -76,7 +76,8 @@ MultiValueNumericAttribute<B, M>::onCommit() template <typename B, typename M> void MultiValueNumericAttribute<B, M>::onUpdateStat() { - vespalib::MemoryUsage usage = this->_mvMapping.updateStat(); + auto& compaction_strategy = this->getConfig().getCompactionStrategy(); + vespalib::MemoryUsage usage = this->_mvMapping.updateStat(compaction_strategy); usage.merge(this->getChangeVectorMemoryUsage()); this->updateStatistics(this->_mvMapping.getTotalValueCnt(), this->_mvMapping.getTotalValueCnt(), usage.allocatedBytes(), usage.usedBytes(), usage.deadBytes(), usage.allocatedBytesOnHold()); diff --git a/searchlib/src/vespa/searchlib/attribute/multinumericpostattribute.hpp b/searchlib/src/vespa/searchlib/attribute/multinumericpostattribute.hpp index a655c30bc37..051a22bd5e8 100644 --- a/searchlib/src/vespa/searchlib/attribute/multinumericpostattribute.hpp +++ b/searchlib/src/vespa/searchlib/attribute/multinumericpostattribute.hpp @@ -18,7 +18,8 @@ template <typename B, typename M> void MultiValueNumericPostingAttribute<B, M>::mergeMemoryStats(vespalib::MemoryUsage & total) { - 
total.merge(this->getPostingList().update_stat()); + auto& compaction_strategy = this->getConfig().getCompactionStrategy(); + total.merge(this->getPostingList().update_stat(compaction_strategy)); } template <typename B, typename M> diff --git a/searchlib/src/vespa/searchlib/attribute/multistringpostattribute.hpp b/searchlib/src/vespa/searchlib/attribute/multistringpostattribute.hpp index 2abe5894163..2bb4d2ada60 100644 --- a/searchlib/src/vespa/searchlib/attribute/multistringpostattribute.hpp +++ b/searchlib/src/vespa/searchlib/attribute/multistringpostattribute.hpp @@ -63,7 +63,8 @@ template <typename B, typename T> void MultiValueStringPostingAttributeT<B, T>::mergeMemoryStats(vespalib::MemoryUsage &total) { - total.merge(this->_postingList.update_stat()); + auto& compaction_strategy = this->getConfig().getCompactionStrategy(); + total.merge(this->_postingList.update_stat(compaction_strategy)); } template <typename B, typename T> diff --git a/searchlib/src/vespa/searchlib/attribute/posting_store_compaction_spec.h b/searchlib/src/vespa/searchlib/attribute/posting_store_compaction_spec.h new file mode 100644 index 00000000000..50b5402056f --- /dev/null +++ b/searchlib/src/vespa/searchlib/attribute/posting_store_compaction_spec.h @@ -0,0 +1,28 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+ +#pragma once + +namespace search::attribute { + +/* + * Class describing how to compact a posting store + */ +class PostingStoreCompactionSpec { + bool _btree_nodes; // btree nodes + bool _store; // short arrays, b-tree roots, bitvectors +public: + PostingStoreCompactionSpec() noexcept + : _btree_nodes(false), + _store(false) + { + } + PostingStoreCompactionSpec(bool btree_nodes_, bool store_) noexcept + : _btree_nodes(btree_nodes_), + _store(store_) + { + } + bool btree_nodes() const noexcept { return _btree_nodes; } + bool store() const noexcept { return _store; } +}; + +} diff --git a/searchlib/src/vespa/searchlib/attribute/postingstore.cpp b/searchlib/src/vespa/searchlib/attribute/postingstore.cpp index 3451c2b0456..df016b050af 100644 --- a/searchlib/src/vespa/searchlib/attribute/postingstore.cpp +++ b/searchlib/src/vespa/searchlib/attribute/postingstore.cpp @@ -7,11 +7,14 @@ #include <vespa/vespalib/btree/btreeiterator.hpp> #include <vespa/vespalib/btree/btreerootbase.cpp> #include <vespa/vespalib/datastore/datastore.hpp> +#include <vespa/vespalib/datastore/compaction_spec.h> +#include <vespa/vespalib/datastore/entry_ref_filter.h> #include <vespa/vespalib/datastore/buffer_type.hpp> namespace search::attribute { using vespalib::btree::BTreeNoLeafData; +using vespalib::datastore::EntryRefFilter; // #define FORCE_BITVECTORS @@ -33,8 +36,7 @@ PostingStoreBase2::PostingStoreBase2(IEnumStoreDictionary& dictionary, Status &s _dictionary(dictionary), _status(status), _bvExtraBytes(0), - _cached_allocator_memory_usage(), - _cached_store_memory_usage() + _compaction_spec() { } @@ -127,45 +129,47 @@ PostingStore<DataT>::removeSparseBitVectors() } } if (needscan) { - res = _dictionary.normalize_posting_lists([this](EntryRef posting_idx) -> EntryRef - { return consider_remove_sparse_bitvector(posting_idx); }); + EntryRefFilter filter(RefType::numBuffers(), RefType::offset_bits); + filter.add_buffers(_bvType.get_active_buffers()); + res = 
_dictionary.normalize_posting_lists([this](std::vector<EntryRef>& refs) + { consider_remove_sparse_bitvector(refs); }, + filter); } return res; } template <typename DataT> -typename PostingStore<DataT>::EntryRef -PostingStore<DataT>::consider_remove_sparse_bitvector(EntryRef ref) +void +PostingStore<DataT>::consider_remove_sparse_bitvector(std::vector<EntryRef>& refs) { - if (!ref.valid() || !isBitVector(getTypeId(EntryRef(ref)))) { - return ref; - } - RefType iRef(ref); - uint32_t typeId = getTypeId(iRef); - assert(isBitVector(typeId)); - assert(_bvs.find(ref.ref() )!= _bvs.end()); - BitVectorEntry *bve = getWBitVectorEntry(iRef); - BitVector &bv = *bve->_bv.get(); - uint32_t docFreq = bv.countTrueBits(); - if (bve->_tree.valid()) { - RefType iRef2(bve->_tree); - assert(isBTree(iRef2)); - const BTreeType *tree = getTreeEntry(iRef2); - assert(tree->size(_allocator) == docFreq); - (void) tree; - } - if (docFreq < _minBvDocFreq) { - dropBitVector(ref); - if (ref.valid()) { + for (auto& ref : refs) { + RefType iRef(ref); + assert(iRef.valid()); + uint32_t typeId = getTypeId(iRef); + assert(isBitVector(typeId)); + assert(_bvs.find(iRef.ref()) != _bvs.end()); + BitVectorEntry *bve = getWBitVectorEntry(iRef); + BitVector &bv = *bve->_bv.get(); + uint32_t docFreq = bv.countTrueBits(); + if (bve->_tree.valid()) { + RefType iRef2(bve->_tree); + assert(isBTree(iRef2)); + const BTreeType *tree = getTreeEntry(iRef2); + assert(tree->size(_allocator) == docFreq); + (void) tree; + } + if (docFreq < _minBvDocFreq) { + dropBitVector(ref); iRef = ref; - typeId = getTypeId(iRef); - if (isBTree(typeId)) { - BTreeType *tree = getWTreeEntry(iRef); - normalizeTree(ref, tree, false); + if (iRef.valid()) { + typeId = getTypeId(iRef); + if (isBTree(typeId)) { + BTreeType *tree = getWTreeEntry(iRef); + normalizeTree(ref, tree, false); + } } } } - return ref; } template <typename DataT> @@ -632,13 +636,14 @@ PostingStore<DataT>::getMemoryUsage() const template <typename DataT> 
vespalib::MemoryUsage -PostingStore<DataT>::update_stat() +PostingStore<DataT>::update_stat(const CompactionStrategy& compaction_strategy) { vespalib::MemoryUsage usage; - _cached_allocator_memory_usage = _allocator.getMemoryUsage(); - _cached_store_memory_usage = _store.getMemoryUsage(); - usage.merge(_cached_allocator_memory_usage); - usage.merge(_cached_store_memory_usage); + auto btree_nodes_memory_usage = _allocator.getMemoryUsage(); + auto store_memory_usage = _store.getMemoryUsage(); + _compaction_spec = PostingStoreCompactionSpec(compaction_strategy.should_compact_memory(btree_nodes_memory_usage), compaction_strategy.should_compact_memory(store_memory_usage)); + usage.merge(btree_nodes_memory_usage); + usage.merge(store_memory_usage); uint64_t bvExtraBytes = _bvExtraBytes; usage.incUsedBytes(bvExtraBytes); usage.incAllocatedBytes(bvExtraBytes); @@ -647,96 +652,114 @@ PostingStore<DataT>::update_stat() template <typename DataT> void -PostingStore<DataT>::move_btree_nodes(EntryRef ref) +PostingStore<DataT>::move_btree_nodes(const std::vector<EntryRef>& refs) { - if (ref.valid()) { + for (auto ref : refs) { RefType iRef(ref); + assert(iRef.valid()); uint32_t typeId = getTypeId(iRef); uint32_t clusterSize = getClusterSize(typeId); - if (clusterSize == 0) { - if (isBitVector(typeId)) { - BitVectorEntry *bve = getWBitVectorEntry(iRef); - RefType iRef2(bve->_tree); - if (iRef2.valid()) { - assert(isBTree(iRef2)); - BTreeType *tree = getWTreeEntry(iRef2); - tree->move_nodes(_allocator); - } - } else { - BTreeType *tree = getWTreeEntry(iRef); + assert(clusterSize == 0); + if (isBitVector(typeId)) { + BitVectorEntry *bve = getWBitVectorEntry(iRef); + RefType iRef2(bve->_tree); + if (iRef2.valid()) { + assert(isBTree(iRef2)); + BTreeType *tree = getWTreeEntry(iRef2); tree->move_nodes(_allocator); } + } else { + assert(isBTree(typeId)); + BTreeType *tree = getWTreeEntry(iRef); + tree->move_nodes(_allocator); } } } template <typename DataT> -typename 
PostingStore<DataT>::EntryRef -PostingStore<DataT>::move(EntryRef ref) +void +PostingStore<DataT>::move(std::vector<EntryRef>& refs) { - if (!ref.valid()) { - return EntryRef(); - } - RefType iRef(ref); - uint32_t typeId = getTypeId(iRef); - uint32_t clusterSize = getClusterSize(typeId); - if (clusterSize == 0) { - if (isBitVector(typeId)) { - BitVectorEntry *bve = getWBitVectorEntry(iRef); - RefType iRef2(bve->_tree); - if (iRef2.valid()) { - assert(isBTree(iRef2)); - if (_store.getCompacting(iRef2)) { - BTreeType *tree = getWTreeEntry(iRef2); - auto ref_and_ptr = allocBTreeCopy(*tree); - tree->prepare_hold(); - bve->_tree = ref_and_ptr.ref; + for (auto& ref : refs) { + RefType iRef(ref); + assert(iRef.valid()); + uint32_t typeId = getTypeId(iRef); + uint32_t clusterSize = getClusterSize(typeId); + if (clusterSize == 0) { + if (isBitVector(typeId)) { + BitVectorEntry *bve = getWBitVectorEntry(iRef); + RefType iRef2(bve->_tree); + if (iRef2.valid()) { + assert(isBTree(iRef2)); + if (_store.getCompacting(iRef2)) { + BTreeType *tree = getWTreeEntry(iRef2); + auto ref_and_ptr = allocBTreeCopy(*tree); + tree->prepare_hold(); + // Note: Needs review when porting to other platforms + // Assumes that other CPUs observes stores from this CPU in order + std::atomic_thread_fence(std::memory_order_release); + bve->_tree = ref_and_ptr.ref; + } } + if (_store.getCompacting(iRef)) { + auto new_ref = allocBitVectorCopy(*bve).ref; + _bvs.erase(iRef.ref()); + _bvs.insert(new_ref.ref()); + ref = new_ref; + } + } else { + assert(isBTree(typeId)); + assert(_store.getCompacting(iRef)); + BTreeType *tree = getWTreeEntry(iRef); + auto ref_and_ptr = allocBTreeCopy(*tree); + tree->prepare_hold(); + ref = ref_and_ptr.ref; } - if (!_store.getCompacting(ref)) { - return ref; - } - auto new_ref = allocBitVectorCopy(*bve).ref; - _bvs.erase(ref.ref()); - _bvs.insert(new_ref.ref()); - return new_ref; } else { - if (!_store.getCompacting(ref)) { - return ref; - } - BTreeType *tree = 
getWTreeEntry(iRef); - auto ref_and_ptr = allocBTreeCopy(*tree); - tree->prepare_hold(); - return ref_and_ptr.ref; + assert(_store.getCompacting(iRef)); + const KeyDataType *shortArray = getKeyDataEntry(iRef, clusterSize); + ref = allocKeyDataCopy(shortArray, clusterSize).ref; } } - if (!_store.getCompacting(ref)) { - return ref; - } - const KeyDataType *shortArray = getKeyDataEntry(iRef, clusterSize); - return allocKeyDataCopy(shortArray, clusterSize).ref; } template <typename DataT> void -PostingStore<DataT>::compact_worst_btree_nodes() +PostingStore<DataT>::compact_worst_btree_nodes(const CompactionStrategy& compaction_strategy) { - auto to_hold = this->start_compact_worst_btree_nodes(); - _dictionary.normalize_posting_lists([this](EntryRef posting_idx) -> EntryRef - { - move_btree_nodes(posting_idx); - return posting_idx; - }); + auto to_hold = this->start_compact_worst_btree_nodes(compaction_strategy); + EntryRefFilter filter(RefType::numBuffers(), RefType::offset_bits); + // Only look at buffers containing bitvectors and btree roots + filter.add_buffers(this->_treeType.get_active_buffers()); + filter.add_buffers(_bvType.get_active_buffers()); + _dictionary.foreach_posting_list([this](const std::vector<EntryRef>& refs) + { move_btree_nodes(refs); }, filter); this->finish_compact_worst_btree_nodes(to_hold); } template <typename DataT> void -PostingStore<DataT>::compact_worst_buffers() +PostingStore<DataT>::compact_worst_buffers(CompactionSpec compaction_spec, const CompactionStrategy& compaction_strategy) { - auto to_hold = this->start_compact_worst_buffers(); - _dictionary.normalize_posting_lists([this](EntryRef posting_idx) -> EntryRef - { return move(posting_idx); }); + + auto to_hold = this->start_compact_worst_buffers(compaction_spec, compaction_strategy); + bool compact_btree_roots = false; + EntryRefFilter filter(RefType::numBuffers(), RefType::offset_bits); + filter.add_buffers(to_hold); + // Start with looking at buffers being compacted + for (uint32_t 
buffer_id : to_hold) { + if (isBTree(_store.getBufferState(buffer_id).getTypeId())) { + compact_btree_roots = true; + } + } + if (compact_btree_roots) { + // If we are compacting btree roots then we also have to look at bitvector + // buffers + filter.add_buffers(_bvType.get_active_buffers()); + } + _dictionary.normalize_posting_lists([this](std::vector<EntryRef>& refs) + { return move(refs); }, + filter); this->finishCompact(to_hold); } @@ -747,8 +770,8 @@ PostingStore<DataT>::consider_compact_worst_btree_nodes(const CompactionStrategy if (_allocator.getNodeStore().has_held_buffers()) { return false; } - if (compaction_strategy.should_compact_memory(_cached_allocator_memory_usage.usedBytes(), _cached_allocator_memory_usage.deadBytes())) { - compact_worst_btree_nodes(); + if (_compaction_spec.btree_nodes()) { + compact_worst_btree_nodes(compaction_strategy); return true; } return false; @@ -761,8 +784,9 @@ PostingStore<DataT>::consider_compact_worst_buffers(const CompactionStrategy& co if (_store.has_held_buffers()) { return false; } - if (compaction_strategy.should_compact_memory(_cached_store_memory_usage.usedBytes(), _cached_store_memory_usage.deadBytes())) { - compact_worst_buffers(); + if (_compaction_spec.store()) { + CompactionSpec compaction_spec(true, false); + compact_worst_buffers(compaction_spec, compaction_strategy); return true; } return false; diff --git a/searchlib/src/vespa/searchlib/attribute/postingstore.h b/searchlib/src/vespa/searchlib/attribute/postingstore.h index a0f0be1c430..949a355bc9d 100644 --- a/searchlib/src/vespa/searchlib/attribute/postingstore.h +++ b/searchlib/src/vespa/searchlib/attribute/postingstore.h @@ -4,6 +4,7 @@ #include "enum_store_dictionary.h" #include "postinglisttraits.h" +#include "posting_store_compaction_spec.h" #include <set> namespace search { @@ -47,8 +48,7 @@ protected: IEnumStoreDictionary& _dictionary; Status &_status; uint64_t _bvExtraBytes; - vespalib::MemoryUsage _cached_allocator_memory_usage; - 
vespalib::MemoryUsage _cached_store_memory_usage; + PostingStoreCompactionSpec _compaction_spec; static constexpr uint32_t BUFFERTYPE_BITVECTOR = 9u; @@ -77,6 +77,8 @@ public: typedef typename Parent::AggregatedType AggregatedType; typedef typename Parent::BTreeTypeRefPair BTreeTypeRefPair; typedef typename Parent::Builder Builder; + using CompactionSpec = vespalib::datastore::CompactionSpec; + using CompactionStrategy = vespalib::datastore::CompactionStrategy; typedef vespalib::datastore::EntryRef EntryRef; typedef std::less<uint32_t> CompareT; using Parent::applyNewArray; @@ -89,6 +91,7 @@ public: using Parent::getWTreeEntry; using Parent::getTreeEntry; using Parent::getKeyDataEntry; + using Parent::isBTree; using Parent::clusterLimit; using Parent::allocBTree; using Parent::allocBTreeCopy; @@ -105,10 +108,8 @@ public: ~PostingStore(); bool removeSparseBitVectors() override; - EntryRef consider_remove_sparse_bitvector(EntryRef ref); + void consider_remove_sparse_bitvector(std::vector<EntryRef> &refs); static bool isBitVector(uint32_t typeId) { return typeId == BUFFERTYPE_BITVECTOR; } - static bool isBTree(uint32_t typeId) { return typeId == BUFFERTYPE_BTREE; } - bool isBTree(RefType ref) const { return isBTree(getTypeId(ref)); } void applyNew(EntryRef &ref, AddIter a, AddIter ae); @@ -186,13 +187,13 @@ public: static inline DataT bitVectorWeight(); vespalib::MemoryUsage getMemoryUsage() const; - vespalib::MemoryUsage update_stat(); + vespalib::MemoryUsage update_stat(const CompactionStrategy& compaction_strategy); - void move_btree_nodes(EntryRef ref); - EntryRef move(EntryRef ref); + void move_btree_nodes(const std::vector<EntryRef> &refs); + void move(std::vector<EntryRef>& refs); - void compact_worst_btree_nodes(); - void compact_worst_buffers(); + void compact_worst_btree_nodes(const CompactionStrategy& compaction_strategy); + void compact_worst_buffers(CompactionSpec compaction_spec, const CompactionStrategy& compaction_strategy); bool 
consider_compact_worst_btree_nodes(const CompactionStrategy& compaction_strategy); bool consider_compact_worst_buffers(const CompactionStrategy& compaction_strategy); private: diff --git a/searchlib/src/vespa/searchlib/attribute/predicate_attribute.cpp b/searchlib/src/vespa/searchlib/attribute/predicate_attribute.cpp index d9024af724b..6268a6da701 100644 --- a/searchlib/src/vespa/searchlib/attribute/predicate_attribute.cpp +++ b/searchlib/src/vespa/searchlib/attribute/predicate_attribute.cpp @@ -194,7 +194,7 @@ PredicateAttribute::onLoad(vespalib::Executor *) buffer.moveFreeToData(size); const GenericHeader &header = loaded_buffer->getHeader(); - auto attributeHeader = attribute::AttributeHeader::extractTags(header); + auto attributeHeader = attribute::AttributeHeader::extractTags(header, getBaseFileName()); uint32_t version = attributeHeader.getVersion(); setCreateSerialNum(attributeHeader.getCreateSerialNum()); diff --git a/searchlib/src/vespa/searchlib/attribute/reference_attribute.cpp b/searchlib/src/vespa/searchlib/attribute/reference_attribute.cpp index eb822313d61..4212a4ad247 100644 --- a/searchlib/src/vespa/searchlib/attribute/reference_attribute.cpp +++ b/searchlib/src/vespa/searchlib/attribute/reference_attribute.cpp @@ -24,6 +24,7 @@ namespace search::attribute { using document::DocumentId; using document::GlobalId; using document::IdParseException; +using vespalib::datastore::CompactionSpec; namespace { @@ -42,8 +43,7 @@ ReferenceAttribute::ReferenceAttribute(const vespalib::stringref baseFileName, : NotImplementedAttribute(baseFileName, cfg), _store(), _indices(getGenerationHolder()), - _cached_unique_store_values_memory_usage(), - _cached_unique_store_dictionary_memory_usage(), + _compaction_spec(), _gidToLidMapperFactory(), _referenceMappings(getGenerationHolder(), getCommittedDocIdLimitRef()) { @@ -191,11 +191,13 @@ ReferenceAttribute::onCommit() void ReferenceAttribute::onUpdateStat() { + auto& compaction_strategy = 
getConfig().getCompactionStrategy(); vespalib::MemoryUsage total = _store.get_values_memory_usage(); - _cached_unique_store_values_memory_usage = total; auto& dictionary = _store.get_dictionary(); - _cached_unique_store_dictionary_memory_usage = dictionary.get_memory_usage(); - total.merge(_cached_unique_store_dictionary_memory_usage); + auto dictionary_memory_usage = dictionary.get_memory_usage(); + _compaction_spec = ReferenceAttributeCompactionSpec(compaction_strategy.should_compact_memory(total), + compaction_strategy.should_compact_memory(dictionary_memory_usage)); + total.merge(dictionary_memory_usage); total.mergeGenerationHeldBytes(getGenerationHolder().getHeldBytes()); total.merge(_indices.getMemoryUsage()); total.merge(_referenceMappings.getMemoryUsage()); @@ -291,20 +293,18 @@ ReferenceAttribute::getReference(DocId doc) const bool ReferenceAttribute::consider_compact_values(const CompactionStrategy &compactionStrategy) { - size_t used_bytes = _cached_unique_store_values_memory_usage.usedBytes(); - size_t dead_bytes = _cached_unique_store_values_memory_usage.deadBytes(); - bool compact_memory = compactionStrategy.should_compact_memory(used_bytes, dead_bytes); - if (compact_memory) { - compact_worst_values(); + if (_compaction_spec.values()) { + compact_worst_values(compactionStrategy); return true; } return false; } void -ReferenceAttribute::compact_worst_values() +ReferenceAttribute::compact_worst_values(const CompactionStrategy& compaction_strategy) { - auto remapper(_store.compact_worst(true, true)); + CompactionSpec compaction_spec(true, true); + auto remapper(_store.compact_worst(compaction_spec, compaction_strategy)); if (remapper) { remapper->remap(vespalib::ArrayRef<EntryRef>(&_indices[0], _indices.size())); remapper->done(); @@ -318,10 +318,8 @@ ReferenceAttribute::consider_compact_dictionary(const CompactionStrategy &compac if (dictionary.has_held_buffers()) { return false; } - if 
(compaction_strategy.should_compact_memory(_cached_unique_store_dictionary_memory_usage.usedBytes(), - _cached_unique_store_dictionary_memory_usage.deadBytes())) - { - dictionary.compact_worst(true, true); + if (_compaction_spec.dictionary()) { + dictionary.compact_worst(true, true, compaction_strategy); return true; } return false; diff --git a/searchlib/src/vespa/searchlib/attribute/reference_attribute.h b/searchlib/src/vespa/searchlib/attribute/reference_attribute.h index 4016230ef89..f985c799c07 100644 --- a/searchlib/src/vespa/searchlib/attribute/reference_attribute.h +++ b/searchlib/src/vespa/searchlib/attribute/reference_attribute.h @@ -4,6 +4,7 @@ #include "not_implemented_attribute.h" #include "reference.h" +#include "reference_attribute_compaction_spec.h" #include "reference_mappings.h" #include <vespa/vespalib/datastore/unique_store.h> #include <vespa/vespalib/util/rcuvector.h> @@ -25,6 +26,7 @@ namespace search::attribute { class ReferenceAttribute : public NotImplementedAttribute { public: + using CompactionStrategy = vespalib::datastore::CompactionStrategy; using EntryRef = vespalib::datastore::EntryRef; using GlobalId = document::GlobalId; using ReferenceStore = vespalib::datastore::UniqueStore<Reference>; @@ -42,8 +44,7 @@ public: private: ReferenceStore _store; ReferenceStoreIndices _indices; - vespalib::MemoryUsage _cached_unique_store_values_memory_usage; - vespalib::MemoryUsage _cached_unique_store_dictionary_memory_usage; + ReferenceAttributeCompactionSpec _compaction_spec; std::shared_ptr<IGidToLidMapperFactory> _gidToLidMapperFactory; ReferenceMappings _referenceMappings; @@ -57,7 +58,7 @@ private: uint64_t getUniqueValueCount() const override; bool consider_compact_values(const CompactionStrategy &compactionStrategy); - void compact_worst_values(); + void compact_worst_values(const CompactionStrategy& compaction_strategy); bool consider_compact_dictionary(const CompactionStrategy& compaction_strategy); IndicesCopyVector 
getIndicesCopy(uint32_t size) const; void removeReverseMapping(EntryRef oldRef, uint32_t lid); diff --git a/searchlib/src/vespa/searchlib/attribute/reference_attribute_compaction_spec.h b/searchlib/src/vespa/searchlib/attribute/reference_attribute_compaction_spec.h new file mode 100644 index 00000000000..dda44fdcd96 --- /dev/null +++ b/searchlib/src/vespa/searchlib/attribute/reference_attribute_compaction_spec.h @@ -0,0 +1,28 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. + +#pragma once + +namespace search::attribute { + +/* + * Class describing how to compact a reference attribute + */ +class ReferenceAttributeCompactionSpec { + bool _values; + bool _dictionary; +public: + ReferenceAttributeCompactionSpec() noexcept + : _values(false), + _dictionary(false) + { + } + ReferenceAttributeCompactionSpec(bool values_, bool dictionary_) noexcept + : _values(values_), + _dictionary(dictionary_) + { + } + bool values() const noexcept { return _values; } + bool dictionary() const noexcept { return _dictionary; } +}; + +} diff --git a/searchlib/src/vespa/searchlib/attribute/singleenumattribute.cpp b/searchlib/src/vespa/searchlib/attribute/singleenumattribute.cpp index 4323e57f6b1..18805a7b20f 100644 --- a/searchlib/src/vespa/searchlib/attribute/singleenumattribute.cpp +++ b/searchlib/src/vespa/searchlib/attribute/singleenumattribute.cpp @@ -49,13 +49,16 @@ SingleValueEnumAttributeBase::remap_enum_store_refs(const EnumIndexRemapper& rem { // update _enumIndices with new EnumIndex values after enum store has been compacted. 
v.logEnumStoreEvent("reenumerate", "reserved"); - auto new_indexes = std::make_unique<vespalib::Array<EnumIndex>>(); - new_indexes->reserve(_enumIndices.capacity()); + vespalib::Array<EnumIndex> new_indexes; + new_indexes.reserve(_enumIndices.capacity()); v.logEnumStoreEvent("reenumerate", "start"); + auto& filter = remapper.get_entry_ref_filter(); for (uint32_t i = 0; i < _enumIndices.size(); ++i) { - EnumIndex old_index = _enumIndices[i]; - EnumIndex new_index = remapper.remap(old_index); - new_indexes->push_back_fast(new_index); + EnumIndex ref = _enumIndices[i]; + if (ref.valid() && filter.has(ref)) { + ref = remapper.remap(ref); + } + new_indexes.push_back_fast(ref); } v.logEnumStoreEvent("compactfixup", "drain"); { diff --git a/searchlib/src/vespa/searchlib/attribute/singleenumattribute.hpp b/searchlib/src/vespa/searchlib/attribute/singleenumattribute.hpp index 398625891b6..dde853cbc90 100644 --- a/searchlib/src/vespa/searchlib/attribute/singleenumattribute.hpp +++ b/searchlib/src/vespa/searchlib/attribute/singleenumattribute.hpp @@ -125,8 +125,9 @@ SingleValueEnumAttribute<B>::onUpdateStat() { // update statistics vespalib::MemoryUsage total = _enumIndices.getMemoryUsage(); + auto& compaction_strategy = this->getConfig().getCompactionStrategy(); total.mergeGenerationHeldBytes(getGenerationHolder().getHeldBytes()); - total.merge(this->_enumStore.update_stat()); + total.merge(this->_enumStore.update_stat(compaction_strategy)); total.merge(this->getChangeVectorMemoryUsage()); mergeMemoryStats(total); this->updateStatistics(_enumIndices.size(), this->_enumStore.get_num_uniques(), total.allocatedBytes(), diff --git a/searchlib/src/vespa/searchlib/attribute/singlenumericpostattribute.hpp b/searchlib/src/vespa/searchlib/attribute/singlenumericpostattribute.hpp index e56bd5aacb1..1083d0f4cb8 100644 --- a/searchlib/src/vespa/searchlib/attribute/singlenumericpostattribute.hpp +++ b/searchlib/src/vespa/searchlib/attribute/singlenumericpostattribute.hpp @@ -36,7 +36,8 
@@ template <typename B> void SingleValueNumericPostingAttribute<B>::mergeMemoryStats(vespalib::MemoryUsage & total) { - total.merge(this->_postingList.update_stat()); + auto& compaction_strategy = this->getConfig().getCompactionStrategy(); + total.merge(this->_postingList.update_stat(compaction_strategy)); } template <typename B> diff --git a/searchlib/src/vespa/searchlib/attribute/singlestringpostattribute.hpp b/searchlib/src/vespa/searchlib/attribute/singlestringpostattribute.hpp index af31295d083..e77c59e915d 100644 --- a/searchlib/src/vespa/searchlib/attribute/singlestringpostattribute.hpp +++ b/searchlib/src/vespa/searchlib/attribute/singlestringpostattribute.hpp @@ -34,7 +34,8 @@ template <typename B> void SingleValueStringPostingAttributeT<B>::mergeMemoryStats(vespalib::MemoryUsage & total) { - total.merge(this->_postingList.update_stat()); + auto& compaction_strategy = this->getConfig().getCompactionStrategy(); + total.merge(this->_postingList.update_stat(compaction_strategy)); } template <typename B> diff --git a/searchlib/src/vespa/searchlib/common/i_compactable_lid_space.h b/searchlib/src/vespa/searchlib/common/i_compactable_lid_space.h index bb404f27709..cea251272dc 100644 --- a/searchlib/src/vespa/searchlib/common/i_compactable_lid_space.h +++ b/searchlib/src/vespa/searchlib/common/i_compactable_lid_space.h @@ -11,7 +11,7 @@ namespace search::common { * Interface for a component that has a lid space that can be compacted and shrunk. */ struct ICompactableLidSpace { - virtual ~ICompactableLidSpace() {} + virtual ~ICompactableLidSpace() = default; /** * Compacts the lid space down to the wanted given doc id limit. 
diff --git a/searchlib/src/vespa/searchlib/common/indexmetainfo.h b/searchlib/src/vespa/searchlib/common/indexmetainfo.h index 9b6c7c8e477..2ba8bebb698 100644 --- a/searchlib/src/vespa/searchlib/common/indexmetainfo.h +++ b/searchlib/src/vespa/searchlib/common/indexmetainfo.h @@ -15,7 +15,7 @@ public: bool valid; uint64_t syncToken; vespalib::string dirName; - Snapshot() : valid(false), syncToken(0), dirName() {} + Snapshot() noexcept : valid(false), syncToken(0), dirName() {} Snapshot(bool valid_, uint64_t syncToken_, const vespalib::string &dirName_) : valid(valid_), syncToken(syncToken_), dirName(dirName_) {} bool operator==(const Snapshot &rhs) const { diff --git a/searchlib/src/vespa/searchlib/diskindex/field_length_scanner.h b/searchlib/src/vespa/searchlib/diskindex/field_length_scanner.h index 181607c3fc6..1feabed1eb1 100644 --- a/searchlib/src/vespa/searchlib/diskindex/field_length_scanner.h +++ b/searchlib/src/vespa/searchlib/diskindex/field_length_scanner.h @@ -23,7 +23,7 @@ class FieldLengthScanner { static uint16_t make_element_mask(uint32_t element_id) { return (1u << element_id); } public: - FieldLengthEntry() + FieldLengthEntry() noexcept : _field_length(0), _elements(0) { diff --git a/searchlib/src/vespa/searchlib/docstore/chunk.h b/searchlib/src/vespa/searchlib/docstore/chunk.h index d40ba27ef45..1190f40aa97 100644 --- a/searchlib/src/vespa/searchlib/docstore/chunk.h +++ b/searchlib/src/vespa/searchlib/docstore/chunk.h @@ -47,7 +47,7 @@ private: class LidMeta { public: - LidMeta() : _lid(0), _size(0) { } + LidMeta() noexcept : _lid(0), _size(0) { } LidMeta(uint32_t lid, uint32_t sz) : _lid(lid), _size(sz) { } uint32_t getLid() const { return _lid; } uint32_t size() const { return _size; } diff --git a/searchlib/src/vespa/searchlib/docstore/compacter.cpp b/searchlib/src/vespa/searchlib/docstore/compacter.cpp index 38f3fbef0b0..26fb79f8a4e 100644 --- a/searchlib/src/vespa/searchlib/docstore/compacter.cpp +++ 
b/searchlib/src/vespa/searchlib/docstore/compacter.cpp @@ -26,7 +26,7 @@ BucketCompacter::BucketCompacter(size_t maxSignificantBucketBits, const Compress _bucketizer(bucketizer), _writeCount(0), _maxBucketGuardDuration(vespalib::duration::zero()), - _lastSample(), + _lastSample(vespalib::steady_clock::now()), _lock(), _backingMemory(Alloc::alloc(0x40000000), &_lock), _tmpStore(), diff --git a/searchlib/src/vespa/searchlib/docstore/documentstore.cpp b/searchlib/src/vespa/searchlib/docstore/documentstore.cpp index 7aaee7180df..b4ff050c0f6 100644 --- a/searchlib/src/vespa/searchlib/docstore/documentstore.cpp +++ b/searchlib/src/vespa/searchlib/docstore/documentstore.cpp @@ -112,7 +112,6 @@ public: } -using VisitCache = docstore::VisitCache; using docstore::Value; bool @@ -239,7 +238,14 @@ DocumentStore::remove(uint64_t syncToken, DocumentIdT lid) } void -DocumentStore::compact(uint64_t syncToken) +DocumentStore::compactBloat(uint64_t syncToken) +{ + (void) syncToken; + // Most implementations does not offer compact. +} + +void +DocumentStore::compactSpread(uint64_t syncToken) { (void) syncToken; // Most implementations does not offer compact. 
diff --git a/searchlib/src/vespa/searchlib/docstore/documentstore.h b/searchlib/src/vespa/searchlib/docstore/documentstore.h index b6021d34bef..6402c16cd5e 100644 --- a/searchlib/src/vespa/searchlib/docstore/documentstore.h +++ b/searchlib/src/vespa/searchlib/docstore/documentstore.h @@ -72,7 +72,8 @@ public: void remove(uint64_t syncToken, DocumentIdT lid) override; void flush(uint64_t syncToken) override; uint64_t initFlush(uint64_t synctoken) override; - void compact(uint64_t syncToken) override; + void compactBloat(uint64_t syncToken) override; + void compactSpread(uint64_t syncToken) override; uint64_t lastSyncToken() const override; uint64_t tentativeLastSyncToken() const override; vespalib::system_time getLastFlushTime() const override; @@ -80,7 +81,7 @@ public: size_t memoryUsed() const override { return _backingStore.memoryUsed(); } size_t getDiskFootprint() const override { return _backingStore.getDiskFootprint(); } size_t getDiskBloat() const override { return _backingStore.getDiskBloat(); } - size_t getMaxCompactGain() const override { return _backingStore.getMaxCompactGain(); } + size_t getMaxSpreadAsBloat() const override { return _backingStore.getMaxSpreadAsBloat(); } CacheStats getCacheStats() const override; size_t memoryMeta() const override { return _backingStore.memoryMeta(); } const vespalib::string & getBaseDir() const override { return _backingStore.getBaseDir(); } diff --git a/searchlib/src/vespa/searchlib/docstore/idatastore.h b/searchlib/src/vespa/searchlib/docstore/idatastore.h index b18bb0a3827..fc0eae1d15e 100644 --- a/searchlib/src/vespa/searchlib/docstore/idatastore.h +++ b/searchlib/src/vespa/searchlib/docstore/idatastore.h @@ -17,14 +17,14 @@ class IBufferVisitor; class IDataStoreVisitor { public: - virtual ~IDataStoreVisitor() { } + virtual ~IDataStoreVisitor() = default; virtual void visit(uint32_t lid, const void *buffer, size_t sz) = 0; }; class IDataStoreVisitorProgress { public: - virtual ~IDataStoreVisitorProgress() { } + 
virtual ~IDataStoreVisitorProgress() = default; virtual void updateProgress(double progress) = 0; }; @@ -46,11 +46,7 @@ public: * @param dirName The directory that will contain the data file. **/ IDataStore(const vespalib::string & dirName); - - /** - * Allow inhertitance. - **/ - virtual ~IDataStore(); + ~IDataStore() override; /** * Read data from the data store into a buffer. @@ -125,7 +121,7 @@ public: * to avoid misuse we let the report a more conservative number here if necessary. * @return diskspace to be gained. */ - virtual size_t getMaxCompactGain() const { return getDiskBloat(); } + virtual size_t getMaxSpreadAsBloat() const = 0; /** diff --git a/searchlib/src/vespa/searchlib/docstore/idocumentstore.cpp b/searchlib/src/vespa/searchlib/docstore/idocumentstore.cpp index e1558f2238b..4f9b91f3e15 100644 --- a/searchlib/src/vespa/searchlib/docstore/idocumentstore.cpp +++ b/searchlib/src/vespa/searchlib/docstore/idocumentstore.cpp @@ -5,10 +5,6 @@ namespace search { -IDocumentStore::IDocumentStore() = default; - -IDocumentStore::~IDocumentStore() = default; - void IDocumentStore::visit(const LidVector & lids, const document::DocumentTypeRepo &repo, IDocumentVisitor & visitor) const { for (uint32_t lid : lids) { visitor.visit(lid, read(lid, repo)); diff --git a/searchlib/src/vespa/searchlib/docstore/idocumentstore.h b/searchlib/src/vespa/searchlib/docstore/idocumentstore.h index 2a7864a6f47..d84a5ad7e7e 100644 --- a/searchlib/src/vespa/searchlib/docstore/idocumentstore.h +++ b/searchlib/src/vespa/searchlib/docstore/idocumentstore.h @@ -22,7 +22,7 @@ class IDocumentStoreReadVisitor { public: using DocumentSP = std::shared_ptr<document::Document>; - virtual ~IDocumentStoreReadVisitor() { } + virtual ~IDocumentStoreReadVisitor() = default; virtual void visit(uint32_t lid, const DocumentSP &doc) = 0; virtual void visit(uint32_t lid) = 0; }; @@ -31,14 +31,14 @@ class IDocumentStoreRewriteVisitor { public: using DocumentSP = std::shared_ptr<document::Document>; - 
virtual ~IDocumentStoreRewriteVisitor() { } + virtual ~IDocumentStoreRewriteVisitor() = default; virtual void visit(uint32_t lid, const DocumentSP &doc) = 0; }; class IDocumentStoreVisitorProgress { public: - virtual ~IDocumentStoreVisitorProgress() { } + virtual ~IDocumentStoreVisitorProgress() = default; virtual void updateProgress(double progress) = 0; }; @@ -47,7 +47,7 @@ class IDocumentVisitor { public: using DocumentUP = std::unique_ptr<document::Document>; - virtual ~IDocumentVisitor() { } + virtual ~IDocumentVisitor() = default; virtual void visit(uint32_t lid, DocumentUP doc) = 0; virtual bool allowVisitCaching() const = 0; private: @@ -68,17 +68,6 @@ public: using LidVector = std::vector<uint32_t>; using DocumentUP = std::unique_ptr<document::Document>; - - /** - * Construct a document store. - * - * @throws vespalib::IoException if the file is corrupt or other IO problems occur. - * @param docMan The document type manager to use when deserializing. - * @param baseDir The path to a directory where the implementaion specific files will reside. - **/ - IDocumentStore(); - virtual ~IDocumentStore(); - /** * Make a Document from a stored serialized data blob. * @param lid The local ID associated with the document. @@ -111,7 +100,8 @@ public: /** * If possible compact the disk. **/ - virtual void compact(uint64_t syncToken) = 0; + virtual void compactBloat(uint64_t syncToken) = 0; + virtual void compactSpread(uint64_t syncToken) = 0; /** * The sync token used for the last successful flush() operation, @@ -164,12 +154,11 @@ public: virtual size_t getDiskBloat() const = 0; /** - * Calculates how much diskspace can be compacted during a flush. - * default is to return th ebloat limit, but as some targets have some internal limits - * to avoid misuse we let the report a more conservative number here if necessary. - * @return diskspace to be gained. + * Calculates the gain from keeping buckets close. 
It is converted to diskbloat + * so it can be prioritized accordingly. + * @return spread as disk bloat. */ - virtual size_t getMaxCompactGain() const { return getDiskBloat(); } + virtual size_t getMaxSpreadAsBloat() const = 0; /** * Returns statistics about the cache. diff --git a/searchlib/src/vespa/searchlib/docstore/logdatastore.cpp b/searchlib/src/vespa/searchlib/docstore/logdatastore.cpp index fd25dd56235..6a9ae40cc93 100644 --- a/searchlib/src/vespa/searchlib/docstore/logdatastore.cpp +++ b/searchlib/src/vespa/searchlib/docstore/logdatastore.cpp @@ -36,7 +36,6 @@ using namespace std::literals; LogDataStore::Config::Config() : _maxFileSize(DEFAULT_MAX_FILESIZE), - _maxDiskBloatFactor(0.2), _maxBucketSpread(2.5), _minFileSizeFactor(0.2), _maxNumLids(DEFAULT_MAX_LIDS_PER_FILE), @@ -48,7 +47,6 @@ LogDataStore::Config::Config() bool LogDataStore::Config::operator == (const Config & rhs) const { return (_maxBucketSpread == rhs._maxBucketSpread) && - (_maxDiskBloatFactor == rhs._maxDiskBloatFactor) && (_maxFileSize == rhs._maxFileSize) && (_minFileSizeFactor == rhs._minFileSizeFactor) && (_skipCrcOnRead == rhs._skipCrcOnRead) && @@ -294,46 +292,14 @@ vespalib::string bloatMsg(size_t bloat, size_t usage) { } -void -LogDataStore::compact(uint64_t syncToken) -{ - uint64_t usage = getDiskFootprint(); - uint64_t bloat = getDiskBloat(); - LOG(debug, "%s", bloatMsg(bloat, usage).c_str()); - const bool doCompact = (_fileChunks.size() > 1); - if (doCompact) { - LOG(info, "%s. Will compact", bloatMsg(bloat, usage).c_str()); - compactWorst(_config.getMaxDiskBloatFactor(), _config.getMaxBucketSpread(), isTotalDiskBloatExceeded(usage, bloat)); - } - flushActiveAndWait(syncToken); - if (doCompact) { - usage = getDiskFootprint(); - bloat = getDiskBloat(); - LOG(info, "Done compacting. 
%s", bloatMsg(bloat, usage).c_str()); - } -} - -bool -LogDataStore::isTotalDiskBloatExceeded(size_t diskFootPrint, size_t bloat) const { - const size_t maxConfiguredDiskBloat = diskFootPrint * _config.getMaxDiskBloatFactor(); - return bloat > maxConfiguredDiskBloat; -} - size_t -LogDataStore::getMaxCompactGain() const +LogDataStore::getMaxSpreadAsBloat() const { - size_t bloat = getDiskBloat(); const size_t diskFootPrint = getDiskFootprint(); - if ( ! isTotalDiskBloatExceeded(diskFootPrint, bloat) ) { - bloat = 0; - } - const double maxSpread = getMaxBucketSpread(); - size_t spreadAsBloat = diskFootPrint * (1.0 - 1.0/maxSpread); - if ( maxSpread < _config.getMaxBucketSpread()) { - spreadAsBloat = 0; - } - return (bloat + spreadAsBloat); + return (maxSpread > _config.getMaxBucketSpread()) + ? diskFootPrint * (1.0 - 1.0/maxSpread) + : 0; } void @@ -380,40 +346,34 @@ LogDataStore::getMaxBucketSpread() const } std::pair<bool, LogDataStore::FileId> -LogDataStore::findNextToCompact(double bloatLimit, double spreadLimit, bool prioritizeDiskBloat) +LogDataStore::findNextToCompact(bool dueToBloat) { typedef std::multimap<double, FileId, std::greater<double>> CostMap; - CostMap worstBloat; - CostMap worstSpread; + CostMap worst; MonitorGuard guard(_updateLock); for (size_t i(0); i < _fileChunks.size(); i++) { const auto & fc(_fileChunks[i]); if (fc && fc->frozen() && (_currentlyCompacting.find(fc->getNameId()) == _currentlyCompacting.end())) { uint64_t usage = fc->getDiskFootprint(); - uint64_t bloat = fc->getDiskBloat(); - if (_bucketizer) { - worstSpread.emplace(fc->getBucketSpread(), FileId(i)); - } - if (usage > 0) { - double tmp(double(bloat)/usage); - worstBloat.emplace(tmp, FileId(i)); + if ( ! 
dueToBloat && _bucketizer) { + worst.emplace(fc->getBucketSpread(), FileId(i)); + } else if (dueToBloat && usage > 0) { + double tmp(double(fc->getDiskBloat())/usage); + worst.emplace(tmp, FileId(i)); } } } if (LOG_WOULD_LOG(debug)) { - for (const auto & it : worstBloat) { + for (const auto & it : worst) { const FileChunk & fc = *_fileChunks[it.second.getId()]; LOG(debug, "File '%s' has bloat '%2.2f' and bucket-spread '%1.4f numChunks=%d , numBuckets=%ld, numUniqueBuckets=%ld", fc.getName().c_str(), it.first * 100, fc.getBucketSpread(), fc.getNumChunks(), fc.getNumBuckets(), fc.getNumUniqueBuckets()); } } std::pair<bool, FileId> retval(false, FileId(-1)); - if ( ! worstBloat.empty() && (worstBloat.begin()->first > bloatLimit) && prioritizeDiskBloat) { - retval.first = true; - retval.second = worstBloat.begin()->second; - } else if ( ! worstSpread.empty() && (worstSpread.begin()->first > spreadLimit)) { + if ( ! worst.empty()) { retval.first = true; - retval.second = worstSpread.begin()->second; + retval.second = worst.begin()->second; } if (retval.first) { _currentlyCompacting.insert(_fileChunks[retval.second.getId()]->getNameId()); @@ -422,10 +382,24 @@ LogDataStore::findNextToCompact(double bloatLimit, double spreadLimit, bool prio } void -LogDataStore::compactWorst(double bloatLimit, double spreadLimit, bool prioritizeDiskBloat) { - auto worst = findNextToCompact(bloatLimit, spreadLimit, prioritizeDiskBloat); - if (worst.first) { - compactFile(worst.second); +LogDataStore::compactWorst(uint64_t syncToken, bool compactDiskBloat) { + uint64_t usage = getDiskFootprint(); + uint64_t bloat = getDiskBloat(); + const char * reason = compactDiskBloat ? 
"bloat" : "spread"; + LOG(debug, "%s", bloatMsg(bloat, usage).c_str()); + const bool doCompact = (_fileChunks.size() > 1); + if (doCompact) { + LOG(debug, "Will compact due to %s: %s", reason, bloatMsg(bloat, usage).c_str()); + auto worst = findNextToCompact(compactDiskBloat); + if (worst.first) { + compactFile(worst.second); + } + flushActiveAndWait(syncToken); + usage = getDiskFootprint(); + bloat = getDiskBloat(); + LOG(info, "Done compacting due to %s: %s", reason, bloatMsg(bloat, usage).c_str()); + } else { + flushActiveAndWait(syncToken); } } @@ -1001,7 +975,7 @@ LogDataStore::computeNumberOfSignificantBucketIdBits(const IBucketizer & bucketi while ((msb > 0) && (msbHistogram[msb - 1] == 0)) { msb--; } - LOG(info, "computeNumberOfSignificantBucketIdBits(file=%d) = %ld = %ld took %1.3f", fileId.getId(), msb, msbHistogram[msb-1], timer.min_time()); + LOG(debug, "computeNumberOfSignificantBucketIdBits(file=%d) = %ld = %ld took %1.3f", fileId.getId(), msb, msbHistogram[msb-1], timer.min_time()); return msb; } diff --git a/searchlib/src/vespa/searchlib/docstore/logdatastore.h b/searchlib/src/vespa/searchlib/docstore/logdatastore.h index 0e11b88a178..62f87076759 100644 --- a/searchlib/src/vespa/searchlib/docstore/logdatastore.h +++ b/searchlib/src/vespa/searchlib/docstore/logdatastore.h @@ -41,7 +41,6 @@ public: Config & setMaxFileSize(size_t v) { _maxFileSize = v; return *this; } Config & setMaxNumLids(size_t v) { _maxNumLids = v; return *this; } - Config & setMaxDiskBloatFactor(double v) { _maxDiskBloatFactor = v; return *this; } Config & setMaxBucketSpread(double v) { _maxBucketSpread = v; return *this; } Config & setMinFileSizeFactor(double v) { _minFileSizeFactor = v; return *this; } @@ -49,7 +48,6 @@ public: Config & setFileConfig(WriteableFileChunk::Config v) { _fileConfig = v; return *this; } size_t getMaxFileSize() const { return _maxFileSize; } - double getMaxDiskBloatFactor() const { return _maxDiskBloatFactor; } double getMaxBucketSpread() const { 
return _maxBucketSpread; } double getMinFileSizeFactor() const { return _minFileSizeFactor; } uint32_t getMaxNumLids() const { return _maxNumLids; } @@ -63,7 +61,6 @@ public: bool operator == (const Config &) const; private: size_t _maxFileSize; - double _maxDiskBloatFactor; double _maxBucketSpread; double _minFileSizeFactor; uint32_t _maxNumLids; @@ -109,12 +106,10 @@ public: size_t getDiskFootprint() const override; size_t getDiskHeaderFootprint() const override; size_t getDiskBloat() const override; - size_t getMaxCompactGain() const override; + size_t getMaxSpreadAsBloat() const override; - /** - * Will compact the docsummary up to a lower limit of 5% bloat. - */ - void compact(uint64_t syncToken); + void compactBloat(uint64_t syncToken) { compactWorst(syncToken, true); } + void compactSpread(uint64_t syncToken) { compactWorst(syncToken, false);} const Config & getConfig() const { return _config; } Config & getConfig() { return _config; } @@ -183,10 +178,9 @@ private: class WrapVisitorProgress; class FileChunkHolder; - // Implements ISetLid API void setLid(const ISetLid::unique_lock & guard, uint32_t lid, const LidInfo & lm) override; - void compactWorst(double bloatLimit, double spreadLimit, bool prioritizeDiskBloat); + void compactWorst(uint64_t syncToken, bool compactDiskBloat); void compactFile(FileId chunkId); typedef vespalib::RcuVector<uint64_t> LidInfoVector; @@ -202,8 +196,6 @@ private: NameIdSet eraseIncompleteCompactedFiles(NameIdSet partList); void internalFlushAll(); - bool isTotalDiskBloatExceeded(size_t diskFootPrint, size_t bloat) const; - NameIdSet scanDir(const vespalib::string &dir, const vespalib::string &suffix); FileId allocateFileId(const MonitorGuard & guard); void setNewFileChunk(const MonitorGuard & guard, FileChunk::UP fileChunk); @@ -248,7 +240,7 @@ private: return (_fileChunks.empty() ? 
0 : _fileChunks.back()->getLastPersistedSerialNum()); } bool shouldCompactToActiveFile(size_t compactedSize) const; - std::pair<bool, FileId> findNextToCompact(double bloatLimit, double spreadLimit, bool prioritizeDiskBloat); + std::pair<bool, FileId> findNextToCompact(bool compactDiskBloat); void incGeneration(); bool canShrinkLidSpace(const MonitorGuard &guard) const; diff --git a/searchlib/src/vespa/searchlib/docstore/logdocumentstore.h b/searchlib/src/vespa/searchlib/docstore/logdocumentstore.h index de36155bedb..2931f8bce2d 100644 --- a/searchlib/src/vespa/searchlib/docstore/logdocumentstore.h +++ b/searchlib/src/vespa/searchlib/docstore/logdocumentstore.h @@ -51,7 +51,8 @@ public: ~LogDocumentStore() override; void reconfigure(const Config & config); private: - void compact(uint64_t syncToken) override { _backingStore.compact(syncToken); } + void compactBloat(uint64_t syncToken) override { _backingStore.compactBloat(syncToken); } + void compactSpread(uint64_t syncToken) override { _backingStore.compactSpread(syncToken); } LogDataStore _backingStore; }; diff --git a/searchlib/src/vespa/searchlib/docstore/writeablefilechunk.cpp b/searchlib/src/vespa/searchlib/docstore/writeablefilechunk.cpp index 4da09868216..8c76a4477f5 100644 --- a/searchlib/src/vespa/searchlib/docstore/writeablefilechunk.cpp +++ b/searchlib/src/vespa/searchlib/docstore/writeablefilechunk.cpp @@ -483,12 +483,20 @@ WriteableFileChunk::writeData(const ProcessedChunkQ & chunks, size_t sz) void WriteableFileChunk::updateChunkInfo(const ProcessedChunkQ & chunks, const ChunkMetaV & cmetaV, size_t sz) { + uint32_t maxChunkId(0); + for (const auto & chunk : chunks) { + maxChunkId = std::max(chunk->getChunkId(), maxChunkId); + } std::lock_guard guard(_lock); + if (maxChunkId >= _chunkInfo.size()) { + _chunkInfo.reserve(vespalib::roundUp2inN(maxChunkId+1)); + } size_t nettoSz(sz); for (size_t i(0); i < chunks.size(); i++) { const ProcessedChunk & chunk = *chunks[i]; 
assert(_chunkMap.find(chunk.getChunkId()) == _chunkMap.begin()); const Chunk & active = *_chunkMap.begin()->second; + assert(active.getId() == chunk.getChunkId()); if (active.getId() >= _chunkInfo.size()) { _chunkInfo.resize(active.getId()+1); } diff --git a/searchlib/src/vespa/searchlib/expression/floatbucketresultnode.h b/searchlib/src/vespa/searchlib/expression/floatbucketresultnode.h index 080b4f0fc5c..7b89e90efe9 100644 --- a/searchlib/src/vespa/searchlib/expression/floatbucketresultnode.h +++ b/searchlib/src/vespa/searchlib/expression/floatbucketresultnode.h @@ -33,7 +33,7 @@ public: DECLARE_EXPRESSIONNODE(FloatBucketResultNode); DECLARE_NBO_SERIALIZE; - FloatBucketResultNode() : _from(0.0), _to(0.0) {} + FloatBucketResultNode() noexcept : _from(0.0), _to(0.0) {} FloatBucketResultNode(double from, double to) : _from(from), _to(to) {} size_t hash() const override; int onCmp(const Identifiable & b) const override; diff --git a/searchlib/src/vespa/searchlib/expression/floatresultnode.h b/searchlib/src/vespa/searchlib/expression/floatresultnode.h index 5bff232fe2f..c31f9a2de40 100644 --- a/searchlib/src/vespa/searchlib/expression/floatresultnode.h +++ b/searchlib/src/vespa/searchlib/expression/floatresultnode.h @@ -13,7 +13,7 @@ public: DECLARE_EXPRESSIONNODE(FloatResultNode); DECLARE_NBO_SERIALIZE; void visitMembers(vespalib::ObjectVisitor &visitor) const override; - FloatResultNode(double v=0) : _value(v) { } + FloatResultNode(double v=0) noexcept : _value(v) { } size_t hash() const override { size_t tmpHash(0); memcpy(&tmpHash, &_value, sizeof(tmpHash)); return tmpHash; } int onCmp(const Identifiable & b) const override; void add(const ResultNode & b) override; diff --git a/searchlib/src/vespa/searchlib/expression/integerbucketresultnode.h b/searchlib/src/vespa/searchlib/expression/integerbucketresultnode.h index d5a2306200c..95a4555e6e4 100644 --- a/searchlib/src/vespa/searchlib/expression/integerbucketresultnode.h +++ 
b/searchlib/src/vespa/searchlib/expression/integerbucketresultnode.h @@ -31,7 +31,7 @@ private: public: DECLARE_EXPRESSIONNODE(IntegerBucketResultNode); DECLARE_NBO_SERIALIZE; - IntegerBucketResultNode() : _from(0), _to(0) {} + IntegerBucketResultNode() noexcept : _from(0), _to(0) {} IntegerBucketResultNode(int64_t from, int64_t to) : _from(from), _to(to) {} size_t hash() const override; int onCmp(const Identifiable & b) const override; diff --git a/searchlib/src/vespa/searchlib/expression/stringresultnode.h b/searchlib/src/vespa/searchlib/expression/stringresultnode.h index b9d127a2e2c..79d849bdd15 100644 --- a/searchlib/src/vespa/searchlib/expression/stringresultnode.h +++ b/searchlib/src/vespa/searchlib/expression/stringresultnode.h @@ -12,7 +12,7 @@ public: DECLARE_EXPRESSIONNODE(StringResultNode); DECLARE_NBO_SERIALIZE; void visitMembers(vespalib::ObjectVisitor &visitor) const override; - StringResultNode(const char * v="") : _value(v) { } + StringResultNode(const char * v="") noexcept : _value(v) { } StringResultNode(vespalib::stringref v) : _value(v) { } size_t hash() const override; int onCmp(const Identifiable & b) const override; diff --git a/searchlib/src/vespa/searchlib/features/nativefieldmatchfeature.h b/searchlib/src/vespa/searchlib/features/nativefieldmatchfeature.h index 684857e5a7c..d9375f12d54 100644 --- a/searchlib/src/vespa/searchlib/features/nativefieldmatchfeature.h +++ b/searchlib/src/vespa/searchlib/features/nativefieldmatchfeature.h @@ -14,7 +14,7 @@ namespace search::features { struct NativeFieldMatchParam : public NativeParamBase { static const uint32_t NOT_DEF_FIELD_LENGTH; - NativeFieldMatchParam() : NativeParamBase(), firstOccTable(NULL), numOccTable(NULL), averageFieldLength(NOT_DEF_FIELD_LENGTH), firstOccImportance(0.5) { } + NativeFieldMatchParam() noexcept : NativeParamBase(), firstOccTable(NULL), numOccTable(NULL), averageFieldLength(NOT_DEF_FIELD_LENGTH), firstOccImportance(0.5) { } const fef::Table * firstOccTable; const 
fef::Table * numOccTable; uint32_t averageFieldLength; diff --git a/searchlib/src/vespa/searchlib/features/nativeproximityfeature.h b/searchlib/src/vespa/searchlib/features/nativeproximityfeature.h index 0c61ddf9bae..e40779dae14 100644 --- a/searchlib/src/vespa/searchlib/features/nativeproximityfeature.h +++ b/searchlib/src/vespa/searchlib/features/nativeproximityfeature.h @@ -13,7 +13,7 @@ namespace search::features { **/ struct NativeProximityParam : public NativeParamBase { - NativeProximityParam() : NativeParamBase(), proximityTable(NULL), revProximityTable(NULL), proximityImportance(0.5) { } + NativeProximityParam() noexcept : NativeParamBase(), proximityTable(NULL), revProximityTable(NULL), proximityImportance(0.5) { } const fef::Table * proximityTable; const fef::Table * revProximityTable; feature_t proximityImportance; diff --git a/searchlib/src/vespa/searchlib/fef/test/ftlib.h b/searchlib/src/vespa/searchlib/fef/test/ftlib.h index 437f2d330ec..d4a18fcd40e 100644 --- a/searchlib/src/vespa/searchlib/fef/test/ftlib.h +++ b/searchlib/src/vespa/searchlib/fef/test/ftlib.h @@ -127,7 +127,7 @@ private: struct FtQueryTerm { FtQueryTerm(const vespalib::string t, uint32_t tw = 100, feature_t co = 0.1f, feature_t si = 0.1f) : term(t), termWeight(tw), connexity(co), significance(si) {} - FtQueryTerm() : term(), termWeight(100), connexity(0.1f), significance(0.1f) {} + FtQueryTerm() noexcept : term(), termWeight(100), connexity(0.1f), significance(0.1f) {} vespalib::string term; search::query::Weight termWeight; feature_t connexity; diff --git a/searchlib/src/vespa/searchlib/grouping/collect.h b/searchlib/src/vespa/searchlib/grouping/collect.h index 6d899723f1a..55b5ea3ddd4 100644 --- a/searchlib/src/vespa/searchlib/grouping/collect.h +++ b/searchlib/src/vespa/searchlib/grouping/collect.h @@ -90,7 +90,7 @@ private: typedef vespalib::Array<ResultAccessor> ResultAccessorList; class SortInfo { public: - SortInfo() : _index(0), _sign(1) { } + SortInfo() noexcept : 
_index(0), _sign(1) { } SortInfo(uint8_t index, int8_t sign) : _index(index), _sign(sign) { } uint8_t getIndex() const { return _index; } int8_t getSign() const { return _sign; } diff --git a/searchlib/src/vespa/searchlib/grouping/groupref.h b/searchlib/src/vespa/searchlib/grouping/groupref.h index 49e56ed5bed..78331e4caaf 100644 --- a/searchlib/src/vespa/searchlib/grouping/groupref.h +++ b/searchlib/src/vespa/searchlib/grouping/groupref.h @@ -9,7 +9,7 @@ namespace grouping { class GroupRef { public: - GroupRef() : _ref(-1) { } + GroupRef() noexcept : _ref(-1) { } GroupRef(uint32_t ref) : _ref(ref) { } uint32_t getRef() const { return _ref; } bool valid() const { return _ref != static_cast<uint32_t>(-1); } diff --git a/searchlib/src/vespa/searchlib/index/bitvectorkeys.h b/searchlib/src/vespa/searchlib/index/bitvectorkeys.h index cacb0f59721..332b0ed3524 100644 --- a/searchlib/src/vespa/searchlib/index/bitvectorkeys.h +++ b/searchlib/src/vespa/searchlib/index/bitvectorkeys.h @@ -9,7 +9,7 @@ struct BitVectorWordSingleKey { uint32_t _numDocs; uint32_t _pad; - BitVectorWordSingleKey() + BitVectorWordSingleKey() noexcept : _wordNum(0), _numDocs(0), _pad(0) diff --git a/searchlib/src/vespa/searchlib/index/docbuilder.h b/searchlib/src/vespa/searchlib/index/docbuilder.h index 2b60446bd4e..2ee28c90827 100644 --- a/searchlib/src/vespa/searchlib/index/docbuilder.h +++ b/searchlib/src/vespa/searchlib/index/docbuilder.h @@ -253,7 +253,7 @@ private: }; const Schema & _schema; - document::DocumenttypesConfig _doctypes_config; + document::config::DocumenttypesConfig _doctypes_config; std::shared_ptr<const document::DocumentTypeRepo> _repo; const document::DocumentType &_docType; document::Document::UP _doc; // the document we are about to generate @@ -300,7 +300,7 @@ public: const document::DocumentType &getDocumentType() const { return _docType; } const std::shared_ptr<const document::DocumentTypeRepo> &getDocumentTypeRepo() const { return _repo; } - document::DocumenttypesConfig 
getDocumenttypesConfig() const { return _doctypes_config; } + document::config::DocumenttypesConfig getDocumenttypesConfig() const { return _doctypes_config; } }; } diff --git a/searchlib/src/vespa/searchlib/index/doctypebuilder.cpp b/searchlib/src/vespa/searchlib/index/doctypebuilder.cpp index 9f0a44d2b73..1ddfbff54a8 100644 --- a/searchlib/src/vespa/searchlib/index/doctypebuilder.cpp +++ b/searchlib/src/vespa/searchlib/index/doctypebuilder.cpp @@ -43,9 +43,9 @@ DataType::Type convert(Schema::DataType type) { } void -insertStructType(document::DocumenttypesConfig::Documenttype & cfg, const StructDataType & structType) +insertStructType(document::config::DocumenttypesConfig::Documenttype & cfg, const StructDataType & structType) { - typedef document::DocumenttypesConfig DTC; + typedef document::config::DocumenttypesConfig DTC; DTC::Documenttype::Datatype::Sstruct cfgStruct; cfgStruct.name = structType.getName(); Field::Set fieldSet = structType.getFieldSet(); @@ -97,7 +97,7 @@ DocTypeBuilder::DocTypeBuilder(const Schema &schema) _iFields.setup(schema); } -document::DocumenttypesConfig DocTypeBuilder::makeConfig() const { +document::config::DocumenttypesConfig DocTypeBuilder::makeConfig() const { using namespace document::config_builder; TypeCache type_cache; @@ -168,10 +168,10 @@ document::DocumenttypesConfig DocTypeBuilder::makeConfig() const { return builder.config(); } -document::DocumenttypesConfig +document::config::DocumenttypesConfig DocTypeBuilder::makeConfig(const DocumentType &docType) { - typedef document::DocumenttypesConfigBuilder DTC; + typedef document::config::DocumenttypesConfigBuilder DTC; DTC cfg; { // document type DTC::Documenttype dtype; diff --git a/searchlib/src/vespa/searchlib/index/doctypebuilder.h b/searchlib/src/vespa/searchlib/index/doctypebuilder.h index 98b52e955fc..c66ae66e250 100644 --- a/searchlib/src/vespa/searchlib/index/doctypebuilder.h +++ b/searchlib/src/vespa/searchlib/index/doctypebuilder.h @@ -20,9 +20,9 @@ class 
DocTypeBuilder { public: DocTypeBuilder(const Schema & schema); - document::DocumenttypesConfig makeConfig() const; + document::config::DocumenttypesConfig makeConfig() const; - static document::DocumenttypesConfig + static document::config::DocumenttypesConfig makeConfig(const document::DocumentType &docType); }; diff --git a/searchlib/src/vespa/searchlib/query/streaming/queryterm.h b/searchlib/src/vespa/searchlib/query/streaming/queryterm.h index 6a64aa561e4..d160db9784e 100644 --- a/searchlib/src/vespa/searchlib/query/streaming/queryterm.h +++ b/searchlib/src/vespa/searchlib/query/streaming/queryterm.h @@ -39,7 +39,7 @@ public: }; class FieldInfo { public: - FieldInfo() : _hitListOffset(0), _hitCount(0), _fieldLength(0) { } + FieldInfo() noexcept : _hitListOffset(0), _hitCount(0), _fieldLength(0) { } FieldInfo(uint32_t hitListOffset, uint32_t hitCount, uint32_t fieldLength) : _hitListOffset(hitListOffset), _hitCount(hitCount), _fieldLength(fieldLength) { } size_t getHitOffset() const { return _hitListOffset; } diff --git a/searchlib/src/vespa/searchlib/tensor/dense_tensor_attribute.cpp b/searchlib/src/vespa/searchlib/tensor/dense_tensor_attribute.cpp index 0f4326aac40..113883a307f 100644 --- a/searchlib/src/vespa/searchlib/tensor/dense_tensor_attribute.cpp +++ b/searchlib/src/vespa/searchlib/tensor/dense_tensor_attribute.cpp @@ -75,7 +75,7 @@ BlobSequenceReader::BlobSequenceReader(AttributeVector& attr, bool has_index) : ReaderBase(attr), _use_index_file(has_index && has_index_file(attr) && can_use_index_save_file(attr.getConfig(), - search::attribute::AttributeHeader::extractTags(getDatHeader()))), + search::attribute::AttributeHeader::extractTags(getDatHeader(), attr.getBaseFileName()))), _index_file(_use_index_file ? 
attribute::LoadUtils::openFile(attr, DenseTensorAttributeSaver::index_file_suffix()) : std::unique_ptr<Fast_BufferedFile>()) @@ -132,7 +132,7 @@ DenseTensorAttribute::update_stat() { vespalib::MemoryUsage result = TensorAttribute::update_stat(); if (_index) { - result.merge(_index->memory_usage()); + result.merge(_index->update_stat(getConfig().getCompactionStrategy())); } return result; } diff --git a/searchlib/src/vespa/searchlib/tensor/dense_tensor_store.cpp b/searchlib/src/vespa/searchlib/tensor/dense_tensor_store.cpp index d3c2998333a..ed3fb737b7d 100644 --- a/searchlib/src/vespa/searchlib/tensor/dense_tensor_store.cpp +++ b/searchlib/src/vespa/searchlib/tensor/dense_tensor_store.cpp @@ -17,6 +17,8 @@ namespace { constexpr size_t MIN_BUFFER_ARRAYS = 1024; constexpr size_t DENSE_TENSOR_ALIGNMENT = 32; +constexpr size_t DENSE_TENSOR_ALIGNMENT_SMALL = 16; +constexpr size_t DENSE_TENSOR_ALIGNMENT_MIN = 8; size_t my_align(size_t size, size_t alignment) { size += alignment - 1; @@ -27,17 +29,20 @@ size_t my_align(size_t size, size_t alignment) { DenseTensorStore::TensorSizeCalc::TensorSizeCalc(const ValueType &type) : _numCells(1u), - _cell_type(type.cell_type()) + _cell_type(type.cell_type()), + _aligned_size(0u) { for (const auto &dim: type.dimensions()) { _numCells *= dim.size; } -} - -size_t -DenseTensorStore::TensorSizeCalc::alignedSize() const -{ - return my_align(bufSize(), DENSE_TENSOR_ALIGNMENT); + auto buf_size = bufSize(); + size_t alignment = DENSE_TENSOR_ALIGNMENT; + if (buf_size <= DENSE_TENSOR_ALIGNMENT_MIN) { + alignment = DENSE_TENSOR_ALIGNMENT_MIN; + } else if (buf_size <= DENSE_TENSOR_ALIGNMENT_SMALL) { + alignment = DENSE_TENSOR_ALIGNMENT_SMALL; + } + _aligned_size = my_align(buf_size, alignment); } DenseTensorStore::BufferType::BufferType(const TensorSizeCalc &tensorSizeCalc, std::unique_ptr<vespalib::alloc::MemoryAllocator> allocator) @@ -79,12 +84,6 @@ DenseTensorStore::~DenseTensorStore() _store.dropBuffers(); } -const void * 
-DenseTensorStore::getRawBuffer(RefType ref) const -{ - return _store.getEntryArray<char>(ref, _bufferType.getArraySize()); -} - namespace { void clearPadAreaAfterBuffer(char *buffer, size_t bufSize, size_t alignedBufSize) { @@ -136,15 +135,6 @@ DenseTensorStore::getTensor(EntryRef ref) const return std::make_unique<vespalib::eval::DenseValueView>(_type, cells_ref); } -vespalib::eval::TypedCells -DenseTensorStore::get_typed_cells(EntryRef ref) const -{ - if (!ref.valid()) { - return vespalib::eval::TypedCells(&_emptySpace[0], _type.cell_type(), getNumCells()); - } - return vespalib::eval::TypedCells(getRawBuffer(ref), _type.cell_type(), getNumCells()); -} - template <class TensorType> TensorStore::EntryRef DenseTensorStore::setDenseTensor(const TensorType &tensor) diff --git a/searchlib/src/vespa/searchlib/tensor/dense_tensor_store.h b/searchlib/src/vespa/searchlib/tensor/dense_tensor_store.h index 3b7cb71863e..47932fbff7e 100644 --- a/searchlib/src/vespa/searchlib/tensor/dense_tensor_store.h +++ b/searchlib/src/vespa/searchlib/tensor/dense_tensor_store.h @@ -25,12 +25,13 @@ public: { size_t _numCells; // product of dimension sizes vespalib::eval::CellType _cell_type; + size_t _aligned_size; TensorSizeCalc(const ValueType &type); size_t bufSize() const { return vespalib::eval::CellTypeUtils::mem_size(_cell_type, _numCells); } - size_t alignedSize() const; + size_t alignedSize() const noexcept { return _aligned_size; } }; class BufferType : public vespalib::datastore::BufferType<char> @@ -50,12 +51,9 @@ private: ValueType _type; // type of dense tensor std::vector<char> _emptySpace; - size_t unboundCells(const void *buffer) const; - template <class TensorType> TensorStore::EntryRef setDenseTensor(const TensorType &tensor); - public: DenseTensorStore(const ValueType &type, std::unique_ptr<vespalib::alloc::MemoryAllocator> allocator); ~DenseTensorStore() override; @@ -63,12 +61,17 @@ public: const ValueType &type() const { return _type; } size_t getNumCells() const { 
return _tensorSizeCalc._numCells; } size_t getBufSize() const { return _tensorSizeCalc.bufSize(); } - const void *getRawBuffer(RefType ref) const; + const void *getRawBuffer(RefType ref) const { + return _store.getEntryArray<char>(ref, _bufferType.getArraySize()); + } vespalib::datastore::Handle<char> allocRawBuffer(); void holdTensor(EntryRef ref) override; EntryRef move(EntryRef ref) override; std::unique_ptr<vespalib::eval::Value> getTensor(EntryRef ref) const; - vespalib::eval::TypedCells get_typed_cells(EntryRef ref) const; + vespalib::eval::TypedCells get_typed_cells(EntryRef ref) const { + return vespalib::eval::TypedCells(ref.valid() ? getRawBuffer(ref) : &_emptySpace[0], + _type.cell_type(), getNumCells()); + } EntryRef setTensor(const vespalib::eval::Value &tensor); // The following method is meant to be used only for unit tests. uint32_t getArraySize() const { return _bufferType.getArraySize(); } diff --git a/searchlib/src/vespa/searchlib/tensor/distance_function_factory.cpp b/searchlib/src/vespa/searchlib/tensor/distance_function_factory.cpp index 315d4c8535c..96dfc580d87 100644 --- a/searchlib/src/vespa/searchlib/tensor/distance_function_factory.cpp +++ b/searchlib/src/vespa/searchlib/tensor/distance_function_factory.cpp @@ -21,6 +21,7 @@ make_distance_function(DistanceMetric variant, CellType cell_type) switch (cell_type) { case CellType::FLOAT: return std::make_unique<SquaredEuclideanDistanceHW<float>>(); case CellType::DOUBLE: return std::make_unique<SquaredEuclideanDistanceHW<double>>(); + case CellType::INT8: return std::make_unique<SquaredEuclideanDistanceHW<vespalib::eval::Int8Float>>(); default: return std::make_unique<SquaredEuclideanDistance>(CellType::FLOAT); } case DistanceMetric::Angular: diff --git a/searchlib/src/vespa/searchlib/tensor/euclidean_distance.h b/searchlib/src/vespa/searchlib/tensor/euclidean_distance.h index 517ef68511b..6505ea119ea 100644 --- a/searchlib/src/vespa/searchlib/tensor/euclidean_distance.h +++ 
b/searchlib/src/vespa/searchlib/tensor/euclidean_distance.h @@ -44,6 +44,9 @@ public: assert(expected_cell_type() == vespalib::eval::get_cell_type<FloatType>()); } + static const double *cast(const double * p) { return p; } + static const float *cast(const float * p) { return p; } + static const int8_t *cast(const vespalib::eval::Int8Float * p) { return reinterpret_cast<const int8_t *>(p); } double calc(const vespalib::eval::TypedCells& lhs, const vespalib::eval::TypedCells& rhs) const override { constexpr vespalib::eval::CellType expected = vespalib::eval::get_cell_type<FloatType>(); assert(lhs.type == expected && rhs.type == expected); @@ -51,7 +54,7 @@ public: auto rhs_vector = rhs.typify<FloatType>(); size_t sz = lhs_vector.size(); assert(sz == rhs_vector.size()); - return _computer.squaredEuclideanDistance(&lhs_vector[0], &rhs_vector[0], sz); + return _computer.squaredEuclideanDistance(cast(&lhs_vector[0]), cast(&rhs_vector[0]), sz); } double calc_with_limit(const vespalib::eval::TypedCells& lhs, diff --git a/searchlib/src/vespa/searchlib/tensor/hamming_distance.cpp b/searchlib/src/vespa/searchlib/tensor/hamming_distance.cpp index 7f9f20e07c4..43596478a6f 100644 --- a/searchlib/src/vespa/searchlib/tensor/hamming_distance.cpp +++ b/searchlib/src/vespa/searchlib/tensor/hamming_distance.cpp @@ -43,4 +43,13 @@ HammingDistance::calc(const vespalib::eval::TypedCells& lhs, } } +double +HammingDistance::calc_with_limit(const vespalib::eval::TypedCells& lhs, + const vespalib::eval::TypedCells& rhs, + double) const +{ + // consider optimizing: + return calc(lhs, rhs); +} + } diff --git a/searchlib/src/vespa/searchlib/tensor/hamming_distance.h b/searchlib/src/vespa/searchlib/tensor/hamming_distance.h index f0b7b159b90..c64fc5b532d 100644 --- a/searchlib/src/vespa/searchlib/tensor/hamming_distance.h +++ b/searchlib/src/vespa/searchlib/tensor/hamming_distance.h @@ -15,7 +15,7 @@ namespace search::tensor { * or (for int8 cells, aka binary data only) * "number of bits that 
are different" */ -class HammingDistance : public DistanceFunction { +class HammingDistance final : public DistanceFunction { public: HammingDistance(vespalib::eval::CellType expected) : DistanceFunction(expected) {} double calc(const vespalib::eval::TypedCells& lhs, const vespalib::eval::TypedCells& rhs) const override; @@ -26,13 +26,7 @@ public: double score = 1.0 / (1.0 + distance); return score; } - double calc_with_limit(const vespalib::eval::TypedCells& lhs, - const vespalib::eval::TypedCells& rhs, - double) const override - { - // consider optimizing: - return calc(lhs, rhs); - } + double calc_with_limit(const vespalib::eval::TypedCells& lhs, const vespalib::eval::TypedCells& rhs, double) const override; }; } diff --git a/searchlib/src/vespa/searchlib/tensor/hnsw_index.cpp b/searchlib/src/vespa/searchlib/tensor/hnsw_index.cpp index 847ed330fe8..c99e059815b 100644 --- a/searchlib/src/vespa/searchlib/tensor/hnsw_index.cpp +++ b/searchlib/src/vespa/searchlib/tensor/hnsw_index.cpp @@ -8,7 +8,6 @@ #include "hnsw_index_saver.h" #include "random_level_generator.h" #include "reusable_set_visited_tracker.h" -#include <vespa/searchcommon/common/compaction_strategy.h> #include <vespa/searchlib/attribute/address_space_components.h> #include <vespa/searchlib/attribute/address_space_usage.h> #include <vespa/searchlib/util/fileutil.h> @@ -16,6 +15,7 @@ #include <vespa/vespalib/data/slime/cursor.h> #include <vespa/vespalib/data/slime/inserter.h> #include <vespa/vespalib/datastore/array_store.hpp> +#include <vespa/vespalib/datastore/compaction_strategy.h> #include <vespa/vespalib/util/memory_allocator.h> #include <vespa/vespalib/util/rcuvector.hpp> #include <vespa/vespalib/util/size_literals.h> @@ -30,6 +30,7 @@ namespace search::tensor { using search::AddressSpaceComponents; using search::StateExplorerUtils; +using vespalib::datastore::CompactionStrategy; using vespalib::datastore::EntryRef; namespace { @@ -191,6 +192,7 @@ HnswIndex::remove_link_to(uint32_t remove_from, 
uint32_t remove_id, uint32_t lev { LinkArray new_links; auto old_links = _graph.get_link_array(remove_from, level); + new_links.reserve(old_links.size()); for (uint32_t id : old_links) { if (id != remove_id) new_links.push_back(id); } @@ -336,10 +338,7 @@ HnswIndex::HnswIndex(const DocVectorAccess& vectors, DistanceFunction::UP distan _level_generator(std::move(level_generator)), _cfg(cfg), _visited_set_pool(), - _cached_level_arrays_memory_usage(), - _cached_level_arrays_address_space_usage(0, 0, (1ull << 32)), - _cached_link_arrays_memory_usage(), - _cached_link_arrays_address_space_usage(0, 0, (1ull << 32)) + _compaction_spec() { assert(_distance_func); } @@ -530,18 +529,18 @@ HnswIndex::trim_hold_lists(generation_t first_used_gen) } void -HnswIndex::compact_level_arrays(bool compact_memory, bool compact_address_space) +HnswIndex::compact_level_arrays(CompactionSpec compaction_spec, const CompactionStrategy& compaction_strategy) { - auto context = _graph.nodes.compactWorst(compact_memory, compact_address_space); + auto context = _graph.nodes.compactWorst(compaction_spec, compaction_strategy); uint32_t doc_id_limit = _graph.node_refs.size(); vespalib::ArrayRef<AtomicEntryRef> refs(&_graph.node_refs[0], doc_id_limit); context->compact(refs); } void -HnswIndex::compact_link_arrays(bool compact_memory, bool compact_address_space) +HnswIndex::compact_link_arrays(CompactionSpec compaction_spec, const CompactionStrategy& compaction_strategy) { - auto context = _graph.links.compactWorst(compact_memory, compact_address_space); + auto context = _graph.links.compactWorst(compaction_spec, compaction_strategy); uint32_t doc_id_limit = _graph.node_refs.size(); for (uint32_t doc_id = 1; doc_id < doc_id_limit; ++doc_id) { EntryRef level_ref = _graph.node_refs[doc_id].load_relaxed(); @@ -552,40 +551,24 @@ HnswIndex::compact_link_arrays(bool compact_memory, bool compact_address_space) } } -namespace { - bool -consider_compact_arrays(const CompactionStrategy& compaction_strategy, 
vespalib::MemoryUsage& memory_usage, vespalib::AddressSpace& address_space_usage, std::function<void(bool,bool)> compact_arrays) -{ - size_t used_bytes = memory_usage.usedBytes(); - size_t dead_bytes = memory_usage.deadBytes(); - bool compact_memory = compaction_strategy.should_compact_memory(used_bytes, dead_bytes); - size_t used_address_space = address_space_usage.used(); - size_t dead_address_space = address_space_usage.dead(); - bool compact_address_space = compaction_strategy.should_compact_address_space(used_address_space, dead_address_space); - if (compact_memory || compact_address_space) { - compact_arrays(compact_memory, compact_address_space); +HnswIndex::consider_compact_level_arrays(const CompactionStrategy& compaction_strategy) +{ + if (_compaction_spec.level_arrays().compact()) { + compact_level_arrays(_compaction_spec.level_arrays(), compaction_strategy); return true; } return false; } -} - -bool -HnswIndex::consider_compact_level_arrays(const CompactionStrategy& compaction_strategy) -{ - return consider_compact_arrays(compaction_strategy, _cached_level_arrays_memory_usage, _cached_level_arrays_address_space_usage, - [this](bool compact_memory, bool compact_address_space) - { compact_level_arrays(compact_memory, compact_address_space); }); -} - bool HnswIndex::consider_compact_link_arrays(const CompactionStrategy& compaction_strategy) { - return consider_compact_arrays(compaction_strategy, _cached_link_arrays_memory_usage, _cached_link_arrays_address_space_usage, - [this](bool compact_memory, bool compact_address_space) - { compact_link_arrays(compact_memory, compact_address_space); }); + if (_compaction_spec.link_arrays().compact()) { + compact_link_arrays(_compaction_spec.link_arrays(), compaction_strategy); + return true; + } + return false; } bool @@ -602,16 +585,18 @@ HnswIndex::consider_compact(const CompactionStrategy& compaction_strategy) } vespalib::MemoryUsage -HnswIndex::update_stat() +HnswIndex::update_stat(const CompactionStrategy& 
compaction_strategy) { vespalib::MemoryUsage result; result.merge(_graph.node_refs.getMemoryUsage()); - _cached_level_arrays_memory_usage = _graph.nodes.getMemoryUsage(); - _cached_level_arrays_address_space_usage = _graph.nodes.addressSpaceUsage(); - result.merge(_cached_level_arrays_memory_usage); - _cached_link_arrays_memory_usage = _graph.links.getMemoryUsage(); - _cached_link_arrays_address_space_usage = _graph.links.addressSpaceUsage(); - result.merge(_cached_link_arrays_memory_usage); + auto level_arrays_memory_usage = _graph.nodes.getMemoryUsage(); + auto level_arrays_address_space_usage = _graph.nodes.addressSpaceUsage(); + result.merge(level_arrays_memory_usage); + auto link_arrays_memory_usage = _graph.links.getMemoryUsage(); + auto link_arrays_address_space_usage = _graph.links.addressSpaceUsage(); + _compaction_spec = HnswIndexCompactionSpec(compaction_strategy.should_compact(level_arrays_memory_usage, level_arrays_address_space_usage), + compaction_strategy.should_compact(link_arrays_memory_usage, link_arrays_address_space_usage)); + result.merge(link_arrays_memory_usage); result.merge(_visited_set_pool.memory_usage()); return result; } diff --git a/searchlib/src/vespa/searchlib/tensor/hnsw_index.h b/searchlib/src/vespa/searchlib/tensor/hnsw_index.h index d8f3c4c97fa..f607af587b5 100644 --- a/searchlib/src/vespa/searchlib/tensor/hnsw_index.h +++ b/searchlib/src/vespa/searchlib/tensor/hnsw_index.h @@ -13,6 +13,7 @@ #include <vespa/searchlib/common/bitvector.h> #include <vespa/vespalib/datastore/array_store.h> #include <vespa/vespalib/datastore/atomic_entry_ref.h> +#include <vespa/vespalib/datastore/compaction_spec.h> #include <vespa/vespalib/datastore/entryref.h> #include <vespa/vespalib/util/rcuvector.h> #include <vespa/vespalib/util/reusable_set_pool.h> @@ -61,6 +62,25 @@ public: bool heuristic_select_neighbors() const { return _heuristic_select_neighbors; } }; + class HnswIndexCompactionSpec { + CompactionSpec _level_arrays; + CompactionSpec 
_link_arrays; + + public: + HnswIndexCompactionSpec() + : _level_arrays(), + _link_arrays() + { + } + HnswIndexCompactionSpec(CompactionSpec level_arrays_, CompactionSpec link_arrays_) + : _level_arrays(level_arrays_), + _link_arrays(link_arrays_) + { + } + CompactionSpec level_arrays() const noexcept { return _level_arrays; } + CompactionSpec link_arrays() const noexcept { return _link_arrays; } + }; + protected: using AtomicEntryRef = HnswGraph::AtomicEntryRef; using NodeStore = HnswGraph::NodeStore; @@ -80,10 +100,7 @@ protected: RandomLevelGenerator::UP _level_generator; Config _cfg; mutable vespalib::ReusableSetPool _visited_set_pool; - vespalib::MemoryUsage _cached_level_arrays_memory_usage; - vespalib::AddressSpace _cached_level_arrays_address_space_usage; - vespalib::MemoryUsage _cached_link_arrays_memory_usage; - vespalib::AddressSpace _cached_link_arrays_address_space_usage; + HnswIndexCompactionSpec _compaction_spec; uint32_t max_links_for_level(uint32_t level) const; void add_link_to(uint32_t docid, uint32_t level, const LinkArrayRef& old_links, uint32_t new_link) { @@ -171,12 +188,12 @@ public: void remove_document(uint32_t docid) override; void transfer_hold_lists(generation_t current_gen) override; void trim_hold_lists(generation_t first_used_gen) override; - void compact_level_arrays(bool compact_memory, bool compact_addreess_space); - void compact_link_arrays(bool compact_memory, bool compact_address_space); + void compact_level_arrays(CompactionSpec compaction_spec, const CompactionStrategy& compaction_strategy); + void compact_link_arrays(CompactionSpec compaction_spec, const CompactionStrategy& compaction_strategy); bool consider_compact_level_arrays(const CompactionStrategy& compaction_strategy); bool consider_compact_link_arrays(const CompactionStrategy& compaction_strategy); bool consider_compact(const CompactionStrategy& compaction_strategy) override; - vespalib::MemoryUsage update_stat() override; + vespalib::MemoryUsage update_stat(const 
CompactionStrategy& compaction_strategy) override; vespalib::MemoryUsage memory_usage() const override; void populate_address_space_usage(search::AddressSpaceUsage& usage) const override; void get_state(const vespalib::slime::Inserter& inserter) const override; diff --git a/searchlib/src/vespa/searchlib/tensor/nearest_neighbor_index.h b/searchlib/src/vespa/searchlib/tensor/nearest_neighbor_index.h index 411d09cd2d3..530d3e1036d 100644 --- a/searchlib/src/vespa/searchlib/tensor/nearest_neighbor_index.h +++ b/searchlib/src/vespa/searchlib/tensor/nearest_neighbor_index.h @@ -12,6 +12,10 @@ class FastOS_FileInterface; +namespace vespalib::datastore { +class CompactionSpec; +class CompactionStrategy; +} namespace vespalib::slime { struct Inserter; } namespace search::fileutil { class LoadedBuffer; } @@ -19,7 +23,6 @@ namespace search::fileutil { class LoadedBuffer; } namespace search { class AddressSpaceUsage; class BitVector; -class CompactionStrategy; } namespace search::tensor { @@ -32,6 +35,8 @@ class NearestNeighborIndexSaver; */ class NearestNeighborIndex { public: + using CompactionSpec = vespalib::datastore::CompactionSpec; + using CompactionStrategy = vespalib::datastore::CompactionStrategy; using generation_t = vespalib::GenerationHandler::generation_t; struct Neighbor { uint32_t docid; @@ -67,7 +72,7 @@ public: virtual void transfer_hold_lists(generation_t current_gen) = 0; virtual void trim_hold_lists(generation_t first_used_gen) = 0; virtual bool consider_compact(const CompactionStrategy& compaction_strategy) = 0; - virtual vespalib::MemoryUsage update_stat() = 0; + virtual vespalib::MemoryUsage update_stat(const CompactionStrategy& compaction_strategy) = 0; virtual vespalib::MemoryUsage memory_usage() const = 0; virtual void populate_address_space_usage(search::AddressSpaceUsage& usage) const = 0; virtual void get_state(const vespalib::slime::Inserter& inserter) const = 0; diff --git a/searchlib/src/vespa/searchlib/tensor/tensor_attribute.cpp 
b/searchlib/src/vespa/searchlib/tensor/tensor_attribute.cpp index a19541072da..5bd14d2c234 100644 --- a/searchlib/src/vespa/searchlib/tensor/tensor_attribute.cpp +++ b/searchlib/src/vespa/searchlib/tensor/tensor_attribute.cpp @@ -89,9 +89,7 @@ TensorAttribute::onCommit() incGeneration(); if (getFirstUsedGeneration() > _compactGeneration) { // No data held from previous compact operation - size_t used = _cached_tensor_store_memory_usage.usedBytes(); - size_t dead = _cached_tensor_store_memory_usage.deadBytes(); - if (getConfig().getCompactionStrategy().should_compact_memory(used, dead)) { + if (getConfig().getCompactionStrategy().should_compact_memory(_cached_tensor_store_memory_usage)) { compactWorst(); } } diff --git a/searchlib/src/vespa/searchlib/transactionlog/chunks.cpp b/searchlib/src/vespa/searchlib/transactionlog/chunks.cpp index ac17c47ef38..cdd58adf005 100644 --- a/searchlib/src/vespa/searchlib/transactionlog/chunks.cpp +++ b/searchlib/src/vespa/searchlib/transactionlog/chunks.cpp @@ -43,9 +43,8 @@ toCompression(CompressionConfig::Type type) { case CompressionConfig::LZ4: return Encoding::Compression::lz4; case CompressionConfig::NONE_MULTI: - return Encoding::Compression::none_multi; case CompressionConfig::NONE: - return Encoding::Compression::none; + return Encoding::Compression::none_multi; default: abort(); } @@ -114,9 +113,6 @@ XXH64CompressedChunk::compress(nbostream & os, Encoding::Crc crc) const { CompressionConfig cfg(_type, _level, 80, 200); ConstBufferRef uncompressed(org.data(), org.size()); Encoding::Compression actual = toCompression(::compress(cfg, uncompressed, compressed, false)); - if (actual == Encoding::Compression::none) { - actual = Encoding::Compression::none_multi; - } os << uint32_t(uncompressed.size()); size_t start = os.wp(); os.write(compressed.getData(), compressed.getDataLen()); diff --git a/searchlib/src/vespa/searchlib/transactionlog/domain.cpp b/searchlib/src/vespa/searchlib/transactionlog/domain.cpp index 
25ada00e7fb..96b94955570 100644 --- a/searchlib/src/vespa/searchlib/transactionlog/domain.cpp +++ b/searchlib/src/vespa/searchlib/transactionlog/domain.cpp @@ -7,10 +7,12 @@ #include <vespa/vespalib/io/fileutil.h> #include <vespa/vespalib/util/lambdatask.h> #include <vespa/vespalib/util/size_literals.h> +#include <vespa/vespalib/util/retain_guard.h> #include <vespa/fastos/file.h> #include <algorithm> #include <thread> #include <cassert> +#include <future> #include <vespa/log/log.h> #include <vespa/vespalib/util/threadstackexecutor.h> @@ -55,44 +57,44 @@ Domain::Domain(const string &domainName, const string & baseDir, Executor & exec _fileHeaderContext(fileHeaderContext), _markedDeleted(false) { - int retval(0); - if ((retval = makeDirectory(_baseDir.c_str())) != 0) { + assert(_config.getEncoding().getCompression() != Encoding::Compression::none); + int retval = makeDirectory(_baseDir.c_str()); + if (retval != 0) { throw runtime_error(fmt("Failed creating basedirectory %s r(%d), e(%d)", _baseDir.c_str(), retval, errno)); } - if ((retval = makeDirectory(dir().c_str())) != 0) { + retval = makeDirectory(dir().c_str()); + if (retval != 0) { throw runtime_error(fmt("Failed creating domaindir %s r(%d), e(%d)", dir().c_str(), retval, errno)); } SerialNumList partIdVector = scanDir(); const SerialNum lastPart = partIdVector.empty() ? 
0 : partIdVector.back(); + vespalib::MonitoredRefCount pending; for (const SerialNum partId : partIdVector) { if ( partId != std::numeric_limits<SerialNum>::max()) { - _executor.execute(makeLambdaTask([this, partId, lastPart]() { addPart(partId, partId == lastPart); })); + _executor.execute(makeLambdaTask([this, partId, lastPart, refGuard=vespalib::RetainGuard(pending)]() { + (void) refGuard; + addPart(partId, partId == lastPart); + })); } } - _executor.sync(); + pending.waitForZeroRefCount(); if (_parts.empty() || _parts.crbegin()->second->isClosed()) { - _parts[lastPart] = std::make_shared<DomainPart>(_name, dir(), lastPart, _config.getEncoding(), - _config.getCompressionlevel(), _fileHeaderContext, false); + _parts[lastPart] = std::make_shared<DomainPart>(_name, dir(), lastPart, _fileHeaderContext, false); vespalib::File::sync(dir()); } _lastSerial = end(); } -vespalib::Executor::Task::UP -Domain::execute(vespalib::Executor::Task::UP task) { - return _executor.execute(std::move(task)); -} - Domain & Domain::setConfig(const DomainConfig & cfg) { _config = cfg; + assert(_config.getEncoding().getCompression() != Encoding::Compression::none); return *this; } void Domain::addPart(SerialNum partId, bool isLastPart) { - auto dp = std::make_shared<DomainPart>(_name, dir(), partId, _config.getEncoding(), - _config.getCompressionlevel(), _fileHeaderContext, isLastPart); + auto dp = std::make_shared<DomainPart>(_name, dir(), partId, _fileHeaderContext, isLastPart); if (dp->size() == 0) { // Only last domain part is allowed to be truncated down to // empty size. 
@@ -331,8 +333,7 @@ Domain::optionallyRotateFile(SerialNum serialNum) { triggerSyncNow({}); waitPendingSync(_syncMonitor, _syncCond, _pendingSync); dp->close(); - dp = std::make_shared<DomainPart>(_name, dir(), serialNum, _config.getEncoding(), - _config.getCompressionlevel(), _fileHeaderContext, false); + dp = std::make_shared<DomainPart>(_name, dir(), serialNum, _fileHeaderContext, false); { std::lock_guard guard(_lock); _parts[serialNum] = dp; @@ -394,27 +395,32 @@ Domain::grabCurrentChunk(const UniqueLock & guard) { void Domain::commitChunk(std::unique_ptr<CommitChunk> chunk, const UniqueLock & chunkOrderGuard) { assert(chunkOrderGuard.mutex() == &_currentChunkMonitor && chunkOrderGuard.owns_lock()); - _singleCommitter->execute( makeLambdaTask([this, chunk = std::move(chunk)]() mutable { - doCommit(std::move(chunk)); + if (chunk->getPacket().empty()) return; + std::promise<SerializedChunk> promise; + std::future<SerializedChunk> future = promise.get_future(); + _executor.execute(makeLambdaTask([promise=std::move(promise), chunk = std::move(chunk), + encoding=_config.getEncoding(), compressionLevel=_config.getCompressionlevel()]() mutable { + promise.set_value(SerializedChunk(std::move(chunk), encoding, compressionLevel)); + })); + _singleCommitter->execute( makeLambdaTask([this, future = std::move(future)]() mutable { + doCommit(future.get()); })); } + + void -Domain::doCommit(std::unique_ptr<CommitChunk> chunk) { - const Packet & packet = chunk->getPacket(); - if (packet.empty()) return; - - vespalib::nbostream_longlivedbuf is(packet.getHandle().data(), packet.getHandle().size()); - Packet::Entry entry; - entry.deserialize(is); - DomainPart::SP dp = optionallyRotateFile(entry.serial()); - dp->commit(entry.serial(), packet); +Domain::doCommit(const SerializedChunk & serialized) { + + SerialNumRange range = serialized.range(); + DomainPart::SP dp = optionallyRotateFile(range.from()); + dp->commit(serialized); if (_config.getFSyncOnCommit()) { dp->sync(); } 
cleanSessions(); LOG(debug, "Releasing %zu acks and %zu entries and %zu bytes.", - chunk->getNumCallBacks(), chunk->getPacket().size(), chunk->sizeBytes()); + serialized.commitChunk().getNumCallBacks(), serialized.getNumEntries(), serialized.getData().size()); } bool @@ -457,8 +463,8 @@ Domain::startSession(int sessionId) std::lock_guard guard(_sessionLock); SessionList::iterator found = _sessions.find(sessionId); if (found != _sessions.end()) { - found->second->setStartTime(std::chrono::steady_clock::now()); - if ( execute(Session::createTask(found->second)).get() == nullptr ) { + found->second->setStartTime(vespalib::steady_clock::now()); + if ( _executor.execute(Session::createTask(found->second)).get() == nullptr ) { retval = 0; } else { _sessions.erase(sessionId); @@ -470,14 +476,13 @@ Domain::startSession(int sessionId) int Domain::closeSession(int sessionId) { - _executor.sync(); int retval(-1); DurationSeconds sessionRunTime(0); { std::lock_guard guard(_sessionLock); SessionList::iterator found = _sessions.find(sessionId); if (found != _sessions.end()) { - sessionRunTime = (std::chrono::steady_clock::now() - found->second->getStartTime()); + sessionRunTime = (vespalib::steady_clock::now() - found->second->getStartTime()); retval = 1; } } @@ -523,7 +528,7 @@ Domain::scanDir() if (ename[wantPrefixLen] != '-') continue; const char *p = ename + wantPrefixLen + 1; - uint64_t num = strtoull(p, NULL, 10); + uint64_t num = strtoull(p, nullptr, 10); string checkName = fmt("%s-%016" PRIu64, _name.c_str(), num); if (strcmp(checkName.c_str(), ename) != 0) continue; diff --git a/searchlib/src/vespa/searchlib/transactionlog/domain.h b/searchlib/src/vespa/searchlib/transactionlog/domain.h index 01bf552c060..eb3d0b6b10b 100644 --- a/searchlib/src/vespa/searchlib/transactionlog/domain.h +++ b/searchlib/src/vespa/searchlib/transactionlog/domain.h @@ -53,7 +53,6 @@ public: getDir(const vespalib::string & base, const vespalib::string & domain) { return base + "/" + domain; } - 
vespalib::Executor::Task::UP execute(vespalib::Executor::Task::UP task); uint64_t size() const; Domain & setConfig(const DomainConfig & cfg); private: @@ -65,7 +64,7 @@ private: std::unique_ptr<CommitChunk> grabCurrentChunk(const UniqueLock & guard); void commitChunk(std::unique_ptr<CommitChunk> chunk, const UniqueLock & chunkOrderGuard); - void doCommit(std::unique_ptr<CommitChunk> chunk); + void doCommit(const SerializedChunk & serialized); SerialNum begin(const UniqueLock & guard) const; SerialNum end(const UniqueLock & guard) const; size_t byteSize(const UniqueLock & guard) const; diff --git a/searchlib/src/vespa/searchlib/transactionlog/domainconfig.cpp b/searchlib/src/vespa/searchlib/transactionlog/domainconfig.cpp index b4ab8a1c791..1f414eaa6d3 100644 --- a/searchlib/src/vespa/searchlib/transactionlog/domainconfig.cpp +++ b/searchlib/src/vespa/searchlib/transactionlog/domainconfig.cpp @@ -1,15 +1,25 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
#include "domainconfig.h" +#include <vespa/vespalib/util/exceptions.h> namespace search::transactionlog { DomainConfig::DomainConfig() - : _encoding(Encoding::Crc::xxh64, Encoding::Compression::none), + : _encoding(Encoding::Crc::xxh64, Encoding::Compression::zstd), _compressionLevel(9), _fSyncOnCommit(false), _partSizeLimit(0x10000000), // 256M _chunkSizeLimit(0x40000) // 256k { } +DomainConfig & +DomainConfig::setEncoding(Encoding v) { + if (v.getCompression() == Encoding::none) { + throw vespalib::IllegalArgumentException("Compression:none is not allowed for the tls", VESPA_STRLOC); + } + _encoding = v; + return *this; +} + } diff --git a/searchlib/src/vespa/searchlib/transactionlog/domainconfig.h b/searchlib/src/vespa/searchlib/transactionlog/domainconfig.h index 186227ae958..7701896fa92 100644 --- a/searchlib/src/vespa/searchlib/transactionlog/domainconfig.h +++ b/searchlib/src/vespa/searchlib/transactionlog/domainconfig.h @@ -11,7 +11,7 @@ class DomainConfig { public: using duration = vespalib::duration; DomainConfig(); - DomainConfig & setEncoding(Encoding v) { _encoding = v; return *this; } + DomainConfig & setEncoding(Encoding v); DomainConfig & setPartSizeLimit(size_t v) { _partSizeLimit = v; return *this; } DomainConfig & setChunkSizeLimit(size_t v) { _chunkSizeLimit = v; return *this; } DomainConfig & setCompressionLevel(uint8_t v) { _compressionLevel = v; return *this; } diff --git a/searchlib/src/vespa/searchlib/transactionlog/domainpart.cpp b/searchlib/src/vespa/searchlib/transactionlog/domainpart.cpp index ee575820cce..2ca2f15545d 100644 --- a/searchlib/src/vespa/searchlib/transactionlog/domainpart.cpp +++ b/searchlib/src/vespa/searchlib/transactionlog/domainpart.cpp @@ -247,11 +247,9 @@ DomainPart::buildPacketMapping(bool allowTruncate) return currPos; } -DomainPart::DomainPart(const string & name, const string & baseDir, SerialNum s, Encoding encoding, - uint8_t compressionLevel, const FileHeaderContext &fileHeaderContext, bool allowTruncate) - : 
_encoding(encoding), - _compressionLevel(compressionLevel), - _lock(), +DomainPart::DomainPart(const string & name, const string & baseDir, SerialNum s, + const FileHeaderContext &fileHeaderContext, bool allowTruncate) + : _lock(), _fileLock(), _range(s), _sz(0), @@ -379,38 +377,21 @@ DomainPart::erase(SerialNum to) } void -DomainPart::commit(SerialNum firstSerial, const Packet &packet) +DomainPart::commit(const SerializedChunk & serialized) { + SerialNumRange range = serialized.range(); + int64_t firstPos(byteSize()); - nbostream_longlivedbuf h(packet.getHandle().data(), packet.getHandle().size()); + assert(_range.to() < range.to()); + _sz += serialized.getNumEntries(); + _range.to(range.to()); if (_range.from() == 0) { - _range.from(firstSerial); - } - IChunk::UP chunk = IChunk::create(_encoding, _compressionLevel); - for (size_t i(0); h.size() > 0; i++) { - //LOG(spam, - //"Pos(%d) Len(%d), Lim(%d), Remaining(%d)", - //h.getPos(), h.getLength(), h.getLimit(), h.getRemaining()); - Packet::Entry entry; - entry.deserialize(h); - if (_range.to() < entry.serial()) { - chunk->add(entry); - if (_encoding.getCompression() == Encoding::Compression::none) { - write(*_transLog, *chunk); - chunk = IChunk::create(_encoding, _compressionLevel); - } - _sz++; - _range.to(entry.serial()); - } else { - throw runtime_error(fmt("Incoming serial number(%" PRIu64 ") must be bigger than the last one (%" PRIu64 ").", - entry.serial(), _range.to())); - } - } - if ( ! 
chunk->getEntries().empty()) { - write(*_transLog, *chunk); + _range.from(range.from()); } + + write(*_transLog, range, serialized.getData()); std::lock_guard guard(_lock); - _skipList.emplace_back(firstSerial, firstPos); + _skipList.emplace_back(range.from(), firstPos); } void @@ -445,26 +426,15 @@ DomainPart::visit(FastOS_FileInterface &file, SerialNumRange &r, Packet &packet) } void -DomainPart::write(FastOS_FileInterface &file, const IChunk & chunk) +DomainPart::write(FastOS_FileInterface &file, SerialNumRange range, vespalib::ConstBufferRef buf) { - nbostream os; - size_t begin = os.wp(); - os << _encoding.getRaw(); // Placeholder for encoding - os << uint32_t(0); // Placeholder for size - Encoding realEncoding = chunk.encode(os); - size_t end = os.wp(); - os.wp(0); - os << realEncoding.getRaw(); //Patching real encoding - os << uint32_t(end - (begin + sizeof(uint32_t) + sizeof(uint8_t))); // Patching actual size. - os.wp(end); std::lock_guard guard(_writeLock); - if ( ! file.CheckedWrite(os.data(), os.size()) ) { - throw runtime_error(handleWriteError("Failed writing the entry.", file, byteSize(), chunk.range(), os.size())); + if ( ! 
file.CheckedWrite(buf.data(), buf.size()) ) { + throw runtime_error(handleWriteError("Failed writing the entry.", file, byteSize(), range, buf.size())); } - LOG(debug, "Wrote chunk with %zu entries and %zu bytes, range[%" PRIu64 ", %" PRIu64 "] encoding(wanted=%x, real=%x)", - chunk.getEntries().size(), os.size(), chunk.range().from(), chunk.range().to(), _encoding.getRaw(), realEncoding.getRaw()); - _writtenSerial = chunk.range().to(); - _byteSize.fetch_add(os.size(), std::memory_order_release); + LOG(debug, "Wrote chunk with and %zu bytes, range[%" PRIu64 ", %" PRIu64 "]", buf.size(), range.from(), range.to()); + _writtenSerial = range.to(); + _byteSize.fetch_add(buf.size(), std::memory_order_release); } bool diff --git a/searchlib/src/vespa/searchlib/transactionlog/domainpart.h b/searchlib/src/vespa/searchlib/transactionlog/domainpart.h index 9ab0db54391..ea5290c433b 100644 --- a/searchlib/src/vespa/searchlib/transactionlog/domainpart.h +++ b/searchlib/src/vespa/searchlib/transactionlog/domainpart.h @@ -19,13 +19,13 @@ public: using SP = std::shared_ptr<DomainPart>; DomainPart(const DomainPart &) = delete; DomainPart& operator=(const DomainPart &) = delete; - DomainPart(const vespalib::string &name, const vespalib::string &baseDir, SerialNum s, Encoding defaultEncoding, - uint8_t compressionLevel, const common::FileHeaderContext &FileHeaderContext, bool allowTruncate); + DomainPart(const vespalib::string &name, const vespalib::string &baseDir, SerialNum s, + const common::FileHeaderContext &FileHeaderContext, bool allowTruncate); ~DomainPart(); const vespalib::string &fileName() const { return _fileName; } - void commit(SerialNum firstSerial, const Packet &packet); + void commit(const SerializedChunk & serialized); bool erase(SerialNum to); bool visit(FastOS_FileInterface &file, SerialNumRange &r, Packet &packet); bool close(); @@ -49,7 +49,7 @@ private: static Packet readPacket(FastOS_FileInterface & file, SerialNumRange wanted, size_t targetSize, bool 
allowTruncate); static bool read(FastOS_FileInterface &file, IChunk::UP & chunk, Alloc &buf, bool allowTruncate); - void write(FastOS_FileInterface &file, const IChunk & entry); + void write(FastOS_FileInterface &file, SerialNumRange range, vespalib::ConstBufferRef buf); void writeHeader(const common::FileHeaderContext &fileHeaderContext); class SkipInfo @@ -69,8 +69,6 @@ private: SerialNum _id; uint64_t _pos; }; - const Encoding _encoding; - const uint8_t _compressionLevel; std::mutex _lock; std::mutex _fileLock; SerialNumRange _range; diff --git a/searchlib/src/vespa/searchlib/transactionlog/ichunk.cpp b/searchlib/src/vespa/searchlib/transactionlog/ichunk.cpp index ee1631ea8c2..d86d9dc763c 100644 --- a/searchlib/src/vespa/searchlib/transactionlog/ichunk.cpp +++ b/searchlib/src/vespa/searchlib/transactionlog/ichunk.cpp @@ -8,6 +8,9 @@ #include <cassert> #include <ostream> +#include <vespa/log/log.h> +LOG_SETUP(".searchlib.transactionlog.ichunk"); + using std::make_unique; using vespalib::make_string_short::fmt; using vespalib::nbostream_longlivedbuf; @@ -115,4 +118,51 @@ std::ostream & operator << (std::ostream & os, Encoding e) { return os << "crc=" << e.getCrc() << " compression=" << e.getCompression(); } + +void +encode(vespalib::nbostream & os, const IChunk & chunk, Encoding encoding) { + size_t begin = os.wp(); + os << encoding.getRaw(); // Placeholder for encoding + os << uint32_t(0); // Placeholder for size + Encoding realEncoding = chunk.encode(os); + size_t end = os.wp(); + os.wp(0); + os << realEncoding.getRaw(); //Patching real encoding + os << uint32_t(end - (begin + sizeof(uint32_t) + sizeof(uint8_t))); // Patching actual size. 
+ os.wp(end); + SerialNumRange range = chunk.range(); + LOG(spam, "Encoded chunk with %zu entries and %zu bytes, range[%" PRIu64 ", %" PRIu64 "] encoding(wanted=%x, real=%x)", + chunk.getEntries().size(), os.size(), range.from(), range.to(), encoding.getRaw(), realEncoding.getRaw()); +} + +SerializedChunk::SerializedChunk(std::unique_ptr<CommitChunk> commitChunk, Encoding encoding, uint8_t compressionLevel) + : _commitChunk(std::move(commitChunk)), + _os(), + _range(_commitChunk->getPacket().range()), + _numEntries(_commitChunk->getPacket().size()) +{ + const Packet & packet = _commitChunk->getPacket(); + nbostream_longlivedbuf h(packet.getHandle().data(), packet.getHandle().size()); + + IChunk::UP chunk = IChunk::create(encoding, compressionLevel); + SerialNum prev = 0; + for (size_t i(0); h.size() > 0; i++) { + //LOG(spam, + //"Pos(%d) Len(%d), Lim(%d), Remaining(%d)", + //h.getPos(), h.getLength(), h.getLimit(), h.getRemaining()); + Packet::Entry entry; + entry.deserialize(h); + assert (prev < entry.serial()); + chunk->add(entry); + prev = entry.serial(); + } + assert(! chunk->getEntries().empty()); + encode(_os, *chunk, encoding); +} + +SerializedChunk::~SerializedChunk() = default; + +vespalib::ConstBufferRef SerializedChunk::getData() const { + return vespalib::ConstBufferRef(_os.data(), _os.size()); +} } diff --git a/searchlib/src/vespa/searchlib/transactionlog/ichunk.h b/searchlib/src/vespa/searchlib/transactionlog/ichunk.h index 02bd0ce9426..cf8d12c1feb 100644 --- a/searchlib/src/vespa/searchlib/transactionlog/ichunk.h +++ b/searchlib/src/vespa/searchlib/transactionlog/ichunk.h @@ -33,6 +33,30 @@ private: std::ostream & operator << (std::ostream & os, Encoding e); /** + * Represents a completely encoded chunk with a buffer ready to be persisted, + * and the range and number of entries it covers. 
+ */ +class SerializedChunk { +public: + SerializedChunk(std::unique_ptr<CommitChunk> chunk, Encoding encoding, uint8_t compressionLevel); + SerializedChunk(SerializedChunk &&) = default; + SerializedChunk & operator=(SerializedChunk &&) = default; + SerializedChunk(const SerializedChunk &) = delete; + ~SerializedChunk(); + SerializedChunk & operator=(const SerializedChunk &) = delete; + vespalib::ConstBufferRef getData() const; + SerialNumRange range() const { return _range; } + size_t getNumEntries() const { return _numEntries; } + const CommitChunk & commitChunk() const { return *_commitChunk; } +private: + // CommitChunk is required to ensure we do not reply until committed to the TLS. + std::unique_ptr<CommitChunk> _commitChunk; + vespalib::nbostream _os; + SerialNumRange _range; + size_t _numEntries; +}; + +/** * Interface for different chunk formats. * Format specifies both crc type, and compression type. */ diff --git a/searchlib/src/vespa/searchlib/transactionlog/session.h b/searchlib/src/vespa/searchlib/transactionlog/session.h index a1f7440525d..3e55387b81c 100644 --- a/searchlib/src/vespa/searchlib/transactionlog/session.h +++ b/searchlib/src/vespa/searchlib/transactionlog/session.h @@ -3,8 +3,6 @@ #include "common.h" #include <vespa/vespalib/util/executor.h> -#include <chrono> -#include <deque> #include <atomic> class FastOS_FileInterface; @@ -19,10 +17,10 @@ class Session { private: using Task = vespalib::Executor::Task; - using time_point = std::chrono::time_point<std::chrono::steady_clock>; + using steady_time = vespalib::steady_time; public: - typedef std::shared_ptr<Session> SP; + using SP = std::shared_ptr<Session>; Session(const Session &) = delete; Session & operator = (const Session &) = delete; Session(int sId, const SerialNumRange & r, const DomainSP & d, std::unique_ptr<Destination> destination); @@ -32,8 +30,8 @@ public: bool inSync() const { return _inSync; } bool finished() const; static Task::UP createTask(const Session::SP & session); 
- void setStartTime(time_point startTime) { _startTime = startTime; } - time_point getStartTime() const { return _startTime; } + void setStartTime(steady_time startTime) { _startTime = startTime; } + steady_time getStartTime() const { return _startTime; } bool isVisitRunning() const { return _visitRunning; } private: class VisitTask : public Task { @@ -60,7 +58,7 @@ private: std::atomic<bool> _visitRunning; std::atomic<bool> _inSync; std::atomic<bool> _finished; - time_point _startTime; + steady_time _startTime; }; } diff --git a/searchlib/src/vespa/searchlib/transactionlog/translogserver.cpp b/searchlib/src/vespa/searchlib/transactionlog/translogserver.cpp index 08ee944e749..db2cf2a255d 100644 --- a/searchlib/src/vespa/searchlib/transactionlog/translogserver.cpp +++ b/searchlib/src/vespa/searchlib/transactionlog/translogserver.cpp @@ -82,7 +82,7 @@ VESPA_THREAD_STACK_TAG(tls_executor); TransLogServer::TransLogServer(const vespalib::string &name, int listenPort, const vespalib::string &baseDir, const FileHeaderContext &fileHeaderContext) : TransLogServer(name, listenPort, baseDir, fileHeaderContext, - DomainConfig().setEncoding(Encoding(Encoding::xxh64, Encoding::Compression::none)) + DomainConfig().setEncoding(Encoding(Encoding::xxh64, Encoding::Compression::zstd)) .setPartSizeLimit(0x10000000).setChunkSizeLimit(0x40000)) {} @@ -578,9 +578,10 @@ TransLogServer::domainCommit(FRT_RPCRequest *req) try { vespalib::Gate gate; { + auto onDone = make_shared<vespalib::GateCallback>(gate); // Need to scope in order to drain out all the callbacks. 
- domain->append(packet, make_shared<vespalib::GateCallback>(gate)); - auto keep = domain->startCommit(make_shared<vespalib::IgnoreCallback>()); + domain->append(packet, onDone); + auto keep = domain->startCommit(onDone); } gate.await(); ret.AddInt32(0); diff --git a/searchlib/src/vespa/searchlib/transactionlog/translogserverapp.cpp b/searchlib/src/vespa/searchlib/transactionlog/translogserverapp.cpp index 2d2863af874..9ca3b678054 100644 --- a/searchlib/src/vespa/searchlib/transactionlog/translogserverapp.cpp +++ b/searchlib/src/vespa/searchlib/transactionlog/translogserverapp.cpp @@ -42,7 +42,6 @@ getCompression(searchlib::TranslogserverConfig::Compression::Type type) { switch (type) { case searchlib::TranslogserverConfig::Compression::Type::NONE: - return Encoding::Compression::none; case searchlib::TranslogserverConfig::Compression::Type::NONE_MULTI: return Encoding::Compression::none_multi; case searchlib::TranslogserverConfig::Compression::Type::LZ4: diff --git a/searchlib/src/vespa/searchlib/util/dirtraverse.cpp b/searchlib/src/vespa/searchlib/util/dirtraverse.cpp index 6ab5d42d350..07dbc9a247d 100644 --- a/searchlib/src/vespa/searchlib/util/dirtraverse.cpp +++ b/searchlib/src/vespa/searchlib/util/dirtraverse.cpp @@ -15,17 +15,16 @@ static int cmpname(const void *av, const void *bv) *(const DirectoryTraverse::Name *const *) av; const DirectoryTraverse::Name *const b = *(const DirectoryTraverse::Name *const *) bv; - return strcmp(a->_name, b->_name); + return a->_name.compare(b->_name.c_str()); } } DirectoryTraverse::Name::Name(const char *name) - : _name(nullptr), + : _name(name), _next(nullptr) { - _name = strdup(name); } -DirectoryTraverse::Name::~Name() { free(_name); } +DirectoryTraverse::Name::~Name() = default; DirectoryTraverse::Name * DirectoryTraverse::Name::sort(Name *head, int count) @@ -132,19 +131,15 @@ DirectoryTraverse::ScanSingleDir() assert(_nameHead == nullptr); assert(_nameCount == 0); delete _curDir; - free(_fullDirName); - _fullDirName = 
nullptr; + _fullDirName.clear(); _curDir = UnQueueDir(); if (_curDir == nullptr) return; - _fullDirName = (char *) malloc(strlen(_baseDir) + 1 + - strlen(_curDir->_name) + 1); - strcpy(_fullDirName, _baseDir); - if (_curDir->_name[0] != '\0') { - strcat(_fullDirName, "/"); - strcat(_fullDirName, _curDir->_name); + _fullDirName = _baseDir; + if ( ! _curDir->_name.empty()) { + _fullDirName += "/" + _curDir->_name; } - FastOS_DirectoryScan *dirscan = new FastOS_DirectoryScan(_fullDirName); + FastOS_DirectoryScan *dirscan = new FastOS_DirectoryScan(_fullDirName.c_str()); while (dirscan->ReadNext()) { const char *name = dirscan->GetName(); if (strcmp(name, ".") == 0 || @@ -171,13 +166,8 @@ DirectoryTraverse::NextName() if (_nameHead == nullptr) return false; _curName = UnQueueName(); - free(_fullName); - _fullName = (char *) malloc(strlen(_fullDirName) + 1 + - strlen(_curName->_name) + 1); - strcpy(_fullName, _fullDirName); - _relName = _fullName + strlen(_baseDir) + 1; - strcat(_fullName, "/"); - strcat(_fullName, _curName->_name); + _fullName = _fullDirName + "/" + _curName->_name; + _relName = _fullName.c_str() + (_baseDir.size() + 1); return true; } @@ -193,13 +183,8 @@ DirectoryTraverse::NextRemoveDir() return false; curName = _rdirHead; _rdirHead = curName->_next; - free(_fullName); - _fullName = (char *) malloc(strlen(_baseDir) + 1 + - strlen(curName->_name) + 1); - strcpy(_fullName, _baseDir); - _relName = _fullName + strlen(_baseDir) + 1; - strcat(_fullName, "/"); - strcat(_fullName, curName->_name); + _fullName = _baseDir + "/" + curName->_name; + _relName = _fullName.c_str() + _baseDir.size() + 1; delete curName; return true; } @@ -226,7 +211,7 @@ DirectoryTraverse::RemoveTree() const char *fullname = GetFullName(); FastOS_File::RemoveDirectory(fullname); } - FastOS_File::RemoveDirectory(_baseDir); + FastOS_File::RemoveDirectory(_baseDir.c_str()); return true; } @@ -252,7 +237,7 @@ DirectoryTraverse::GetTreeSize() } DirectoryTraverse::DirectoryTraverse(const 
char *baseDir) - : _baseDir(nullptr), + : _baseDir(baseDir), _nameHead(nullptr), _nameCount(0), _dirHead(nullptr), @@ -261,11 +246,10 @@ DirectoryTraverse::DirectoryTraverse(const char *baseDir) _rdirHead(nullptr), _curDir(nullptr), _curName(nullptr), - _fullDirName(nullptr), - _fullName(nullptr), + _fullDirName(), + _fullName(), _relName(nullptr) { - _baseDir = strdup(baseDir); QueueDir(""); ScanSingleDir(); } @@ -273,9 +257,6 @@ DirectoryTraverse::DirectoryTraverse(const char *baseDir) DirectoryTraverse::~DirectoryTraverse() { - free(_fullDirName); - free(_fullName); - free(_baseDir); delete _curDir; delete _curName; PushPushedDirs(); diff --git a/searchlib/src/vespa/searchlib/util/dirtraverse.h b/searchlib/src/vespa/searchlib/util/dirtraverse.h index bff7aae705a..4a96ad0935d 100644 --- a/searchlib/src/vespa/searchlib/util/dirtraverse.h +++ b/searchlib/src/vespa/searchlib/util/dirtraverse.h @@ -3,6 +3,7 @@ #pragma once #include <cstdint> +#include <string> namespace search { @@ -20,14 +21,14 @@ public: Name& operator=(const Name &); public: - char *_name; + std::string _name; Name *_next; explicit Name(const char *name); ~Name(); static Name *sort(Name *head, int count); }; private: - char *_baseDir; + std::string _baseDir; Name *_nameHead; int _nameCount; Name *_dirHead; @@ -36,11 +37,11 @@ private: Name *_rdirHead; Name *_curDir; Name *_curName; - char *_fullDirName; - char *_fullName; - char *_relName; + std::string _fullDirName; + std::string _fullName; + const char *_relName; public: - const char *GetFullName() const { return _fullName; } + const char *GetFullName() const { return _fullName.c_str(); } const char *GetRelName() const { return _relName; } void QueueDir(const char *name); void PushDir(const char *name); diff --git a/searchsummary/src/vespa/searchsummary/docsummary/keywordextractor.cpp b/searchsummary/src/vespa/searchsummary/docsummary/keywordextractor.cpp index bd05dd4b0f5..19f6a7eef01 100644 --- 
a/searchsummary/src/vespa/searchsummary/docsummary/keywordextractor.cpp +++ b/searchsummary/src/vespa/searchsummary/docsummary/keywordextractor.cpp @@ -1,6 +1,5 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -#include "docsumstate.h" #include "keywordextractor.h" #include "idocsumenvironment.h" #include <vespa/searchlib/parsequery/stackdumpiterator.h> @@ -20,7 +19,7 @@ bool useful(search::ParseItem::ItemCreator creator) KeywordExtractor::KeywordExtractor(IDocsumEnvironment * env) : _env(env), - _legalPrefixes(NULL), + _legalPrefixes(nullptr), _legalIndexes() { } @@ -28,7 +27,7 @@ KeywordExtractor::KeywordExtractor(IDocsumEnvironment * env) KeywordExtractor::~KeywordExtractor() { - while (_legalPrefixes != NULL) { + while (_legalPrefixes != nullptr) { IndexPrefix *tmp = _legalPrefixes; _legalPrefixes = tmp->_next; delete tmp; @@ -42,32 +41,25 @@ KeywordExtractor::IsLegalIndexName(const char *idxName) const } KeywordExtractor::IndexPrefix::IndexPrefix(const char *prefix, IndexPrefix **list) - : _prefix(NULL), - _prefixLen(0), - _next(NULL) + : _prefix(prefix), + _next(nullptr) { - _prefix = strdup(prefix); - assert(_prefix != NULL); - _prefixLen = strlen(prefix); _next = *list; *list = this; } -KeywordExtractor::IndexPrefix::~IndexPrefix() -{ - free(_prefix); -} +KeywordExtractor::IndexPrefix::~IndexPrefix() = default; bool KeywordExtractor::IndexPrefix::Match(const char *idxName) const { - return (strncmp(idxName, _prefix, _prefixLen) == 0); + return vespalib::starts_with(idxName, _prefix); } void KeywordExtractor::AddLegalIndexSpec(const char *spec) { - if (spec == NULL) + if (spec == nullptr) return; vespalib::string toks(spec); // tokens @@ -107,9 +99,9 @@ KeywordExtractor::GetLegalIndexSpec() { vespalib::string spec; - if (_legalPrefixes != NULL) { + if (_legalPrefixes != nullptr) { for (IndexPrefix *pt = _legalPrefixes; - pt != NULL; pt = pt->_next) { + pt != nullptr; pt = pt->_next) { if (spec.size() 
> 0) spec.append(';'); spec.append(pt->_prefix); @@ -131,7 +123,7 @@ KeywordExtractor::IsLegalIndex(vespalib::stringref idxS) const { vespalib::string resolvedIdxName; - if (_env != NULL) { + if (_env != nullptr) { resolvedIdxName = _env->lookupIndex(idxS); } else { @@ -238,7 +230,7 @@ KeywordExtractor::ExtractKeywords(vespalib::stringref buf) const // Must now allocate a string and copy the data from the rawbuf void *result = malloc(keywords.GetUsedLen()); - if (result != NULL) { + if (result != nullptr) { memcpy(result, keywords.GetDrainPos(), keywords.GetUsedLen()); } return static_cast<char *>(result); diff --git a/searchsummary/src/vespa/searchsummary/docsummary/keywordextractor.h b/searchsummary/src/vespa/searchsummary/docsummary/keywordextractor.h index 50d72f7a7d0..44c85121058 100644 --- a/searchsummary/src/vespa/searchsummary/docsummary/keywordextractor.h +++ b/searchsummary/src/vespa/searchsummary/docsummary/keywordextractor.h @@ -24,9 +24,8 @@ public: IndexPrefix& operator=(const IndexPrefix &); public: - char *_prefix; - int _prefixLen; - IndexPrefix *_next; + vespalib::string _prefix; + IndexPrefix *_next; IndexPrefix(const char *prefix, IndexPrefix **list); ~IndexPrefix(); @@ -42,7 +41,7 @@ private: bool IsLegalIndexPrefix(const char *idxName) const { for (const IndexPrefix *pt = _legalPrefixes; - pt != NULL; + pt != nullptr; pt = pt->_next) { if (pt->Match(idxName)) diff --git a/searchsummary/src/vespa/searchsummary/docsummary/resultconfig.cpp b/searchsummary/src/vespa/searchsummary/docsummary/resultconfig.cpp index 28105af57d5..d3c0caeec48 100644 --- a/searchsummary/src/vespa/searchsummary/docsummary/resultconfig.cpp +++ b/searchsummary/src/vespa/searchsummary/docsummary/resultconfig.cpp @@ -125,6 +125,8 @@ ResultConfig::ReadConfig(const vespa::config::search::SummaryConfig &cfg, const Reset(); int maxclassID = 0x7fffffff; // avoid negative classids _defaultSummaryId = cfg.defaultsummaryid; + _useV8geoPositions = cfg.usev8geopositions; + for 
(uint32_t i = 0; rc && i < cfg.classes.size(); i++) { const auto& cfg_class = cfg.classes[i]; if (cfg_class.name.empty()) { diff --git a/searchsummary/src/vespa/searchsummary/docsummary/resultconfig.h b/searchsummary/src/vespa/searchsummary/docsummary/resultconfig.h index 3be83014fdb..8a8bfabaaec 100644 --- a/searchsummary/src/vespa/searchsummary/docsummary/resultconfig.h +++ b/searchsummary/src/vespa/searchsummary/docsummary/resultconfig.h @@ -33,6 +33,7 @@ private: typedef vespalib::hash_map<vespalib::string, uint32_t> NameMap; typedef vespalib::hash_map<uint32_t, ResultClass::UP> IdMap; uint32_t _defaultSummaryId; + bool _useV8geoPositions; search::util::StringEnum _fieldEnum; IdMap _classLookup; NameMap _nameLookup; // name -> class id @@ -41,6 +42,7 @@ private: void Init(); public: + bool useV8geoPositions() const { return _useV8geoPositions; } class iterator { public: iterator(IdMap::iterator it) : _it(it) { } diff --git a/security-utils/pom.xml b/security-utils/pom.xml index b7c7c110ad8..39a52fb12db 100644 --- a/security-utils/pom.xml +++ b/security-utils/pom.xml @@ -60,6 +60,16 @@ <artifactId>mockito-core</artifactId> <scope>test</scope> </dependency> + <dependency> + <groupId>org.junit.jupiter</groupId> + <artifactId>junit-jupiter</artifactId> + <scope>test</scope> + </dependency> + <dependency> + <groupId>org.junit.vintage</groupId> + <artifactId>junit-vintage-engine</artifactId> + <scope>test</scope> + </dependency> </dependencies> <build> <plugins> diff --git a/security-utils/src/main/java/com/yahoo/security/tls/policy/GlobPattern.java b/security-utils/src/main/java/com/yahoo/security/tls/policy/GlobPattern.java new file mode 100644 index 00000000000..46a38a77844 --- /dev/null +++ b/security-utils/src/main/java/com/yahoo/security/tls/policy/GlobPattern.java @@ -0,0 +1,82 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+package com.yahoo.security.tls.policy; + +import java.util.Arrays; +import java.util.Objects; +import java.util.regex.Pattern; + +/** + * Matching engine for glob patterns having where one ore more alternative characters acts a boundary for wildcard matching. + * + * @author bjorncs + */ +class GlobPattern { + private final String pattern; + private final char[] boundaries; + private final Pattern regexPattern; + + GlobPattern(String pattern, char[] boundaries, boolean enableSingleCharWildcard) { + this.pattern = pattern; + this.boundaries = boundaries; + this.regexPattern = toRegexPattern(pattern, boundaries, enableSingleCharWildcard); + } + + boolean matches(String value) { return regexPattern.matcher(value).matches(); } + + String asString() { return pattern; } + Pattern regexPattern() { return regexPattern; } + char[] boundaries() { return boundaries; } + + private static Pattern toRegexPattern(String pattern, char[] boundaries, boolean enableSingleCharWildcard) { + StringBuilder builder = new StringBuilder("^"); + StringBuilder precedingCharactersToQuote = new StringBuilder(); + char[] chars = pattern.toCharArray(); + for (char c : chars) { + if ((enableSingleCharWildcard && c == '?') || c == '*') { + builder.append(quotePrecedingLiteralsAndReset(precedingCharactersToQuote)); + // Note: we explicitly stop matching at a separator boundary. + // This is to make matching less vulnerable to dirty tricks (e.g dot as boundary for hostnames). + // Same applies for single chars; they should only match _within_ a boundary. 
+ builder.append("[^").append(Pattern.quote(new String(boundaries))).append("]"); + if (c == '*') builder.append('*'); + } else { + precedingCharactersToQuote.append(c); + } + } + return Pattern.compile(builder.append(quotePrecedingLiteralsAndReset(precedingCharactersToQuote)).append('$').toString()); + } + + // Combines multiple subsequent literals inside a single quote to simplify produced regex patterns + private static String quotePrecedingLiteralsAndReset(StringBuilder literals) { + if (literals.length() > 0) { + String quoted = literals.toString(); + literals.setLength(0); + return Pattern.quote(quoted); + } + return ""; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + GlobPattern that = (GlobPattern) o; + return Objects.equals(pattern, that.pattern) && Arrays.equals(boundaries, that.boundaries); + } + + @Override + public int hashCode() { + int result = Objects.hash(pattern); + result = 31 * result + Arrays.hashCode(boundaries); + return result; + } + + @Override + public String toString() { + return "GlobPattern{" + + "pattern='" + pattern + '\'' + + ", boundaries=" + Arrays.toString(boundaries) + + ", regexPattern=" + regexPattern + + '}'; + } +} diff --git a/security-utils/src/main/java/com/yahoo/security/tls/policy/HostGlobPattern.java b/security-utils/src/main/java/com/yahoo/security/tls/policy/HostGlobPattern.java index fd9a233d609..cb9ba13cae4 100644 --- a/security-utils/src/main/java/com/yahoo/security/tls/policy/HostGlobPattern.java +++ b/security-utils/src/main/java/com/yahoo/security/tls/policy/HostGlobPattern.java @@ -2,60 +2,32 @@ package com.yahoo.security.tls.policy; import java.util.Objects; -import java.util.regex.Pattern; /** * @author bjorncs */ class HostGlobPattern implements RequiredPeerCredential.Pattern { - private final String pattern; - private final Pattern regexPattern; + private final GlobPattern globPattern; HostGlobPattern(String 
pattern) { - this.pattern = pattern; - this.regexPattern = toRegexPattern(pattern); + this.globPattern = new GlobPattern(pattern, new char[] {'.'}, true); } @Override public String asString() { - return pattern; + return globPattern.asString(); } @Override public boolean matches(String hostString) { - return regexPattern.matcher(hostString).matches(); - } - - private static Pattern toRegexPattern(String pattern) { - StringBuilder builder = new StringBuilder("^"); - for (char c : pattern.toCharArray()) { - if (c == '*') { - // Note: we explicitly stop matching at a dot separator boundary. - // This is to make host name matching less vulnerable to dirty tricks. - builder.append("[^.]*"); - } else if (c == '?') { - // Same applies for single chars; they should only match _within_ a dot boundary. - builder.append("[^.]"); - } else if (isRegexMetaCharacter(c)){ - builder.append("\\"); - builder.append(c); - } else { - builder.append(c); - } - } - builder.append('$'); - return Pattern.compile(builder.toString()); - } - - private static boolean isRegexMetaCharacter(char c) { - return "<([{\\^-=$!|]})?*+.>".indexOf(c) != -1; // note: includes '?' 
and '*' + return globPattern.matches(hostString); } @Override public String toString() { return "HostGlobPattern{" + - "pattern='" + pattern + '\'' + + "pattern='" + globPattern + '\'' + '}'; } @@ -64,11 +36,11 @@ class HostGlobPattern implements RequiredPeerCredential.Pattern { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; HostGlobPattern that = (HostGlobPattern) o; - return Objects.equals(pattern, that.pattern); + return Objects.equals(globPattern, that.globPattern); } @Override public int hashCode() { - return Objects.hash(pattern); + return Objects.hash(globPattern); } } diff --git a/security-utils/src/main/java/com/yahoo/security/tls/policy/RequiredPeerCredential.java b/security-utils/src/main/java/com/yahoo/security/tls/policy/RequiredPeerCredential.java index 3bdc35c6a2f..4c96a2935f8 100644 --- a/security-utils/src/main/java/com/yahoo/security/tls/policy/RequiredPeerCredential.java +++ b/security-utils/src/main/java/com/yahoo/security/tls/policy/RequiredPeerCredential.java @@ -28,7 +28,7 @@ public class RequiredPeerCredential { case SAN_DNS: return new HostGlobPattern(pattern); case SAN_URI: - return new UriPattern(pattern); + return new UriGlobPattern(pattern); default: throw new IllegalArgumentException("Unknown field: " + field); } diff --git a/security-utils/src/main/java/com/yahoo/security/tls/policy/UriPattern.java b/security-utils/src/main/java/com/yahoo/security/tls/policy/UriGlobPattern.java index 7c36244d781..b2cc0688bb9 100644 --- a/security-utils/src/main/java/com/yahoo/security/tls/policy/UriPattern.java +++ b/security-utils/src/main/java/com/yahoo/security/tls/policy/UriGlobPattern.java @@ -8,26 +8,22 @@ import java.util.Objects; * * @author bjorncs */ -class UriPattern implements RequiredPeerCredential.Pattern { +class UriGlobPattern implements RequiredPeerCredential.Pattern { - private final String pattern; + private final GlobPattern globPattern; - UriPattern(String pattern) { - this.pattern = 
pattern; + UriGlobPattern(String globPattern) { + this.globPattern = new GlobPattern(globPattern, new char[] {'/'}, false); } - @Override public String asString() { return pattern; } + @Override public String asString() { return globPattern.asString(); } - @Override - public boolean matches(String fieldValue) { - // Only exact match is supported (unlike for host names) - return fieldValue.equals(pattern); - } + @Override public boolean matches(String fieldValue) { return globPattern.matches(fieldValue); } @Override public String toString() { return "UriPattern{" + - "pattern='" + pattern + '\'' + + "pattern='" + globPattern + '\'' + '}'; } @@ -35,12 +31,12 @@ class UriPattern implements RequiredPeerCredential.Pattern { public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; - UriPattern that = (UriPattern) o; - return Objects.equals(pattern, that.pattern); + UriGlobPattern that = (UriGlobPattern) o; + return Objects.equals(globPattern, that.globPattern); } @Override public int hashCode() { - return Objects.hash(pattern); + return Objects.hash(globPattern); } } diff --git a/security-utils/src/test/java/com/yahoo/security/tls/authz/PeerAuthorizerTest.java b/security-utils/src/test/java/com/yahoo/security/tls/authz/PeerAuthorizerTest.java index 518ec08d38e..fdfed781286 100644 --- a/security-utils/src/test/java/com/yahoo/security/tls/authz/PeerAuthorizerTest.java +++ b/security-utils/src/test/java/com/yahoo/security/tls/authz/PeerAuthorizerTest.java @@ -102,17 +102,17 @@ public class PeerAuthorizerTest { } @Test - public void can_exact_match_policy_with_san_uri_pattern() { + public void can_match_policy_with_san_uri_pattern() { RequiredPeerCredential cnRequirement = createRequiredCredential(CN, "*.matching.cn"); - RequiredPeerCredential sanUriRequirement = createRequiredCredential(SAN_URI, "myscheme://my/exact/uri"); + RequiredPeerCredential sanUriRequirement = createRequiredCredential(SAN_URI, 
"myscheme://my/*/uri"); PeerAuthorizer authorizer = createPeerAuthorizer(createPolicy(POLICY_1, createRoles(ROLE_1), cnRequirement, sanUriRequirement)); - AuthorizationResult result = authorizer.authorizePeer(createCertificate("foo.matching.cn", singletonList("foo.irrelevant.san"), singletonList("myscheme://my/exact/uri"))); + AuthorizationResult result = authorizer.authorizePeer(createCertificate("foo.matching.cn", singletonList("foo.irrelevant.san"), singletonList("myscheme://my/matching/uri"))); assertAuthorized(result); assertThat(result.assumedRoles()).extracting(Role::name).containsOnly(ROLE_1); assertThat(result.matchedPolicies()).containsOnly(POLICY_1); - assertUnauthorized(authorizer.authorizePeer(createCertificate("foo.matching.cn", emptyList(), singletonList("myscheme://my/nonmatching/uri")))); + assertUnauthorized(authorizer.authorizePeer(createCertificate("foo.matching.cn", emptyList(), singletonList("myscheme://my/nonmatching/url")))); } private static X509Certificate createCertificate(String subjectCn, List<String> sanDns, List<String> sanUri) { diff --git a/security-utils/src/test/java/com/yahoo/security/tls/json/TransportSecurityOptionsJsonSerializerTest.java b/security-utils/src/test/java/com/yahoo/security/tls/json/TransportSecurityOptionsJsonSerializerTest.java index ffc8296c890..6bca49aee83 100644 --- a/security-utils/src/test/java/com/yahoo/security/tls/json/TransportSecurityOptionsJsonSerializerTest.java +++ b/security-utils/src/test/java/com/yahoo/security/tls/json/TransportSecurityOptionsJsonSerializerTest.java @@ -20,7 +20,7 @@ import java.nio.file.Path; import java.nio.file.Paths; import java.util.Arrays; import java.util.Collections; -import java.util.HashSet; +import java.util.LinkedHashSet; import static com.yahoo.security.tls.policy.RequiredPeerCredential.Field.CN; import static com.yahoo.security.tls.policy.RequiredPeerCredential.Field.SAN_DNS; @@ -46,7 +46,7 @@ public class TransportSecurityOptionsJsonSerializerTest { 
.withHostnameValidationDisabled(false) .withAuthorizedPeers( new AuthorizedPeers( - new HashSet<>(Arrays.asList( + new LinkedHashSet<>(Arrays.asList( new PeerPolicy("cfgserver", "cfgserver policy description", singleton(new Role("myrole")), Arrays.asList( RequiredPeerCredential.of(CN, "mycfgserver"), RequiredPeerCredential.of(SAN_DNS, "*.suffix.com"), diff --git a/security-utils/src/test/java/com/yahoo/security/tls/policy/GlobPatternTest.java b/security-utils/src/test/java/com/yahoo/security/tls/policy/GlobPatternTest.java new file mode 100644 index 00000000000..4350aa2b0a9 --- /dev/null +++ b/security-utils/src/test/java/com/yahoo/security/tls/policy/GlobPatternTest.java @@ -0,0 +1,106 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +package com.yahoo.security.tls.policy; + +import org.junit.jupiter.api.Test; + +import java.util.Arrays; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; + +/** + * @author bjorncs + */ +class GlobPatternTest { + + @Test + public void glob_without_wildcards_matches_entire_string() { + assertMatches("foo", ".", "foo"); + assertNotMatches("foo", ".", "fooo"); + assertNotMatches("foo", ".", "ffoo"); + assertPatternHasRegex("foo", ".", "^\\Qfoo\\E$"); + } + + @Test + public void wildcard_glob_can_match_prefix() { + assertMatches("foo*", ".", "foo"); + assertMatches("foo*", ".", "foobar"); + assertNotMatches("foo*", ".", "ffoo"); + } + + @Test + public void wildcard_glob_can_match_suffix() { + assertMatches("*foo", ".", "foo"); + assertMatches("*foo", ".", "ffoo"); + assertNotMatches("*foo", ".", "fooo"); + } + + @Test + public void wildcard_glob_can_match_substring() { + assertMatches("f*o", ".", "fo"); + assertMatches("f*o", ".", "foo"); + assertMatches("f*o", ".", "ffoo"); + assertNotMatches("f*o", ".", "boo"); + } + + @Test + public void 
wildcard_glob_does_not_cross_multiple_dot_delimiter_boundaries() { + assertMatches("*.bar.baz", ".", "foo.bar.baz"); + assertMatches("*.bar.baz", ".", ".bar.baz"); + assertNotMatches("*.bar.baz", ".", "zoid.foo.bar.baz"); + assertMatches("foo.*.baz", ".", "foo.bar.baz"); + assertNotMatches("foo.*.baz", ".", "foo.bar.zoid.baz"); + + assertPatternHasRegex("*.bar.baz", ".", "^[^\\Q.\\E]*\\Q.bar.baz\\E$"); + } + + @Test + public void single_char_glob_matches_non_dot_characters() { + assertMatches("f?o", ".", "foo"); + assertNotMatches("f?o", ".", "fooo"); + assertNotMatches("f?o", ".", "ffoo"); + assertNotMatches("f?o", ".", "f.o"); + } + + @Test + public void other_regex_meta_characters_are_matched_as_literal_characters() { + String literals = "<([{\\^-=$!|]})+.>"; + assertMatches(literals, ".", literals); + assertPatternHasRegex(literals, ".", "^\\Q<([{\\^-=$!|]})+.>\\E$"); + } + + @Test + public void handles_patterns_with_multiple_alternative_boundaries() { + assertMatches("https://*.vespa.ai/", "./", "https://docs.vespa.ai/"); + assertMatches("https://vespa.ai/*.world", "./", "https://vespa.ai/hello.world"); + assertNotMatches("https://vespa.ai/*/", "./", "https://vespa.ai/hello.world/"); + assertMatches("https://vespa.ai/*/index.html", "./", "https://vespa.ai/path/index.html"); + } + + private void assertMatches(String pattern, String boundaries, String value) { + GlobPattern p = globPattern(pattern, boundaries); + assertTrue( + p.matches(value), + () -> String.format("Expected '%s' with boundaries '%s' to match '%s'", + pattern, Arrays.toString(p.boundaries()), value)); + } + + private void assertNotMatches(String pattern, String boundaries, String value) { + GlobPattern p = globPattern(pattern, boundaries); + assertFalse( + p.matches(value), + () -> String.format("Expected '%s' with boundaries '%s' to not match '%s'", + pattern, Arrays.toString(p.boundaries()), value)); + } + + private void assertPatternHasRegex(String pattern, String boundaries, String 
expectedPattern) { + GlobPattern p = globPattern(pattern, boundaries); + assertEquals(expectedPattern, p.regexPattern().pattern()); + } + + private static GlobPattern globPattern(String pattern, String boundaries) { + return new GlobPattern(pattern, boundaries.toCharArray(), true); + } + +} diff --git a/security-utils/src/test/java/com/yahoo/security/tls/policy/UriGlobPatternTest.java b/security-utils/src/test/java/com/yahoo/security/tls/policy/UriGlobPatternTest.java new file mode 100644 index 00000000000..c60c782da14 --- /dev/null +++ b/security-utils/src/test/java/com/yahoo/security/tls/policy/UriGlobPatternTest.java @@ -0,0 +1,37 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +package com.yahoo.security.tls.policy; + +import org.junit.jupiter.api.Test; + +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; + +/** + * @author bjorncs + */ +class UriGlobPatternTest { + + @Test + void matches_correctly() { + assertMatches("scheme://hostname/*", "scheme://hostname/mypath"); + assertMatches("scheme://hostname/*/segment2", "scheme://hostname/segment1/segment2"); + assertMatches("scheme://hostname/segment1/*", "scheme://hostname/segment1/segment2"); + assertNotMatches("scheme://hostname/*", "scheme://hostname/segment1/segment2"); + assertMatches("scheme://*/segment1/segment2", "scheme://hostname/segment1/segment2"); + assertMatches("scheme://*.name/", "scheme://host.name/"); + assertNotMatches("scheme://*", "scheme://hostname/"); + assertMatches("scheme://hostname/mypath?query=value", "scheme://hostname/mypath?query=value"); + assertNotMatches("scheme://hostname/?", "scheme://hostname/p"); + } + + private void assertMatches(String pattern, String value) { + assertTrue(new UriGlobPattern(pattern).matches(value), + () -> String.format("Expected '%s' to match '%s'", pattern, value)); + } + + private void assertNotMatches(String pattern, String 
value) { + assertFalse(new UriGlobPattern(pattern).matches(value), + () -> String.format("Expected '%s' to not match '%s'", pattern, value)); + } + +} diff --git a/service-monitor/src/main/java/com/yahoo/vespa/service/duper/ConfigServerHostApplication.java b/service-monitor/src/main/java/com/yahoo/vespa/service/duper/ConfigServerHostApplication.java index e4e5f513ebc..909b86886b8 100644 --- a/service-monitor/src/main/java/com/yahoo/vespa/service/duper/ConfigServerHostApplication.java +++ b/service-monitor/src/main/java/com/yahoo/vespa/service/duper/ConfigServerHostApplication.java @@ -4,7 +4,9 @@ package com.yahoo.vespa.service.duper; import com.yahoo.config.provision.NodeType; public class ConfigServerHostApplication extends HostAdminApplication { + public ConfigServerHostApplication() { super("configserver-host", NodeType.confighost); } + } diff --git a/service-monitor/src/main/java/com/yahoo/vespa/service/duper/DuperModelManager.java b/service-monitor/src/main/java/com/yahoo/vespa/service/duper/DuperModelManager.java index 7087995aa2e..67d54091adc 100644 --- a/service-monitor/src/main/java/com/yahoo/vespa/service/duper/DuperModelManager.java +++ b/service-monitor/src/main/java/com/yahoo/vespa/service/duper/DuperModelManager.java @@ -45,11 +45,11 @@ public class DuperModelManager implements DuperModelProvider, DuperModelInfraApi static final ConfigServerApplication configServerApplication = new ConfigServerApplication(); static final ProxyHostApplication proxyHostApplication = new ProxyHostApplication(); static final TenantHostApplication tenantHostApplication = new TenantHostApplication(); - static final DevHostApplication devHostApplicaton = new DevHostApplication(); + static final DevHostApplication devHostApplication = new DevHostApplication(); private final Map<ApplicationId, InfraApplication> supportedInfraApplications; - private static CriticalRegionChecker disallowedDuperModeLockAcquisitionRegions = + private static final CriticalRegionChecker 
disallowedDuperModeLockAcquisitionRegions = new CriticalRegionChecker("duper model deadlock detection"); private final ReentrantLock lock = new ReentrantLock(true); @@ -68,13 +68,14 @@ public class DuperModelManager implements DuperModelProvider, DuperModelInfraApi } /** Non-private for testing */ - public DuperModelManager(boolean multitenant, boolean isController, SuperModelProvider superModelProvider, DuperModel duperModel, FlagSource flagSource, SystemName system) { + public DuperModelManager(boolean multitenant, boolean isController, SuperModelProvider superModelProvider, + DuperModel duperModel, FlagSource flagSource, SystemName system) { this.duperModel = duperModel; if (system == SystemName.dev) { // TODO (mortent): Support controllerApplication in dev system supportedInfraApplications = - Stream.of(devHostApplicaton, configServerApplication) + Stream.of(devHostApplication, configServerApplication) .collect(Collectors.toUnmodifiableMap(InfraApplication::getApplicationId, Function.identity())); } else if (multitenant) { supportedInfraApplications = @@ -129,9 +130,7 @@ public class DuperModelManager implements DuperModelProvider, DuperModelInfraApi return Optional.ofNullable(supportedInfraApplications.get(applicationId)); } - /** - * Returns true if application is considered an infrastructure application by the DuperModel. - */ + /** Returns true if application is considered an infrastructure application by the DuperModel. 
*/ public boolean isSupportedInfraApplication(ApplicationId applicationId) { return supportedInfraApplications.containsKey(applicationId); } @@ -234,4 +233,5 @@ public class DuperModelManager implements DuperModelProvider, DuperModelInfraApi lock.unlock(); } } + } diff --git a/service-monitor/src/main/java/com/yahoo/vespa/service/duper/HostAdminApplication.java b/service-monitor/src/main/java/com/yahoo/vespa/service/duper/HostAdminApplication.java index 2c1b00f93a4..0b90350b7be 100644 --- a/service-monitor/src/main/java/com/yahoo/vespa/service/duper/HostAdminApplication.java +++ b/service-monitor/src/main/java/com/yahoo/vespa/service/duper/HostAdminApplication.java @@ -9,10 +9,16 @@ import com.yahoo.vespa.applicationmodel.ServiceType; * @author hakonhall */ public abstract class HostAdminApplication extends InfraApplication { + public static final int HOST_ADMIN_HEALT_PORT = 8080; protected HostAdminApplication(String applicationName, NodeType nodeType) { - super(applicationName, nodeType, ClusterSpec.Type.container, ClusterSpec.Id.from(applicationName), - ServiceType.HOST_ADMIN, HOST_ADMIN_HEALT_PORT); + super(applicationName, + nodeType, + ClusterSpec.Type.container, + ClusterSpec.Id.from(applicationName), + ServiceType.HOST_ADMIN, + HOST_ADMIN_HEALT_PORT); } + } diff --git a/service-monitor/src/main/java/com/yahoo/vespa/service/duper/InfraApplication.java b/service-monitor/src/main/java/com/yahoo/vespa/service/duper/InfraApplication.java index a3a13bc7b57..8a62db87e7c 100644 --- a/service-monitor/src/main/java/com/yahoo/vespa/service/duper/InfraApplication.java +++ b/service-monitor/src/main/java/com/yahoo/vespa/service/duper/InfraApplication.java @@ -170,4 +170,5 @@ public abstract class InfraApplication implements InfraApplicationApi { ", healthPort=" + healthPort + '}'; } + } diff --git a/slobrok/src/vespa/slobrok/server/slobrokserver.cpp b/slobrok/src/vespa/slobrok/server/slobrokserver.cpp index b962ecf611e..5601336fdfd 100644 --- 
a/slobrok/src/vespa/slobrok/server/slobrokserver.cpp +++ b/slobrok/src/vespa/slobrok/server/slobrokserver.cpp @@ -2,21 +2,20 @@ #include "slobrokserver.h" -#include <vespa/log/log.h> -LOG_SETUP(".slobrok.server"); - namespace slobrok { +VESPA_THREAD_STACK_TAG(slobrok_server_thread); + SlobrokServer::SlobrokServer(ConfigShim &shim) : _env(shim), - _thread(*this) + _thread(*this, slobrok_server_thread) { _thread.start(); } SlobrokServer::SlobrokServer(uint32_t port) : _env(ConfigShim(port)), - _thread(*this) + _thread(*this, slobrok_server_thread) { _thread.start(); } diff --git a/staging_vespalib/src/tests/singleexecutor/singleexecutor_test.cpp b/staging_vespalib/src/tests/singleexecutor/singleexecutor_test.cpp index 732ab122546..dd71380f64a 100644 --- a/staging_vespalib/src/tests/singleexecutor/singleexecutor_test.cpp +++ b/staging_vespalib/src/tests/singleexecutor/singleexecutor_test.cpp @@ -35,13 +35,18 @@ void verifyResizeTaskLimit(bool up) { std::condition_variable cond; std::atomic<uint64_t> started(0); std::atomic<uint64_t> allowed(0); - SingleExecutor executor(sequenced_executor, 10); + constexpr uint32_t INITIAL = 20; + const uint32_t INITIAL_2inN = roundUp2inN(INITIAL); + double waterMarkRatio = 0.5; + SingleExecutor executor(sequenced_executor, INITIAL, INITIAL*waterMarkRatio, 10ms); + EXPECT_EQUAL(INITIAL_2inN, executor.getTaskLimit()); + EXPECT_EQUAL(uint32_t(INITIAL_2inN*waterMarkRatio), executor.get_watermark()); - uint32_t targetTaskLimit = up ? 20 : 5; + uint32_t targetTaskLimit = up ? 
40 : 5; uint32_t roundedTaskLimit = roundUp2inN(targetTaskLimit); - EXPECT_NOT_EQUAL(16u, roundedTaskLimit); + EXPECT_NOT_EQUAL(INITIAL_2inN, roundedTaskLimit); - for (uint64_t i(0); i < 10; i++) { + for (uint64_t i(0); i < INITIAL; i++) { executor.execute(makeLambdaTask([&lock, &cond, &started, &allowed] { started++; std::unique_lock guard(lock); @@ -53,15 +58,16 @@ void verifyResizeTaskLimit(bool up) { while (started < 1); EXPECT_EQUAL(1u, started); executor.setTaskLimit(targetTaskLimit); - EXPECT_EQUAL(16u, executor.getTaskLimit()); + EXPECT_EQUAL(INITIAL_2inN, executor.getTaskLimit()); + EXPECT_EQUAL(INITIAL_2inN*waterMarkRatio, executor.get_watermark()); allowed = 5; while (started < 6); EXPECT_EQUAL(6u, started); - EXPECT_EQUAL(16u, executor.getTaskLimit()); - allowed = 10; - while (started < 10); - EXPECT_EQUAL(10u, started); - EXPECT_EQUAL(16u, executor.getTaskLimit()); + EXPECT_EQUAL(INITIAL_2inN, executor.getTaskLimit()); + allowed = INITIAL; + while (started < INITIAL); + EXPECT_EQUAL(INITIAL, started); + EXPECT_EQUAL(INITIAL_2inN, executor.getTaskLimit()); executor.execute(makeLambdaTask([&lock, &cond, &started, &allowed] { started++; std::unique_lock guard(lock); @@ -69,11 +75,13 @@ void verifyResizeTaskLimit(bool up) { cond.wait_for(guard, 1ms); } })); - while (started < 11); - EXPECT_EQUAL(11u, started); + while (started < INITIAL + 1); + EXPECT_EQUAL(INITIAL + 1, started); EXPECT_EQUAL(roundedTaskLimit, executor.getTaskLimit()); - allowed = 11; + EXPECT_EQUAL(roundedTaskLimit*waterMarkRatio, executor.get_watermark()); + allowed = INITIAL + 1; } + TEST("test that resizing up and down works") { TEST_DO(verifyResizeTaskLimit(true)); TEST_DO(verifyResizeTaskLimit(false)); diff --git a/staging_vespalib/src/vespa/vespalib/util/isequencedtaskexecutor.h b/staging_vespalib/src/vespa/vespalib/util/isequencedtaskexecutor.h index 0e931838279..3fe6fb5d678 100644 --- a/staging_vespalib/src/vespa/vespalib/util/isequencedtaskexecutor.h +++ 
b/staging_vespalib/src/vespa/vespalib/util/isequencedtaskexecutor.h @@ -14,7 +14,7 @@ namespace vespalib { * Interface class to run multiple tasks in parallel, but tasks with same * id has to be run in sequence. */ -class ISequencedTaskExecutor +class ISequencedTaskExecutor : public vespalib::IWakeup { public: class ExecutorId { @@ -62,7 +62,7 @@ public: /** * Call this one to ensure you get the attention of the workers. */ - virtual void wakeup() { } + void wakeup() override { } /** * Wrap lambda function into a task and schedule it to be run. diff --git a/staging_vespalib/src/vespa/vespalib/util/jsonstream.h b/staging_vespalib/src/vespa/vespalib/util/jsonstream.h index fc2ae61b37e..d60151f0478 100644 --- a/staging_vespalib/src/vespa/vespalib/util/jsonstream.h +++ b/staging_vespalib/src/vespa/vespalib/util/jsonstream.h @@ -44,7 +44,7 @@ class JsonStream : public JsonStreamTypes { string object_key; size_t array_index; - StateEntry() + StateEntry() noexcept : state(State::ROOT), object_key(""), array_index(size_t(0)) {} StateEntry(State s) : state(s), object_key(""), array_index(size_t(0)) {} diff --git a/staging_vespalib/src/vespa/vespalib/util/sequencedtaskexecutor.cpp b/staging_vespalib/src/vespa/vespalib/util/sequencedtaskexecutor.cpp index 954a63978f3..76b0235301b 100644 --- a/staging_vespalib/src/vespa/vespalib/util/sequencedtaskexecutor.cpp +++ b/staging_vespalib/src/vespa/vespalib/util/sequencedtaskexecutor.cpp @@ -40,8 +40,23 @@ find(uint16_t key, const uint16_t values[], size_t numValues) { } std::unique_ptr<ISequencedTaskExecutor> -SequencedTaskExecutor::create(vespalib::Runnable::init_fun_t func, uint32_t threads, uint32_t taskLimit, - OptimizeFor optimize, uint32_t kindOfWatermark, duration reactionTime) +SequencedTaskExecutor::create(Runnable::init_fun_t func, uint32_t threads) { + return create(func, threads, 1000); +} + +std::unique_ptr<ISequencedTaskExecutor> +SequencedTaskExecutor::create(Runnable::init_fun_t func, uint32_t threads, uint32_t 
taskLimit) { + return create(func, threads, taskLimit, OptimizeFor::LATENCY); +} + +std::unique_ptr<ISequencedTaskExecutor> +SequencedTaskExecutor::create(Runnable::init_fun_t func, uint32_t threads, uint32_t taskLimit, OptimizeFor optimize) { + return create(func, threads, taskLimit, optimize, 0); +} + +std::unique_ptr<ISequencedTaskExecutor> +SequencedTaskExecutor::create(Runnable::init_fun_t func, uint32_t threads, uint32_t taskLimit, + OptimizeFor optimize, uint32_t kindOfWatermark) { if (optimize == OptimizeFor::ADAPTIVE) { size_t num_strands = std::min(taskLimit, threads*32); @@ -51,8 +66,8 @@ SequencedTaskExecutor::create(vespalib::Runnable::init_fun_t func, uint32_t thre executors.reserve(threads); for (uint32_t id = 0; id < threads; ++id) { if (optimize == OptimizeFor::THROUGHPUT) { - uint32_t watermark = kindOfWatermark == 0 ? taskLimit / 2 : kindOfWatermark; - executors.push_back(std::make_unique<SingleExecutor>(func, taskLimit, watermark, reactionTime)); + uint32_t watermark = (kindOfWatermark == 0) ? 
taskLimit / 10 : kindOfWatermark; + executors.push_back(std::make_unique<SingleExecutor>(func, taskLimit, watermark, 100ms)); } else { executors.push_back(std::make_unique<BlockingThreadStackExecutor>(1, stackSize, taskLimit, func)); } @@ -167,7 +182,7 @@ SequencedTaskExecutor::getExecutorIdImPerfect(uint64_t componentId) const { return ExecutorId(executorId); } -const vespalib::SyncableThreadExecutor* +const vespalib::ThreadExecutor* SequencedTaskExecutor::first_executor() const { if (_executors.empty()) { diff --git a/staging_vespalib/src/vespa/vespalib/util/sequencedtaskexecutor.h b/staging_vespalib/src/vespa/vespalib/util/sequencedtaskexecutor.h index 7bb56424849..91304a6a2e3 100644 --- a/staging_vespalib/src/vespa/vespalib/util/sequencedtaskexecutor.h +++ b/staging_vespalib/src/vespa/vespalib/util/sequencedtaskexecutor.h @@ -8,6 +8,7 @@ namespace vespalib { +class ThreadExecutor; class SyncableThreadExecutor; /** @@ -29,19 +30,21 @@ public: ExecutorStats getStats() override; void wakeup() override; - /* - * Note that if you choose Optimize::THROUGHPUT, you must ensure only a single producer, or synchronize on the outside. 
- * - */ static std::unique_ptr<ISequencedTaskExecutor> - create(vespalib::Runnable::init_fun_t, uint32_t threads, uint32_t taskLimit = 1000, - OptimizeFor optimize = OptimizeFor::LATENCY, uint32_t kindOfWatermark = 0, duration reactionTime = 10ms); + create(Runnable::init_fun_t func, uint32_t threads); + static std::unique_ptr<ISequencedTaskExecutor> + create(Runnable::init_fun_t func, uint32_t threads, uint32_t taskLimit); + static std::unique_ptr<ISequencedTaskExecutor> + create(Runnable::init_fun_t func, uint32_t threads, uint32_t taskLimit, OptimizeFor optimize); + static std::unique_ptr<ISequencedTaskExecutor> + create(Runnable::init_fun_t func, uint32_t threads, uint32_t taskLimit, + OptimizeFor optimize, uint32_t kindOfWatermark); /** * For testing only */ uint32_t getComponentHashSize() const { return _component2IdImperfect.size(); } uint32_t getComponentEffectiveHashSize() const { return _nextId; } - const vespalib::SyncableThreadExecutor* first_executor() const; + const vespalib::ThreadExecutor* first_executor() const; private: explicit SequencedTaskExecutor(std::vector<std::unique_ptr<vespalib::SyncableThreadExecutor>> executor); diff --git a/staging_vespalib/src/vespa/vespalib/util/singleexecutor.cpp b/staging_vespalib/src/vespa/vespalib/util/singleexecutor.cpp index af95918ccab..a99bce0a705 100644 --- a/staging_vespalib/src/vespa/vespalib/util/singleexecutor.cpp +++ b/staging_vespalib/src/vespa/vespalib/util/singleexecutor.cpp @@ -7,18 +7,19 @@ namespace vespalib { SingleExecutor::SingleExecutor(init_fun_t func, uint32_t taskLimit) - : SingleExecutor(func, taskLimit, taskLimit/10, 5ms) + : SingleExecutor(func, taskLimit, taskLimit/10, 100ms) { } SingleExecutor::SingleExecutor(init_fun_t func, uint32_t taskLimit, uint32_t watermark, duration reactionTime) - : _taskLimit(vespalib::roundUp2inN(taskLimit)), + : _watermarkRatio(watermark < taskLimit ? 
double(watermark) / taskLimit : 1.0), + _taskLimit(vespalib::roundUp2inN(taskLimit)), _wantedTaskLimit(_taskLimit.load()), _rp(0), _tasks(std::make_unique<Task::UP[]>(_taskLimit)), _mutex(), _consumerCondition(), _producerCondition(), - _thread(*this), + _thread(*this, func), _idleTracker(steady_clock::now()), _threadIdleTracker(), _wakeupCount(0), @@ -27,11 +28,10 @@ SingleExecutor::SingleExecutor(init_fun_t func, uint32_t taskLimit, uint32_t wat _wakeupConsumerAt(0), _producerNeedWakeupAt(0), _wp(0), - _watermark(std::min(_taskLimit.load(), watermark)), + _watermark(_taskLimit.load()*_watermarkRatio), _reactionTime(reactionTime), _closed(false) { - (void) func; //TODO implement similar to ThreadStackExecutor assert(taskLimit >= watermark); _thread.start(); } @@ -75,7 +75,7 @@ SingleExecutor::execute(Task::UP task) { void SingleExecutor::setTaskLimit(uint32_t taskLimit) { - _wantedTaskLimit = std::max(vespalib::roundUp2inN(taskLimit), size_t(_watermark)); + _wantedTaskLimit = vespalib::roundUp2inN(taskLimit); } void @@ -89,7 +89,9 @@ SingleExecutor::drain(Lock & lock) { void SingleExecutor::wakeup() { - _consumerCondition.notify_one(); + if (numTasks() > 0) { + _consumerCondition.notify_one(); + } } SingleExecutor & @@ -115,7 +117,7 @@ SingleExecutor::run() { while (!_thread.stopped()) { drain_tasks(); _producerCondition.notify_all(); - _wakeupConsumerAt.store(_wp.load(std::memory_order_relaxed) + _watermark, std::memory_order_relaxed); + _wakeupConsumerAt.store(_wp.load(std::memory_order_relaxed) + get_watermark(), std::memory_order_relaxed); Lock lock(_mutex); if (numTasks() <= 0) { steady_time now = steady_clock::now(); @@ -157,10 +159,11 @@ SingleExecutor::wait_for_room(Lock & lock) { drain(lock); _tasks = std::make_unique<Task::UP[]>(_wantedTaskLimit); _taskLimit = _wantedTaskLimit.load(); + _watermark = _taskLimit * _watermarkRatio; } _queueSize.add(numTasks()); while (numTasks() >= _taskLimit.load(std::memory_order_relaxed)) { - sleepProducer(lock, 
_reactionTime, wp - _watermark); + sleepProducer(lock, _reactionTime, wp - get_watermark()); } } diff --git a/staging_vespalib/src/vespa/vespalib/util/singleexecutor.h b/staging_vespalib/src/vespa/vespalib/util/singleexecutor.h index 7d868322558..e76e3f17a41 100644 --- a/staging_vespalib/src/vespa/vespalib/util/singleexecutor.h +++ b/staging_vespalib/src/vespa/vespalib/util/singleexecutor.h @@ -28,7 +28,7 @@ public: void wakeup() override; size_t getNumThreads() const override; uint32_t getTaskLimit() const override { return _taskLimit.load(std::memory_order_relaxed); } - uint32_t get_watermark() const { return _watermark; } + uint32_t get_watermark() const { return _watermark.load(std::memory_order_relaxed); } duration get_reaction_time() const { return _reactionTime; } ExecutorStats getStats() override; SingleExecutor & shutdown() override; @@ -47,6 +47,7 @@ private: uint64_t numTasks() const { return _wp.load(std::memory_order_relaxed) - _rp.load(std::memory_order_acquire); } + const double _watermarkRatio; std::atomic<uint32_t> _taskLimit; std::atomic<uint32_t> _wantedTaskLimit; std::atomic<uint64_t> _rp; @@ -63,7 +64,7 @@ private: std::atomic<uint64_t> _wakeupConsumerAt; std::atomic<uint64_t> _producerNeedWakeupAt; std::atomic<uint64_t> _wp; - const uint32_t _watermark; + std::atomic<uint32_t> _watermark; const duration _reactionTime; bool _closed; }; diff --git a/standalone-container/src/main/sh/standalone-container.sh b/standalone-container/src/main/sh/standalone-container.sh index 9edea41ac8b..b34535c6867 100755 --- a/standalone-container/src/main/sh/standalone-container.sh +++ b/standalone-container/src/main/sh/standalone-container.sh @@ -97,6 +97,11 @@ Fail() { FixDataDirectory() { if ! 
[ -d "$1" ]; then + if [ -e "$1" ]; then + # TODO: Remove this if-branch once >=7.511 has rolled out everywhere + echo "Removing file '$1'" + rm "$1" + fi echo "Creating data directory '$1'" mkdir -p "$1" || exit 1 fi @@ -156,6 +161,7 @@ StartCommand() { FixDataDirectory "$(dirname "$cfpfile")" printenv > "$cfpfile" FixDataDirectory "$bundlecachedir" + FixDataDirectory "$VESPA_HOME/var/crash" java \ -Xms128m -Xmx2048m \ @@ -169,7 +175,6 @@ StartCommand() { --add-opens=java.base/java.nio=ALL-UNNAMED \ --add-opens=java.base/jdk.internal.loader=ALL-UNNAMED \ --add-opens=java.base/sun.security.ssl=ALL-UNNAMED \ - --add-opens=java.base/sun.security.util=ALL-UNNAMED \ -Djava.library.path="$VESPA_HOME/lib64" \ -Djava.awt.headless=true \ -Dsun.rmi.dgc.client.gcInterval=3600000 \ diff --git a/storage/src/tests/bucketdb/bucketmanagertest.cpp b/storage/src/tests/bucketdb/bucketmanagertest.cpp index aefda3660df..bb72887f69b 100644 --- a/storage/src/tests/bucketdb/bucketmanagertest.cpp +++ b/storage/src/tests/bucketdb/bucketmanagertest.cpp @@ -1,6 +1,7 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
#include <vespa/config/helper/configgetter.hpp> +#include <vespa/document/config/documenttypes_config_fwd.h> #include <vespa/document/config/config-documenttypes.h> #include <vespa/document/datatype/documenttype.h> #include <vespa/document/fieldvalue/document.h> @@ -29,7 +30,6 @@ LOG_SETUP(".test.bucketdb.bucketmanager"); using config::ConfigGetter; -using document::DocumenttypesConfig; using config::FileSpec; using document::DocumentType; using document::DocumentTypeRepo; diff --git a/storage/src/tests/distributor/getoperationtest.cpp b/storage/src/tests/distributor/getoperationtest.cpp index 41f811a77a6..dfe4f09de3f 100644 --- a/storage/src/tests/distributor/getoperationtest.cpp +++ b/storage/src/tests/distributor/getoperationtest.cpp @@ -3,6 +3,7 @@ #include <tests/distributor/distributor_stripe_test_util.h> #include <vespa/config/helper/configgetter.h> #include <vespa/config/helper/configgetter.hpp> +#include <vespa/document/config/documenttypes_config_fwd.h> #include <vespa/document/config/config-documenttypes.h> #include <vespa/document/fieldset/fieldsets.h> #include <vespa/document/repo/documenttyperepo.h> @@ -20,7 +21,6 @@ using std::shared_ptr; using config::ConfigGetter; -using document::DocumenttypesConfig; using config::FileSpec; using document::test::makeDocumentBucket; using document::BucketId; diff --git a/storage/src/tests/distributor/mergeoperationtest.cpp b/storage/src/tests/distributor/mergeoperationtest.cpp index 54bd06c98e0..9e0c89819a7 100644 --- a/storage/src/tests/distributor/mergeoperationtest.cpp +++ b/storage/src/tests/distributor/mergeoperationtest.cpp @@ -25,6 +25,7 @@ vespalib::string _g_storage("storage"); } struct MergeOperationTest : Test, DistributorStripeTestUtil { + using Priority = storage::api::StorageMessage::Priority; OperationSequencer _operation_sequencer; void SetUp() override { @@ -38,8 +39,9 @@ struct MergeOperationTest : Test, DistributorStripeTestUtil { } std::shared_ptr<MergeOperation> setup_minimal_merge_op(); - 
std::shared_ptr<MergeOperation> setup_simple_merge_op(const std::vector<uint16_t>& nodes); - std::shared_ptr<MergeOperation> setup_simple_merge_op(); + std::shared_ptr<MergeOperation> setup_simple_merge_op(const std::vector<uint16_t>& nodes, + Priority merge_pri = 120); + std::shared_ptr<MergeOperation> setup_simple_merge_op(Priority merge_pri = 120); void assert_simple_merge_bucket_command(); void assert_simple_delete_bucket_command(); MergeBucketMetricSet& get_merge_metrics(); @@ -55,7 +57,7 @@ MergeOperationTest::setup_minimal_merge_op() } std::shared_ptr<MergeOperation> -MergeOperationTest::setup_simple_merge_op(const std::vector<uint16_t>& nodes) +MergeOperationTest::setup_simple_merge_op(const std::vector<uint16_t>& nodes, Priority merge_pri) { getClock().setAbsoluteTimeInSeconds(10); @@ -68,14 +70,15 @@ MergeOperationTest::setup_simple_merge_op(const std::vector<uint16_t>& nodes) auto op = std::make_shared<MergeOperation>(BucketAndNodes(makeDocumentBucket(document::BucketId(16, 1)), nodes)); op->setIdealStateManager(&getIdealStateManager()); + op->setPriority(merge_pri); op->start(_sender, framework::MilliSecTime(0)); return op; } std::shared_ptr<MergeOperation> -MergeOperationTest::setup_simple_merge_op() +MergeOperationTest::setup_simple_merge_op(Priority merge_pri) { - return setup_simple_merge_op({0, 1, 2}); + return setup_simple_merge_op({0, 1, 2}, merge_pri); } void @@ -603,4 +606,26 @@ TEST_F(MergeOperationTest, unordered_merges_only_sent_iff_config_enabled_and_all _sender.getLastCommand(true)); } +TEST_F(MergeOperationTest, delete_bucket_inherits_merge_priority) { + auto op = setup_simple_merge_op(Priority(125)); + ASSERT_NO_FATAL_FAILURE(assert_simple_merge_bucket_command()); + sendReply(*op); + ASSERT_NO_FATAL_FAILURE(assert_simple_delete_bucket_command()); + auto del_cmd = std::dynamic_pointer_cast<api::DeleteBucketCommand>(_sender.commands().back()); + ASSERT_TRUE(del_cmd); + EXPECT_EQ(int(del_cmd->getPriority()), int(op->getPriority())); + 
EXPECT_EQ(int(del_cmd->getPriority()), 125); +} + +// TODO less magical numbers, but the priority mapping is technically config... +TEST_F(MergeOperationTest, delete_bucket_priority_is_capped_to_feed_pri_120) { + auto op = setup_simple_merge_op(Priority(119)); + ASSERT_NO_FATAL_FAILURE(assert_simple_merge_bucket_command()); + sendReply(*op); + ASSERT_NO_FATAL_FAILURE(assert_simple_delete_bucket_command()); + auto del_cmd = std::dynamic_pointer_cast<api::DeleteBucketCommand>(_sender.commands().back()); + ASSERT_TRUE(del_cmd); + EXPECT_EQ(int(del_cmd->getPriority()), 120); +} + } // storage::distributor diff --git a/storage/src/tests/distributor/operationtargetresolvertest.cpp b/storage/src/tests/distributor/operationtargetresolvertest.cpp index dca54e22f0d..2d41b0f4d32 100644 --- a/storage/src/tests/distributor/operationtargetresolvertest.cpp +++ b/storage/src/tests/distributor/operationtargetresolvertest.cpp @@ -34,7 +34,7 @@ struct OperationTargetResolverTest : Test, DistributorStripeTestUtil { void SetUp() override { _repo.reset(new document::DocumentTypeRepo( - *config::ConfigGetter<document::DocumenttypesConfig>::getConfig( + *config::ConfigGetter<DocumenttypesConfig>::getConfig( "config-doctypes", config::FileSpec("../config-doctypes.cfg")))); _html_type = _repo->getDocumentType("text/html"); diff --git a/storage/src/tests/distributor/putoperationtest.cpp b/storage/src/tests/distributor/putoperationtest.cpp index 6ad67ab91c2..b02395717e0 100644 --- a/storage/src/tests/distributor/putoperationtest.cpp +++ b/storage/src/tests/distributor/putoperationtest.cpp @@ -1,6 +1,7 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
#include <tests/distributor/distributor_stripe_test_util.h> +#include <vespa/document/config/documenttypes_config_fwd.h> #include <vespa/document/repo/documenttyperepo.h> #include <vespa/document/test/make_document_bucket.h> #include <vespa/storage/distributor/top_level_distributor.h> @@ -15,7 +16,6 @@ using std::shared_ptr; using config::ConfigGetter; -using document::DocumenttypesConfig; using config::FileSpec; using vespalib::string; using namespace document; @@ -51,9 +51,8 @@ public: document::BucketId createAndSendSampleDocument(vespalib::duration timeout); void sendReply(int idx = -1, - api::ReturnCode::Result result - = api::ReturnCode::OK, - api::BucketInfo info = api::BucketInfo(1,2,3,4,5)) + api::ReturnCode::Result result = api::ReturnCode::OK, + api::BucketInfo info = api::BucketInfo(1,2,3,4,5)) { ASSERT_FALSE(_sender.commands().empty()); if (idx == -1) { @@ -152,6 +151,33 @@ TEST_F(PutOperationTest, bucket_database_gets_special_entry_when_CreateBucket_se ASSERT_EQ("Create bucket => 0,Put => 0", _sender.getCommands(true)); } +TEST_F(PutOperationTest, failed_CreateBucket_removes_replica_from_db_and_sends_RequestBucketInfo) { + setup_stripe(2, 2, "distributor:1 storage:2"); + + auto doc = createDummyDocument("test", "test"); + sendPut(createPut(doc)); + + ASSERT_EQ("Create bucket => 1,Create bucket => 0,Put => 1,Put => 0", _sender.getCommands(true)); + + // Simulate timeouts on node 1. Replica existence is in a Schrödinger's cat state until we send + // a RequestBucketInfo to the node and open the box to find out for sure. 
+ sendReply(0, api::ReturnCode::TIMEOUT, api::BucketInfo()); // CreateBucket + sendReply(2, api::ReturnCode::TIMEOUT, api::BucketInfo()); // Put + // Pretend everything went fine on node 0 + sendReply(1); // CreateBucket + sendReply(3); // Put + + ASSERT_EQ("BucketId(0x4000000000008f09) : " + "node(idx=0,crc=0x1,docs=2/4,bytes=3/5,trusted=true,active=false,ready=false)", + dumpBucket(operation_context().make_split_bit_constrained_bucket_id(doc->getId()))); + + // TODO remove revert concept; does not make sense with Proton (since it's not a multi-version store and + // therefore does not have anything to revert back to) and is config-disabled by default for this provider. + ASSERT_EQ("RequestBucketInfoCommand(1 buckets, super bucket BucketId(0x4000000000008f09). ) => 1," + "Revert(BucketId(0x4000000000008f09)) => 0", + _sender.getCommands(true, true, 4)); +} + TEST_F(PutOperationTest, send_inline_split_before_put_if_bucket_too_large) { setup_stripe(1, 1, "storage:1 distributor:1"); auto cfg = make_config(); diff --git a/storage/src/tests/distributor/statecheckerstest.cpp b/storage/src/tests/distributor/statecheckerstest.cpp index f5531a134d0..d481370b2c1 100644 --- a/storage/src/tests/distributor/statecheckerstest.cpp +++ b/storage/src/tests/distributor/statecheckerstest.cpp @@ -858,7 +858,7 @@ TEST_F(StateCheckersTest, delete_extra_copies) { EXPECT_EQ("[Removing all copies since bucket is empty:node(idx=0,crc=0x0," "docs=0/0,bytes=0/0,trusted=false,active=false,ready=false)]" - " (pri 100)", + " (pri 120)", testDeleteExtraCopies("0=0", 2, PendingMessage(), "", true)) << "Remove empty buckets"; EXPECT_EQ("[Removing redundant in-sync copy from node 2]", diff --git a/storage/src/tests/distributor/top_level_bucket_db_updater_test.cpp b/storage/src/tests/distributor/top_level_bucket_db_updater_test.cpp index 3ed5e9f4a8d..1632867b627 100644 --- a/storage/src/tests/distributor/top_level_bucket_db_updater_test.cpp +++ 
b/storage/src/tests/distributor/top_level_bucket_db_updater_test.cpp @@ -2548,6 +2548,55 @@ TEST_F(TopLevelBucketDBUpdaterTest, pending_cluster_state_getter_is_non_null_onl } } +TEST_F(TopLevelBucketDBUpdaterTest, node_feature_sets_are_aggregated_from_nodes_and_propagated_to_stripes) { + lib::ClusterState state("distributor:1 storage:3"); + set_cluster_state(state); + uint32_t expected_msgs = message_count(3), dummy_buckets_to_return = 1; + + // Known feature sets are initially empty. + auto stripes = distributor_stripes(); + for (auto* s : stripes) { + for (uint16_t i : {0, 1, 2}) { + EXPECT_FALSE(s->node_supported_features_repo().node_supported_features(i).unordered_merge_chaining); + } + } + + ASSERT_EQ(expected_msgs, _sender.commands().size()); + for (uint32_t i = 0; i < _sender.commands().size(); i++) { + ASSERT_NO_FATAL_FAILURE(fake_bucket_reply(state, *_sender.command(i), + dummy_buckets_to_return, [i](auto& reply) noexcept { + // Pretend nodes 1 and 2 are on a shiny version with unordered merge chaining supported. + // Node 0 does not support the fanciness. + if (i > 0) { + reply.supported_node_features().unordered_merge_chaining = true; + } + })); + } + + // Node features should be propagated to all stripes + for (auto* s : stripes) { + EXPECT_FALSE(s->node_supported_features_repo().node_supported_features(0).unordered_merge_chaining); + EXPECT_TRUE(s->node_supported_features_repo().node_supported_features(1).unordered_merge_chaining); + EXPECT_TRUE(s->node_supported_features_repo().node_supported_features(2).unordered_merge_chaining); + } +} + +TEST_F(TopLevelBucketDBUpdaterTest, outdated_bucket_info_reply_is_ignored) { + set_cluster_state("version:1 distributor:1 storage:1"); + ASSERT_EQ(message_count(1), _sender.commands().size()); + auto req = std::dynamic_pointer_cast<api::RequestBucketInfoCommand>(_sender.commands().front()); + _sender.clear(); + // Force a new pending cluster state which overwrites the pending one. 
+ lib::ClusterState new_state("version:2 distributor:1 storage:2"); + set_cluster_state(new_state); + + const api::StorageMessageAddress& address(*req->getAddress()); + bool handled = bucket_db_updater().onRequestBucketInfoReply( + make_fake_bucket_reply(new_state, *req, address.getIndex(), 0, 0)); + EXPECT_TRUE(handled); // Should be returned as handled even though it's technically ignored. +} + + struct BucketDBUpdaterSnapshotTest : TopLevelBucketDBUpdaterTest { lib::ClusterState empty_state; std::shared_ptr<lib::ClusterState> initial_baseline; @@ -2678,37 +2727,4 @@ TEST_F(BucketDBUpdaterSnapshotTest, snapshot_is_unroutable_if_stale_reads_disabl EXPECT_FALSE(def_rs.is_routable()); } -TEST_F(BucketDBUpdaterSnapshotTest, node_feature_sets_are_aggregated_from_nodes_and_propagated_to_stripes) { - lib::ClusterState state("distributor:1 storage:3"); - set_cluster_state(state); - uint32_t expected_msgs = message_count(3), dummy_buckets_to_return = 1; - - // Known feature sets are initially empty. - auto stripes = distributor_stripes(); - for (auto* s : stripes) { - for (uint16_t i : {0, 1, 2}) { - EXPECT_FALSE(s->node_supported_features_repo().node_supported_features(i).unordered_merge_chaining); - } - } - - ASSERT_EQ(expected_msgs, _sender.commands().size()); - for (uint32_t i = 0; i < _sender.commands().size(); i++) { - ASSERT_NO_FATAL_FAILURE(fake_bucket_reply(state, *_sender.command(i), - dummy_buckets_to_return, [i](auto& reply) noexcept { - // Pretend nodes 1 and 2 are on a shiny version with unordered merge chaining supported. - // Node 0 does not support the fanciness. 
- if (i > 0) { - reply.supported_node_features().unordered_merge_chaining = true; - } - })); - } - - // Node features should be propagated to all stripes - for (auto* s : stripes) { - EXPECT_FALSE(s->node_supported_features_repo().node_supported_features(0).unordered_merge_chaining); - EXPECT_TRUE(s->node_supported_features_repo().node_supported_features(1).unordered_merge_chaining); - EXPECT_TRUE(s->node_supported_features_repo().node_supported_features(2).unordered_merge_chaining); - } -} - } diff --git a/storage/src/tests/distributor/twophaseupdateoperationtest.cpp b/storage/src/tests/distributor/twophaseupdateoperationtest.cpp index 9b229198043..5aa2a3e5662 100644 --- a/storage/src/tests/distributor/twophaseupdateoperationtest.cpp +++ b/storage/src/tests/distributor/twophaseupdateoperationtest.cpp @@ -3,6 +3,7 @@ #include <tests/distributor/distributor_stripe_test_util.h> #include <vespa/config/helper/configgetter.h> #include <vespa/document/base/testdocrepo.h> +#include <vespa/document/config/documenttypes_config_fwd.h> #include <vespa/document/fieldset/fieldsets.h> #include <vespa/document/repo/documenttyperepo.h> #include <vespa/document/test/make_document_bucket.h> @@ -19,7 +20,6 @@ namespace storage::distributor { using document::test::makeDocumentBucket; using config::ConfigGetter; -using document::DocumenttypesConfig; using namespace document; using namespace storage; using namespace storage::distributor; diff --git a/storage/src/tests/distributor/updateoperationtest.cpp b/storage/src/tests/distributor/updateoperationtest.cpp index db974e2202c..f0cb30368cb 100644 --- a/storage/src/tests/distributor/updateoperationtest.cpp +++ b/storage/src/tests/distributor/updateoperationtest.cpp @@ -14,15 +14,15 @@ #include <vespa/storageapi/message/state.h> #include <vespa/vespalib/gtest/gtest.h> +using config::ConfigGetter; +using config::FileSpec; +using vespalib::string; +using document::test::makeDocumentBucket; using namespace document; using namespace 
storage::api; using namespace std; using namespace storage::lib; using namespace ::testing; -using config::ConfigGetter; -using config::FileSpec; -using vespalib::string; -using document::test::makeDocumentBucket; namespace storage::distributor { diff --git a/storage/src/tests/persistence/CMakeLists.txt b/storage/src/tests/persistence/CMakeLists.txt index f0deec90aae..7b165e11b66 100644 --- a/storage/src/tests/persistence/CMakeLists.txt +++ b/storage/src/tests/persistence/CMakeLists.txt @@ -2,6 +2,7 @@ vespa_add_executable(storage_persistence_gtest_runner_app TEST SOURCES + active_operations_stats_test.cpp apply_bucket_diff_state_test.cpp bucketownershipnotifiertest.cpp has_mask_remapper_test.cpp diff --git a/storage/src/tests/persistence/active_operations_stats_test.cpp b/storage/src/tests/persistence/active_operations_stats_test.cpp new file mode 100644 index 00000000000..a5dd3d929db --- /dev/null +++ b/storage/src/tests/persistence/active_operations_stats_test.cpp @@ -0,0 +1,150 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+ +#include <vespa/persistence/dummyimpl/dummypersistence.h> +#include <tests/persistence/common/filestortestfixture.h> +#include <tests/persistence/filestorage/forwardingmessagesender.h> +#include <vespa/storage/persistence/filestorage/filestormetrics.h> +#include <vespa/document/test/make_document_bucket.h> +#include <vespa/document/fieldset/fieldsets.h> +#include <vespa/metrics/updatehook.h> + +using document::test::makeDocumentBucket; + +namespace storage { + +class ActiveOperationsStatsTest : public FileStorTestFixture +{ +protected: + DummyStorageLink top; + std::unique_ptr<DummyStorageLink> dummyManager; + ForwardingMessageSender messageSender; + FileStorMetrics metrics; + std::unique_ptr<FileStorHandler> filestorHandler; + uint32_t stripeId; + +public: + ActiveOperationsStatsTest(); + ~ActiveOperationsStatsTest() override; + std::shared_ptr<api::StorageMessage> createPut(uint64_t bucket, uint64_t docIdx); + std::shared_ptr<api::StorageMessage> createGet(uint64_t bucket) const; + + void assert_active_operations_stats(const ActiveOperationsStats &stats, uint32_t exp_active_size, uint32_t exp_size_samples, uint32_t exp_latency_samples); + void update_metrics(); + void test_active_operations_stats(); +}; + +ActiveOperationsStatsTest::ActiveOperationsStatsTest() + : FileStorTestFixture(), + top(), + dummyManager(std::make_unique<DummyStorageLink>()), + messageSender(*dummyManager), + metrics(), + stripeId(0) +{ + setupPersistenceThreads(1); + _node->setPersistenceProvider(std::make_unique<spi::dummy::DummyPersistence>(_node->getTypeRepo())); + top.push_back(std::move(dummyManager)); + top.open(); + metrics.initDiskMetrics(1, 1); + filestorHandler = std::make_unique<FileStorHandlerImpl>(messageSender, metrics, + _node->getComponentRegister()); + filestorHandler->setGetNextMessageTimeout(20ms); +} + +ActiveOperationsStatsTest::~ActiveOperationsStatsTest() = default; + +std::shared_ptr<api::StorageMessage> +ActiveOperationsStatsTest::createPut(uint64_t bucket, 
uint64_t docIdx) +{ + auto doc = _node->getTestDocMan().createDocument( + "foobar", vespalib::make_string("id:foo:testdoctype1:n=%" PRIu64 ":%" PRIu64, bucket, docIdx)); + auto cmd = std::make_shared<api::PutCommand>(makeDocumentBucket(document::BucketId(16, bucket)), std::move(doc), 1234); + cmd->setAddress(makeSelfAddress()); + return cmd; +} + +std::shared_ptr<api::StorageMessage> +ActiveOperationsStatsTest::createGet(uint64_t bucket) const +{ + auto cmd = std::make_shared<api::GetCommand>( + makeDocumentBucket(document::BucketId(16, bucket)), + document::DocumentId(vespalib::make_string("id:foo:testdoctype1:n=%" PRIu64 ":0", bucket)), document::AllFields::NAME); + cmd->setAddress(makeSelfAddress()); + return cmd; +} + +void +ActiveOperationsStatsTest::assert_active_operations_stats(const ActiveOperationsStats &stats, uint32_t exp_active_size, uint32_t exp_size_samples, uint32_t exp_latency_samples) +{ + EXPECT_EQ(exp_active_size, stats.get_active_size()); + EXPECT_EQ(exp_size_samples, stats.get_size_samples()); + EXPECT_EQ(exp_latency_samples, stats.get_latency_samples()); +} + +void +ActiveOperationsStatsTest::update_metrics() +{ + std::mutex dummy_lock; + auto &impl = dynamic_cast<FileStorHandlerImpl&>(*filestorHandler); + auto& hook = impl.get_metric_update_hook_for_testing(); + hook.updateMetrics(metrics::MetricLockGuard(dummy_lock)); +} + +void +ActiveOperationsStatsTest::test_active_operations_stats() +{ + auto lock0 = filestorHandler->getNextMessage(stripeId); + auto lock1 = filestorHandler->getNextMessage(stripeId); + auto lock2 = filestorHandler->getNextMessage(stripeId); + ASSERT_TRUE(lock0.first); + ASSERT_TRUE(lock1.first); + ASSERT_FALSE(lock2.first); + auto stats = filestorHandler->get_active_operations_stats(false); + { + SCOPED_TRACE("during"); + assert_active_operations_stats(stats, 2, 2, 0); + } + EXPECT_EQ(3, stats.get_total_size()); + lock0.first.reset(); + lock1.first.reset(); + stats = filestorHandler->get_active_operations_stats(false); + 
{ + SCOPED_TRACE("after"); + assert_active_operations_stats(stats, 0, 4, 2); + } + EXPECT_EQ(4, stats.get_total_size()); + EXPECT_LT(0.0, stats.get_total_latency()); + update_metrics(); + auto &ao_metrics = metrics.disk->active_operations; + EXPECT_DOUBLE_EQ(1.0, ao_metrics.size.getAverage()); + EXPECT_DOUBLE_EQ(0.0, ao_metrics.size.getMinimum()); + EXPECT_DOUBLE_EQ(2.0, ao_metrics.size.getMaximum()); + EXPECT_DOUBLE_EQ(4.0, ao_metrics.size.getCount()); + EXPECT_LT(0.0, ao_metrics.latency.getAverage()); + EXPECT_LT(0.0, ao_metrics.latency.getMinimum()); + EXPECT_LT(0.0, ao_metrics.latency.getMaximum()); + EXPECT_DOUBLE_EQ(2.0, ao_metrics.latency.getCount()); +} + +TEST_F(ActiveOperationsStatsTest, empty_stats) +{ + auto stats = filestorHandler->get_active_operations_stats(false); + assert_active_operations_stats(stats, 0, 0, 0); +} + +TEST_F(ActiveOperationsStatsTest, exclusive_lock_active_operations_stats) +{ + filestorHandler->schedule(createPut(1234, 0)); + filestorHandler->schedule(createPut(1234, 1)); + filestorHandler->schedule(createPut(5432, 0)); + test_active_operations_stats(); +} + +TEST_F(ActiveOperationsStatsTest, shared_lock_active_operations_stats) +{ + filestorHandler->schedule(createGet(1234)); + filestorHandler->schedule(createGet(1234)); + test_active_operations_stats(); +} + +} diff --git a/storage/src/tests/persistence/apply_bucket_diff_state_test.cpp b/storage/src/tests/persistence/apply_bucket_diff_state_test.cpp index 701e8a80d3a..ec57d775f43 100644 --- a/storage/src/tests/persistence/apply_bucket_diff_state_test.cpp +++ b/storage/src/tests/persistence/apply_bucket_diff_state_test.cpp @@ -2,11 +2,17 @@ #include <vespa/storage/persistence/apply_bucket_diff_state.h> #include <vespa/storage/persistence/merge_bucket_info_syncer.h> +#include <vespa/storage/persistence/filestorage/merge_handler_metrics.h> +#include <vespa/storageapi/message/bucket.h> #include <vespa/document/base/documentid.h> #include <vespa/document/bucket/bucketid.h> #include 
<vespa/document/bucket/bucketidfactory.h> #include <vespa/document/test/make_document_bucket.h> +#include <vespa/metrics/metricset.h> #include <vespa/persistence/spi/result.h> +#include <vespa/storageframework/defaultimplementation/clock/fakeclock.h> +#include <tests/common/message_sender_stub.h> +#include <tests/persistence/persistencetestutils.h> #include <gtest/gtest.h> using document::DocumentId; @@ -70,25 +76,42 @@ void push_bad(ApplyBucketDiffState &state) } -class ApplyBucketDiffStateTestBase : public ::testing::Test +class ApplyBucketDiffStateTestBase : public PersistenceTestUtils { public: uint32_t sync_count; DummyMergeBucketInfoSyncer syncer; + metrics::MetricSet merge_handler_metrics_owner; + MergeHandlerMetrics merge_handler_metrics; + FileStorThreadMetrics::Op op_metrics; + framework::defaultimplementation::FakeClock clock; + MessageSenderStub message_sender; MonitoredRefCount monitored_ref_count; ApplyBucketDiffStateTestBase() - : ::testing::Test(), + : PersistenceTestUtils(), sync_count(0u), - syncer(sync_count) + syncer(sync_count), + merge_handler_metrics_owner("owner", {}, "owner"), + merge_handler_metrics(&merge_handler_metrics_owner), + op_metrics("op", "op", &merge_handler_metrics_owner), + clock(), + monitored_ref_count() { } ~ApplyBucketDiffStateTestBase(); std::shared_ptr<ApplyBucketDiffState> make_state() { - return ApplyBucketDiffState::create(syncer, spi::Bucket(dummy_document_bucket), RetainGuard(monitored_ref_count)); + return ApplyBucketDiffState::create(syncer, merge_handler_metrics, clock, spi::Bucket(dummy_document_bucket), RetainGuard(monitored_ref_count)); } + + MessageTracker::UP + create_tracker(std::shared_ptr<api::StorageMessage> cmd, document::Bucket bucket) { + return MessageTracker::createForTesting(framework::MilliSecTimer(clock), getEnv(), + message_sender, NoBucketLock::make(bucket), std::move(cmd)); + } + }; ApplyBucketDiffStateTestBase::~ApplyBucketDiffStateTestBase() = default; @@ -118,8 +141,44 @@ public: 
check_failure("Failed put for id::test::1 in Bucket(0x0000000000000010): Result(5, write blocked)"); } + void test_delayed_reply(bool failed, bool async_failed, bool chained_reply); + }; +void +ApplyBucketDiffStateTest::test_delayed_reply(bool failed, bool async_failed, bool chained_reply) +{ + auto cmd = std::make_shared<api::MergeBucketCommand>(dummy_document_bucket, std::vector<api::MergeBucketCommand::Node>{}, 0); + std::shared_ptr<api::StorageReply> reply = cmd->makeReply(); + auto tracker = create_tracker(cmd, dummy_document_bucket); + if (failed) { + reply->setResult(api::ReturnCode::Result::INTERNAL_FAILURE); + } + tracker->setMetric(op_metrics); + tracker->setReply(reply); + if (chained_reply) { + state->set_delayed_reply(std::move(tracker), message_sender, &op_metrics, framework::MilliSecTimer(clock), std::move(reply)); + } else { + state->set_delayed_reply(std::move(tracker), std::move(reply)); + } + clock.addMilliSecondsToTime(16); + if (async_failed) { + push_bad(*state); + } + state.reset(); + if (failed || async_failed) { + EXPECT_EQ(0.0, op_metrics.latency.getLast()); + EXPECT_EQ(0, op_metrics.latency.getCount()); + EXPECT_EQ(1, op_metrics.failed.getValue()); + } else { + EXPECT_EQ(16.0, op_metrics.latency.getLast()); + EXPECT_EQ(1, op_metrics.latency.getCount()); + EXPECT_EQ(0, op_metrics.failed.getValue()); + } + ASSERT_EQ(1, message_sender.replies.size()); + EXPECT_NE(failed || async_failed, std::dynamic_pointer_cast<api::MergeBucketReply>(message_sender.replies.front())->getResult().success()); +} + TEST_F(ApplyBucketDiffStateTest, ok_results_can_be_checked) { push_ok(*state); @@ -168,4 +227,54 @@ TEST_F(ApplyBucketDiffStateTest, failed_sync_bucket_info_is_detected) check_failure(fail); } +TEST_F(ApplyBucketDiffStateTest, data_write_latency_is_updated) +{ + clock.addMilliSecondsToTime(10); + state.reset(); + EXPECT_EQ(10.0, merge_handler_metrics.mergeDataWriteLatency.getLast()); + EXPECT_EQ(1, 
merge_handler_metrics.mergeDataWriteLatency.getCount()); +} + +TEST_F(ApplyBucketDiffStateTest, total_latency_is_not_updated) +{ + clock.addMilliSecondsToTime(14); + state.reset(); + EXPECT_EQ(0.0, merge_handler_metrics.mergeLatencyTotal.getLast()); + EXPECT_EQ(0, merge_handler_metrics.mergeLatencyTotal.getCount()); +} + +TEST_F(ApplyBucketDiffStateTest, total_latency_is_updated) +{ + state->set_merge_start_time(framework::MilliSecTimer(clock)); + clock.addMilliSecondsToTime(14); + state.reset(); + EXPECT_EQ(14.0, merge_handler_metrics.mergeLatencyTotal.getLast()); + EXPECT_EQ(1, merge_handler_metrics.mergeLatencyTotal.getCount()); +} + +TEST_F(ApplyBucketDiffStateTest, delayed_ok_reply) +{ + test_delayed_reply(false, false, false); +} + +TEST_F(ApplyBucketDiffStateTest, delayed_failed_reply) +{ + test_delayed_reply(true, false, false); +} + +TEST_F(ApplyBucketDiffStateTest, delayed_ok_chained_reply) +{ + test_delayed_reply(false, false, true); +} + +TEST_F(ApplyBucketDiffStateTest, delayed_failed_chained_reply) +{ + test_delayed_reply(true, false, true); +} + +TEST_F(ApplyBucketDiffStateTest, delayed_async_failed_reply) +{ + test_delayed_reply(false, true, false); +} + } diff --git a/storage/src/tests/persistence/filestorage/deactivatebucketstest.cpp b/storage/src/tests/persistence/filestorage/deactivatebucketstest.cpp index 597bb4b07ff..6c9be3d1376 100644 --- a/storage/src/tests/persistence/filestorage/deactivatebucketstest.cpp +++ b/storage/src/tests/persistence/filestorage/deactivatebucketstest.cpp @@ -14,11 +14,46 @@ using namespace ::testing; namespace storage { struct DeactivateBucketsTest : FileStorTestFixture { - bool isActive(const document::BucketId&) const; + std::unique_ptr<TestFileStorComponents> _c; + + [[nodiscard]] bool is_active(const document::BucketId&) const; + + void SetUp() override { + FileStorTestFixture::SetUp(); + _c = std::make_unique<TestFileStorComponents>(*this); + + std::string up_state("storage:2 distributor:2"); + 
_node->getStateUpdater().setClusterState( + std::make_shared<const lib::ClusterState>(up_state)); + + createBucket(test_bucket()); + + api::BucketInfo serviceLayerInfo(1, 2, 3, 4, 5, true, true); + { + StorBucketDatabase::WrappedEntry entry( + _node->getStorageBucketDatabase().get(test_bucket(), "foo", + StorBucketDatabase::CREATE_IF_NONEXISTING)); + entry->info = serviceLayerInfo; + entry.write(); + } + } + + void TearDown() override { + _c.reset(); + FileStorTestFixture::TearDown(); + } + + static document::BucketId test_bucket() noexcept { + return {8, 123}; + } + + static std::shared_ptr<const lib::ClusterState> state_of(const char* str) { + return std::make_shared<const lib::ClusterState>(str); + } }; bool -DeactivateBucketsTest::isActive(const document::BucketId& bucket) const +DeactivateBucketsTest::is_active(const document::BucketId& bucket) const { StorBucketDatabase::WrappedEntry entry( _node->getStorageBucketDatabase().get(bucket, "foo")); @@ -26,31 +61,53 @@ DeactivateBucketsTest::isActive(const document::BucketId& bucket) const return entry->info.isActive(); } -TEST_F(DeactivateBucketsTest, buckets_in_database_deactivated_when_node_down_in_cluster_state) { - TestFileStorComponents c(*this); - // Must set state to up first, or down-edge case won't trigger. 
- std::string upState("storage:2 distributor:2"); - _node->getStateUpdater().setClusterState( - lib::ClusterState::CSP(new lib::ClusterState(upState))); - - document::BucketId bucket(8, 123); - - createBucket(bucket); - api::BucketInfo serviceLayerInfo(1, 2, 3, 4, 5, true, true); - { - StorBucketDatabase::WrappedEntry entry( - _node->getStorageBucketDatabase().get(bucket, "foo", - StorBucketDatabase::CREATE_IF_NONEXISTING)); - entry->info = serviceLayerInfo; - entry.write(); - } - EXPECT_TRUE(isActive(bucket)); - std::string downState("storage:2 .1.s:d distributor:2"); - _node->getStateUpdater().setClusterState( - lib::ClusterState::CSP(new lib::ClusterState(downState))); - +TEST_F(DeactivateBucketsTest, buckets_deactivated_when_node_marked_down) +{ + EXPECT_TRUE(is_active(test_bucket())); + _node->getStateUpdater().setClusterState(state_of("storage:2 .1.s:d distributor:2")); // Buckets should have been deactivated in content layer - EXPECT_FALSE(isActive(bucket)); + EXPECT_FALSE(is_active(test_bucket())); +} + +TEST_F(DeactivateBucketsTest, buckets_not_deactivated_when_node_marked_maintenance) +{ + EXPECT_TRUE(is_active(test_bucket())); + _node->getStateUpdater().setClusterState(state_of("storage:2 .1.s:m distributor:2")); + EXPECT_TRUE(is_active(test_bucket())); +} + +TEST_F(DeactivateBucketsTest, buckets_deactivated_when_node_goes_from_maintenance_to_up) +{ + EXPECT_TRUE(is_active(test_bucket())); + _node->getStateUpdater().setClusterState(state_of("storage:2 .1.s:m distributor:2")); + _node->getStateUpdater().setClusterState(state_of("storage:2 distributor:2")); + EXPECT_FALSE(is_active(test_bucket())); } +TEST_F(DeactivateBucketsTest, buckets_deactivated_when_node_goes_from_maintenance_to_down) +{ + EXPECT_TRUE(is_active(test_bucket())); + _node->getStateUpdater().setClusterState(state_of("storage:2 .1.s:m distributor:2")); + _node->getStateUpdater().setClusterState(state_of("storage:2 .1.s:d distributor:2")); + EXPECT_FALSE(is_active(test_bucket())); +} + +// 
If we only have a subset of the bucket spaces in maintenance mode (i.e. global +// bucket merge enforcement), we treat this as the node being down from the perspective +// of default space bucket deactivation. +TEST_F(DeactivateBucketsTest, bucket_space_subset_in_maintenance_deactivates_buckets) +{ + EXPECT_TRUE(is_active(test_bucket())); + auto derived = lib::ClusterStateBundle::BucketSpaceStateMapping({ + {document::FixedBucketSpaces::default_space(), state_of("storage:2 .1.s:m distributor:2")}, + {document::FixedBucketSpaces::global_space(), state_of("storage:2 distributor:2")} + }); + _node->getStateUpdater().setClusterStateBundle( + std::make_shared<const lib::ClusterStateBundle>(*state_of("storage:2 .1.s:m distributor:2"), + std::move(derived))); + EXPECT_FALSE(is_active(test_bucket())); +} + +// TODO should also test SPI interaction + } // namespace storage diff --git a/storage/src/tests/persistence/filestorage/operationabortingtest.cpp b/storage/src/tests/persistence/filestorage/operationabortingtest.cpp index a3f0182ba30..1752de5fb80 100644 --- a/storage/src/tests/persistence/filestorage/operationabortingtest.cpp +++ b/storage/src/tests/persistence/filestorage/operationabortingtest.cpp @@ -22,6 +22,8 @@ namespace storage { namespace { +VESPA_THREAD_STACK_TAG(test_thread); + // Exploit the fact that PersistenceProviderWrapper already provides a forwarding // implementation of all SPI calls, so we can selectively override. 
class BlockingMockProvider : public PersistenceProviderWrapper @@ -294,7 +296,7 @@ TEST_F(OperationAbortingTest, wait_for_current_operation_completion_for_aborted_ auto abortCmd = makeAbortCmd(abortSet); SendTask sendTask(abortCmd, *_queueBarrier, c.top); - vespalib::Thread thread(sendTask); + vespalib::Thread thread(sendTask, test_thread); thread.start(); LOG(debug, "waiting for threads to reach barriers"); diff --git a/storage/src/tests/persistence/persistencetestutils.h b/storage/src/tests/persistence/persistencetestutils.h index fc986c3c6f2..4bbff9bb2ca 100644 --- a/storage/src/tests/persistence/persistencetestutils.h +++ b/storage/src/tests/persistence/persistencetestutils.h @@ -50,6 +50,8 @@ public: api::LockingRequirements lockingRequirements() const noexcept override { return api::LockingRequirements::Shared; } + void signal_operation_sync_phase_done() noexcept override {} + bool wants_sync_phase_done_notification() const noexcept override { return false; } static std::shared_ptr<NoBucketLock> make(document::Bucket bucket) { return std::make_shared<NoBucketLock>(bucket); } @@ -78,6 +80,8 @@ public: api::LockingRequirements lockingRequirements() const noexcept override { return api::LockingRequirements::Exclusive; } + void signal_operation_sync_phase_done() noexcept override {} + bool wants_sync_phase_done_notification() const noexcept override { return false; } static std::shared_ptr<MockBucketLock> make(document::Bucket bucket, MockBucketLocks& locks) { return std::make_shared<MockBucketLock>(bucket, locks); } diff --git a/storage/src/tests/storageserver/documentapiconvertertest.cpp b/storage/src/tests/storageserver/documentapiconvertertest.cpp index ae4ff1c1b21..d4d9d54557d 100644 --- a/storage/src/tests/storageserver/documentapiconvertertest.cpp +++ b/storage/src/tests/storageserver/documentapiconvertertest.cpp @@ -3,6 +3,7 @@ #include <vespa/config/subscription/configuri.h> #include <vespa/document/base/testdocrepo.h> #include 
<vespa/document/bucket/bucketidfactory.h> +#include <vespa/document/config/documenttypes_config_fwd.h> #include <vespa/document/datatype/documenttype.h> #include <vespa/document/repo/documenttyperepo.h> #include <vespa/document/select/parser.h> diff --git a/storage/src/vespa/storage/common/content_bucket_space.cpp b/storage/src/vespa/storage/common/content_bucket_space.cpp index 56bab9c6edc..58f7501d278 100644 --- a/storage/src/vespa/storage/common/content_bucket_space.cpp +++ b/storage/src/vespa/storage/common/content_bucket_space.cpp @@ -11,7 +11,8 @@ ContentBucketSpace::ContentBucketSpace(document::BucketSpace bucketSpace, _lock(), _clusterState(), _distribution(), - _nodeUpInLastNodeStateSeenByProvider(false) + _nodeUpInLastNodeStateSeenByProvider(false), + _nodeMaintenanceInLastNodeStateSeenByProvider(false) { } @@ -57,4 +58,18 @@ ContentBucketSpace::setNodeUpInLastNodeStateSeenByProvider(bool nodeUpInLastNode _nodeUpInLastNodeStateSeenByProvider = nodeUpInLastNodeStateSeenByProvider; } +bool +ContentBucketSpace::getNodeMaintenanceInLastNodeStateSeenByProvider() const +{ + std::lock_guard guard(_lock); + return _nodeMaintenanceInLastNodeStateSeenByProvider; +} + +void +ContentBucketSpace::setNodeMaintenanceInLastNodeStateSeenByProvider(bool nodeMaintenanceInLastNodeStateSeenByProvider) +{ + std::lock_guard guard(_lock); + _nodeMaintenanceInLastNodeStateSeenByProvider = nodeMaintenanceInLastNodeStateSeenByProvider; +} + } diff --git a/storage/src/vespa/storage/common/content_bucket_space.h b/storage/src/vespa/storage/common/content_bucket_space.h index 63379d6b8ee..836cd6e15f8 100644 --- a/storage/src/vespa/storage/common/content_bucket_space.h +++ b/storage/src/vespa/storage/common/content_bucket_space.h @@ -23,6 +23,7 @@ private: std::shared_ptr<const lib::ClusterState> _clusterState; std::shared_ptr<const lib::Distribution> _distribution; bool _nodeUpInLastNodeStateSeenByProvider; + bool _nodeMaintenanceInLastNodeStateSeenByProvider; public: using UP = 
std::unique_ptr<ContentBucketSpace>; @@ -36,6 +37,8 @@ public: std::shared_ptr<const lib::Distribution> getDistribution() const; bool getNodeUpInLastNodeStateSeenByProvider() const; void setNodeUpInLastNodeStateSeenByProvider(bool nodeUpInLastNodeStateSeenByProvider); + bool getNodeMaintenanceInLastNodeStateSeenByProvider() const; + void setNodeMaintenanceInLastNodeStateSeenByProvider(bool nodeMaintenanceInLastNodeStateSeenByProvider); }; } diff --git a/storage/src/vespa/storage/common/content_bucket_space_repo.h b/storage/src/vespa/storage/common/content_bucket_space_repo.h index 7038e9cb1aa..048c2c266f0 100644 --- a/storage/src/vespa/storage/common/content_bucket_space_repo.h +++ b/storage/src/vespa/storage/common/content_bucket_space_repo.h @@ -21,8 +21,8 @@ private: public: explicit ContentBucketSpaceRepo(const ContentBucketDbOptions&); ContentBucketSpace &get(document::BucketSpace bucketSpace) const; - BucketSpaceMap::const_iterator begin() const { return _map.begin(); } - BucketSpaceMap::const_iterator end() const { return _map.end(); } + BucketSpaceMap::const_iterator begin() const noexcept { return _map.begin(); } + BucketSpaceMap::const_iterator end() const noexcept { return _map.end(); } BucketSpaces getBucketSpaces() const; size_t getBucketMemoryUsage() const; diff --git a/storage/src/vespa/storage/config/stor-distributormanager.def b/storage/src/vespa/storage/config/stor-distributormanager.def index 8021075faa3..5162e337f24 100644 --- a/storage/src/vespa/storage/config/stor-distributormanager.def +++ b/storage/src/vespa/storage/config/stor-distributormanager.def @@ -103,7 +103,7 @@ priority_activate_no_existing_active int default=100 priority_activate_with_existing_active int default=100 ## Deletion of bucket copy. -priority_delete_bucket_copy int default=100 +priority_delete_bucket_copy int default=120 ## Joining caused by bucket siblings getting sufficiently small to fit into a ## single bucket. 
diff --git a/storage/src/vespa/storage/distributor/node_supported_features.h b/storage/src/vespa/storage/distributor/node_supported_features.h index fb9cc68e970..647e063f93e 100644 --- a/storage/src/vespa/storage/distributor/node_supported_features.h +++ b/storage/src/vespa/storage/distributor/node_supported_features.h @@ -13,7 +13,9 @@ namespace storage::distributor { struct NodeSupportedFeatures { bool unordered_merge_chaining = false; - bool operator==(const NodeSupportedFeatures&) const noexcept = default; + bool operator==(const NodeSupportedFeatures& rhs) const noexcept { + return unordered_merge_chaining == rhs.unordered_merge_chaining; + }; }; } diff --git a/storage/src/vespa/storage/distributor/operations/external/updateoperation.cpp b/storage/src/vespa/storage/distributor/operations/external/updateoperation.cpp index 3aa231e1790..5e3fe161f92 100644 --- a/storage/src/vespa/storage/distributor/operations/external/updateoperation.cpp +++ b/storage/src/vespa/storage/distributor/operations/external/updateoperation.cpp @@ -33,6 +33,7 @@ UpdateOperation::UpdateOperation(const DistributorNodeContext& node_ctx, _new_timestamp(_msg->getTimestamp()), _is_auto_create_update(_msg->getUpdate()->getCreateIfNonExistent()), _node_ctx(node_ctx), + _op_ctx(op_ctx), _bucketSpace(bucketSpace), _newestTimestampLocation(), _infoAtSendTime(), @@ -155,24 +156,11 @@ UpdateOperation::onReceive(DistributorStripeMessageSender& sender, for (uint32_t i = 0; i < _results.size(); i++) { if (_results[i].oldTs < oldTs) { - LOG(error, "Update operation for '%s' in bucket %s updated documents with different timestamps. " - "This should not happen and may indicate undetected replica divergence. 
" - "Found ts=%" PRIu64 " on node %u, ts=%" PRIu64 " on node %u", - reply.getDocumentId().toString().c_str(), - reply.getBucket().toString().c_str(), - _results[i].oldTs, _results[i].nodeId, - _results[goodNode].oldTs, _results[goodNode].nodeId); - _metrics.diverging_timestamp_updates.inc(); - + log_inconsistency_warning(reply, _results[goodNode], _results[i]); replyToSend.setNodeWithNewestTimestamp(_results[goodNode].nodeId); _newestTimestampLocation.first = _results[goodNode].bucketId; _newestTimestampLocation.second = _results[goodNode].nodeId; - - LOG(warning, "Bucket info prior to update operation was: %s. After update, " - "info on node %u is %s, info on node %u is %s", - _infoAtSendTime.toString().c_str(), - _results[i].nodeId, _results[i].bucketInfo.toString().c_str(), - _results[goodNode].nodeId, _results[goodNode].bucketInfo.toString().c_str()); + _metrics.diverging_timestamp_updates.inc(); break; } } @@ -186,6 +174,33 @@ UpdateOperation::onReceive(DistributorStripeMessageSender& sender, } void +UpdateOperation::log_inconsistency_warning(const api::UpdateReply& reply, + const PreviousDocumentVersion& highest_timestamped_version, + const PreviousDocumentVersion& low_timestamped_version) +{ + bool low_ts_node_gc = _op_ctx.has_pending_message(low_timestamped_version.nodeId, reply.getBucket(), + api::MessageType::REMOVELOCATION_ID); + bool high_ts_node_gc = _op_ctx.has_pending_message(highest_timestamped_version.nodeId, reply.getBucket(), + api::MessageType::REMOVELOCATION_ID); + + LOG(warning, "Update operation for '%s' in bucket %s updated documents with different timestamps. " + "This should not happen and may indicate undetected replica divergence. " + "Found low ts=%" PRIu64 " on node %u (pending GC: %s), " + "highest ts=%" PRIu64 " on node %u (pending GC: %s)", + reply.getDocumentId().toString().c_str(), + reply.getBucket().toString().c_str(), + low_timestamped_version.oldTs, low_timestamped_version.nodeId, (low_ts_node_gc ? 
"yes" : "no"), + highest_timestamped_version.oldTs, highest_timestamped_version.nodeId, (high_ts_node_gc ? "yes" : "no")); + + LOG(warning, "Bucket info prior to update operation was: %s. After update, " + "info on node %u is %s, info on node %u is %s", + _infoAtSendTime.toString().c_str(), + low_timestamped_version.nodeId, low_timestamped_version.bucketInfo.toString().c_str(), + highest_timestamped_version.nodeId, highest_timestamped_version.bucketInfo.toString().c_str()); + +} + +void UpdateOperation::onClose(DistributorStripeMessageSender& sender) { _tracker.fail(sender, api::ReturnCode(api::ReturnCode::ABORTED, "Process is shutting down")); diff --git a/storage/src/vespa/storage/distributor/operations/external/updateoperation.h b/storage/src/vespa/storage/distributor/operations/external/updateoperation.h index 46eba48e328..7f3fef1260a 100644 --- a/storage/src/vespa/storage/distributor/operations/external/updateoperation.h +++ b/storage/src/vespa/storage/distributor/operations/external/updateoperation.h @@ -49,6 +49,7 @@ private: const bool _is_auto_create_update; const DistributorNodeContext& _node_ctx; + DistributorStripeOperationContext& _op_ctx; DistributorBucketSpace &_bucketSpace; std::pair<document::BucketId, uint16_t> _newestTimestampLocation; api::BucketInfo _infoAtSendTime; // Should be same across all replicas @@ -70,6 +71,9 @@ private: UpdateMetricSet& _metrics; api::Timestamp adjusted_received_old_timestamp(api::Timestamp old_ts_from_node) const; + void log_inconsistency_warning(const api::UpdateReply& reply, + const PreviousDocumentVersion& highest_timestamped_version, + const PreviousDocumentVersion& low_timestamped_version); }; } diff --git a/storage/src/vespa/storage/distributor/operations/idealstate/mergeoperation.cpp b/storage/src/vespa/storage/distributor/operations/idealstate/mergeoperation.cpp index d220a71966f..6aa243d5e99 100644 --- a/storage/src/vespa/storage/distributor/operations/idealstate/mergeoperation.cpp +++ 
b/storage/src/vespa/storage/distributor/operations/idealstate/mergeoperation.cpp @@ -253,6 +253,10 @@ MergeOperation::deleteSourceOnlyNodes( return; } _removeOperation->setIdealStateManager(_manager); + // We cap the DeleteBucket pri so that it FIFOs with the default feed priority (120). + // Not doing this risks preempting feed ops with deletes, elevating latencies. + // TODO less magical numbers, but the priority mapping is technically config... + _removeOperation->setPriority(std::max(api::StorageMessage::Priority(120), getPriority())); if (_removeOperation->onStartInternal(sender)) { _ok = _removeOperation->ok(); diff --git a/storage/src/vespa/storage/distributor/persistencemessagetracker.cpp b/storage/src/vespa/storage/distributor/persistencemessagetracker.cpp index 8cacbb0bf5a..45129f7be04 100644 --- a/storage/src/vespa/storage/distributor/persistencemessagetracker.cpp +++ b/storage/src/vespa/storage/distributor/persistencemessagetracker.cpp @@ -259,7 +259,14 @@ PersistenceMessageTrackerImpl::handleCreateBucketReply( && reply.getResult().getResult() != api::ReturnCode::EXISTS) { LOG(spam, "Create bucket reply failed, so deleting it from bucket db"); + // We don't know if the bucket exists at this point, so we remove it from the DB. + // If we get subsequent write load the bucket will be implicitly created again + // (which is an idempotent operation) and all is well. But since we don't know _if_ + // we'll get any further write load we send a RequestBucketInfo to bring the bucket + // back into the DB if it _was_ successfully created. We have to do the latter to + // avoid the risk of introducing an orphaned bucket replica on the content node. 
_op_ctx.remove_node_from_bucket_database(reply.getBucket(), node); + _op_ctx.recheck_bucket_info(node, reply.getBucket()); } } diff --git a/storage/src/vespa/storage/distributor/top_level_bucket_db_updater.cpp b/storage/src/vespa/storage/distributor/top_level_bucket_db_updater.cpp index 613f0f6ce09..16be7733c1a 100644 --- a/storage/src/vespa/storage/distributor/top_level_bucket_db_updater.cpp +++ b/storage/src/vespa/storage/distributor/top_level_bucket_db_updater.cpp @@ -311,14 +311,12 @@ bool TopLevelBucketDBUpdater::onRequestBucketInfoReply( const std::shared_ptr<api::RequestBucketInfoReply>& repl) { - if (pending_cluster_state_accepted(repl)) { - return true; - } - return false; + attempt_accept_reply_by_current_pending_state(repl); + return true; } -bool -TopLevelBucketDBUpdater::pending_cluster_state_accepted( +void +TopLevelBucketDBUpdater::attempt_accept_reply_by_current_pending_state( const std::shared_ptr<api::RequestBucketInfoReply>& repl) { if (_pending_cluster_state.get() @@ -328,11 +326,14 @@ TopLevelBucketDBUpdater::pending_cluster_state_accepted( auto guard = _stripe_accessor.rendezvous_and_hold_all(); process_completed_pending_cluster_state(*guard); } - return true; + } else { + // Reply is not recognized, so its corresponding command must have been + // sent by a previous, preempted cluster state. We must still swallow the + // reply to prevent it from being passed further down a storage chain that + // does not expect it. 
+ LOG(spam, "Reply %s was not accepted by pending cluster state", + repl->toString().c_str()); } - LOG(spam, "Reply %s was not accepted by pending cluster state", - repl->toString().c_str()); - return false; } void diff --git a/storage/src/vespa/storage/distributor/top_level_bucket_db_updater.h b/storage/src/vespa/storage/distributor/top_level_bucket_db_updater.h index b1065e708a4..d8e49d5c383 100644 --- a/storage/src/vespa/storage/distributor/top_level_bucket_db_updater.h +++ b/storage/src/vespa/storage/distributor/top_level_bucket_db_updater.h @@ -85,7 +85,7 @@ private: bool should_defer_state_enabling() const noexcept; bool has_pending_cluster_state() const; - bool pending_cluster_state_accepted(const std::shared_ptr<api::RequestBucketInfoReply>& repl); + void attempt_accept_reply_by_current_pending_state(const std::shared_ptr<api::RequestBucketInfoReply>& repl); bool is_pending_cluster_state_completed() const; void process_completed_pending_cluster_state(StripeAccessGuard& guard); void activate_pending_cluster_state(StripeAccessGuard& guard); diff --git a/storage/src/vespa/storage/persistence/apply_bucket_diff_state.cpp b/storage/src/vespa/storage/persistence/apply_bucket_diff_state.cpp index 97aba76dfac..07823792062 100644 --- a/storage/src/vespa/storage/persistence/apply_bucket_diff_state.cpp +++ b/storage/src/vespa/storage/persistence/apply_bucket_diff_state.cpp @@ -1,10 +1,12 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
#include "apply_bucket_diff_state.h" -#include "mergehandler.h" +#include "merge_bucket_info_syncer.h" #include "persistenceutil.h" #include <vespa/document/base/documentid.h> #include <vespa/persistence/spi/result.h> +#include <vespa/storageapi/message/bucket.h> +#include <vespa/storage/common/messagesender.h> #include <vespa/vespalib/stllike/asciistream.h> using storage::spi::Result; @@ -20,8 +22,10 @@ public: } }; -ApplyBucketDiffState::ApplyBucketDiffState(const MergeBucketInfoSyncer& merge_bucket_info_syncer, const spi::Bucket& bucket, RetainGuard&& retain_guard) +ApplyBucketDiffState::ApplyBucketDiffState(const MergeBucketInfoSyncer& merge_bucket_info_syncer, MergeHandlerMetrics& merge_handler_metrics, const framework::Clock& clock, const spi::Bucket& bucket, RetainGuard&& retain_guard) : _merge_bucket_info_syncer(merge_bucket_info_syncer), + _merge_handler_metrics(merge_handler_metrics), + _start_time(clock), _bucket(bucket), _fail_message(), _failed_flag(), @@ -30,7 +34,10 @@ ApplyBucketDiffState::ApplyBucketDiffState(const MergeBucketInfoSyncer& merge_bu _tracker(), _delayed_reply(), _sender(nullptr), - _retain_guard(std::move(retain_guard)) + _op_metrics(nullptr), + _op_start_time(), + _retain_guard(std::move(retain_guard)), + _merge_start_time() { } @@ -43,6 +50,10 @@ ApplyBucketDiffState::~ApplyBucketDiffState() _fail_message = e.what(); } } + _merge_handler_metrics.mergeDataWriteLatency.addValue(_start_time.getElapsedTimeAsDouble()); + if (_merge_start_time.has_value()) { + _merge_handler_metrics.mergeLatencyTotal.addValue(_merge_start_time.value().getElapsedTimeAsDouble()); + } if (_promise.has_value()) { _promise.value().set_value(_fail_message); } @@ -51,6 +62,15 @@ ApplyBucketDiffState::~ApplyBucketDiffState() _delayed_reply->setResult(api::ReturnCode(api::ReturnCode::INTERNAL_FAILURE, _fail_message)); } if (_sender) { + if (_op_metrics != nullptr) { + if (_delayed_reply->getResult().success()) { + if (_op_start_time.has_value()) { + 
_op_metrics->latency.addValue(_op_start_time.value().getElapsedTimeAsDouble()); + } + } else { + _op_metrics->failed.inc(); + } + } _sender->sendReply(std::move(_delayed_reply)); } else { // _tracker->_reply and _delayed_reply points to the same reply. @@ -102,10 +122,12 @@ ApplyBucketDiffState::set_delayed_reply(std::unique_ptr<MessageTracker>&& tracke } void -ApplyBucketDiffState::set_delayed_reply(std::unique_ptr<MessageTracker>&& tracker, MessageSender& sender, std::shared_ptr<api::StorageReply>&& delayed_reply) +ApplyBucketDiffState::set_delayed_reply(std::unique_ptr<MessageTracker>&& tracker, MessageSender& sender, FileStorThreadMetrics::Op* op_metrics, const framework::MilliSecTimer& op_start_time, std::shared_ptr<api::StorageReply>&& delayed_reply) { _tracker = std::move(tracker); _sender = &sender; + _op_metrics = op_metrics; + _op_start_time = op_start_time; _delayed_reply = std::move(delayed_reply); } @@ -115,10 +137,16 @@ ApplyBucketDiffState::set_tracker(std::unique_ptr<MessageTracker>&& tracker) _tracker = std::move(tracker); } +void +ApplyBucketDiffState::set_merge_start_time(const framework::MilliSecTimer& merge_start_time) +{ + _merge_start_time = merge_start_time; +} + std::shared_ptr<ApplyBucketDiffState> -ApplyBucketDiffState::create(const MergeBucketInfoSyncer& merge_bucket_info_syncer, const spi::Bucket& bucket, RetainGuard&& retain_guard) +ApplyBucketDiffState::create(const MergeBucketInfoSyncer& merge_bucket_info_syncer, MergeHandlerMetrics& merge_handler_metrics, const framework::Clock& clock, const spi::Bucket& bucket, RetainGuard&& retain_guard) { - std::unique_ptr<ApplyBucketDiffState> state(new ApplyBucketDiffState(merge_bucket_info_syncer, bucket, std::move(retain_guard))); + std::unique_ptr<ApplyBucketDiffState> state(new ApplyBucketDiffState(merge_bucket_info_syncer, merge_handler_metrics, clock, bucket, std::move(retain_guard))); return std::shared_ptr<ApplyBucketDiffState>(state.release(), Deleter()); } diff --git 
a/storage/src/vespa/storage/persistence/apply_bucket_diff_state.h b/storage/src/vespa/storage/persistence/apply_bucket_diff_state.h index 20e380e85a7..49625bbf8b5 100644 --- a/storage/src/vespa/storage/persistence/apply_bucket_diff_state.h +++ b/storage/src/vespa/storage/persistence/apply_bucket_diff_state.h @@ -3,6 +3,8 @@ #pragma once #include <vespa/persistence/spi/bucket.h> +#include <vespa/storageframework/generic/clock/timer.h> +#include <vespa/storage/persistence/filestorage/filestormetrics.h> #include <vespa/vespalib/util/retain_guard.h> #include <future> #include <memory> @@ -27,6 +29,8 @@ class MergeBucketInfoSyncer; class ApplyBucketDiffState { class Deleter; const MergeBucketInfoSyncer& _merge_bucket_info_syncer; + MergeHandlerMetrics& _merge_handler_metrics; + framework::MilliSecTimer _start_time; spi::Bucket _bucket; vespalib::string _fail_message; std::atomic_flag _failed_flag; @@ -35,11 +39,14 @@ class ApplyBucketDiffState { std::unique_ptr<MessageTracker> _tracker; std::shared_ptr<api::StorageReply> _delayed_reply; MessageSender* _sender; + FileStorThreadMetrics::Op* _op_metrics; + std::optional<framework::MilliSecTimer> _op_start_time; vespalib::RetainGuard _retain_guard; + std::optional<framework::MilliSecTimer> _merge_start_time; - ApplyBucketDiffState(const MergeBucketInfoSyncer &merge_bucket_info_syncer, const spi::Bucket& bucket, vespalib::RetainGuard&& retain_guard); + ApplyBucketDiffState(const MergeBucketInfoSyncer &merge_bucket_info_syncer, MergeHandlerMetrics& merge_handler_metrics, const framework::Clock& clock, const spi::Bucket& bucket, vespalib::RetainGuard&& retain_guard); public: - static std::shared_ptr<ApplyBucketDiffState> create(const MergeBucketInfoSyncer &merge_bucket_info_syncer, const spi::Bucket& bucket, vespalib::RetainGuard&& retain_guard); + static std::shared_ptr<ApplyBucketDiffState> create(const MergeBucketInfoSyncer &merge_bucket_info_syncer, MergeHandlerMetrics& merge_handler_metrics, const framework::Clock& clock, 
const spi::Bucket& bucket, vespalib::RetainGuard&& retain_guard); ~ApplyBucketDiffState(); void on_entry_complete(std::unique_ptr<storage::spi::Result> result, const document::DocumentId &doc_id, const char *op); void wait(); @@ -48,8 +55,9 @@ public: void sync_bucket_info(); std::future<vespalib::string> get_future(); void set_delayed_reply(std::unique_ptr<MessageTracker>&& tracker, std::shared_ptr<api::StorageReply>&& delayed_reply); - void set_delayed_reply(std::unique_ptr<MessageTracker>&& tracker, MessageSender& sender, std::shared_ptr<api::StorageReply>&& delayed_reply); + void set_delayed_reply(std::unique_ptr<MessageTracker>&& tracker, MessageSender& sender, FileStorThreadMetrics::Op* op_metrics, const framework::MilliSecTimer& op_start_time, std::shared_ptr<api::StorageReply>&& delayed_reply); void set_tracker(std::unique_ptr<MessageTracker>&& tracker); + void set_merge_start_time(const framework::MilliSecTimer& merge_start_time); const spi::Bucket& get_bucket() const noexcept { return _bucket; } }; diff --git a/storage/src/vespa/storage/persistence/filestorage/CMakeLists.txt b/storage/src/vespa/storage/persistence/filestorage/CMakeLists.txt index b23ec142448..62d1a80501a 100644 --- a/storage/src/vespa/storage/persistence/filestorage/CMakeLists.txt +++ b/storage/src/vespa/storage/persistence/filestorage/CMakeLists.txt @@ -1,6 +1,8 @@ # Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
vespa_add_library(storage_filestorpersistence OBJECT SOURCES + active_operations_metrics.cpp + active_operations_stats.cpp filestorhandlerimpl.cpp filestormanager.cpp filestormetrics.cpp diff --git a/storage/src/vespa/storage/persistence/filestorage/active_operations_metrics.cpp b/storage/src/vespa/storage/persistence/filestorage/active_operations_metrics.cpp new file mode 100644 index 00000000000..b48ef5bf463 --- /dev/null +++ b/storage/src/vespa/storage/persistence/filestorage/active_operations_metrics.cpp @@ -0,0 +1,16 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. + +#include "active_operations_metrics.h" + +namespace storage { + +ActiveOperationsMetrics::ActiveOperationsMetrics(metrics::MetricSet* parent) + : MetricSet("active_operations", {}, "metrics for active operations at service layer", parent), + size("size", {}, "Number of concurrent active operations", this), + latency("latency", {}, "Latency (in ms) for active operations", this) +{ +} + +ActiveOperationsMetrics::~ActiveOperationsMetrics() = default; + +} diff --git a/storage/src/vespa/storage/persistence/filestorage/active_operations_metrics.h b/storage/src/vespa/storage/persistence/filestorage/active_operations_metrics.h new file mode 100644 index 00000000000..94856d70f9e --- /dev/null +++ b/storage/src/vespa/storage/persistence/filestorage/active_operations_metrics.h @@ -0,0 +1,22 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. + +#pragma once + +#include <vespa/metrics/metricset.h> +#include <vespa/metrics/valuemetric.h> + +namespace storage { + +/* + * Metrics for active operations with bucket lock at service layer. 
+ */ +struct ActiveOperationsMetrics : public metrics::MetricSet +{ + metrics::DoubleAverageMetric size; + metrics::DoubleAverageMetric latency; + + ActiveOperationsMetrics(metrics::MetricSet* parent); + ~ActiveOperationsMetrics() override; +}; + +} diff --git a/storage/src/vespa/storage/persistence/filestorage/active_operations_stats.cpp b/storage/src/vespa/storage/persistence/filestorage/active_operations_stats.cpp new file mode 100644 index 00000000000..bd7468971d4 --- /dev/null +++ b/storage/src/vespa/storage/persistence/filestorage/active_operations_stats.cpp @@ -0,0 +1,133 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. + +#include "active_operations_stats.h" + +namespace storage { + +namespace { + +template <typename T> +void update_min_max(T value, std::optional<T>& min, std::optional<T>& max) +{ + if (!min.has_value() || value < min.value()) { + min = value; + } + if (!max.has_value() || value > max.value()) { + max = value; + } +} + +template <typename T> +void merge_min(std::optional<T>& min, const std::optional<T>& rhs_min) +{ + if (!rhs_min.has_value()) { + return; + } + if (min.has_value() && !(rhs_min.value() < min.value())) { + return; + } + min = rhs_min; +} + +template <typename T> +void merge_max(std::optional<T>& max, const std::optional<T>& rhs_max) +{ + if (!rhs_max.has_value()) { + return; + } + if (max.has_value() && !(rhs_max.value() > max.value())) { + return; + } + max = rhs_max; +} + +template <typename T> +void merge_min_max_sum(std::optional<T>& lhs, const std::optional<T>& rhs) +{ + if (!rhs.has_value()) { + return; + } + if (lhs.has_value()) { + lhs = lhs.value() + rhs.value(); + return; + } + lhs = rhs; +} + +} + +ActiveOperationsStats::ActiveOperationsStats() noexcept + : _size_samples(0u), + _total_size(0u), + _active_size(0u), + _min_size(), + _max_size(), + _latency_samples(0u), + _total_latency(0.0), + _min_latency(), + _max_latency() +{ +} + 
+ActiveOperationsStats::~ActiveOperationsStats() = default; + + +void +ActiveOperationsStats::update_size() noexcept +{ + ++_size_samples; + _total_size += _active_size; + update_min_max(_active_size, _min_size, _max_size); +} + +ActiveOperationsStats& +ActiveOperationsStats::operator-=(const ActiveOperationsStats& rhs) noexcept +{ + _size_samples -= rhs._size_samples; + _total_size -= rhs._total_size; + _latency_samples -= rhs._latency_samples; + _total_latency -= rhs._total_latency; + return *this; +} + +void +ActiveOperationsStats::merge(const ActiveOperationsStats& rhs) noexcept +{ + _size_samples += rhs._size_samples; + _total_size += rhs._total_size; + _active_size += rhs._active_size; + merge_min_max_sum(_min_size, rhs._min_size); + merge_min_max_sum(_max_size, rhs._max_size); + _latency_samples += rhs._latency_samples; + _total_latency += rhs._total_latency; + merge_min(_min_latency, rhs._min_latency); + merge_max(_max_latency, rhs._max_latency); +} + +void +ActiveOperationsStats::operation_started() noexcept +{ + ++_active_size; + update_size(); +} + +void +ActiveOperationsStats::operation_done(double latency) noexcept +{ + --_active_size; + update_size(); + ++_latency_samples; + _total_latency += latency; + update_min_max(latency, _min_latency, _max_latency); +} + +void +ActiveOperationsStats::reset_min_max() noexcept +{ + _min_size.reset(); + _max_size.reset(); + _min_latency.reset(); + _max_latency.reset(); +} + +} diff --git a/storage/src/vespa/storage/persistence/filestorage/active_operations_stats.h b/storage/src/vespa/storage/persistence/filestorage/active_operations_stats.h new file mode 100644 index 00000000000..bdf4e87b1f5 --- /dev/null +++ b/storage/src/vespa/storage/persistence/filestorage/active_operations_stats.h @@ -0,0 +1,45 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+ +#pragma once + +#include <cstdint> +#include <optional> + +namespace storage { + +/* + * Stats for active operations at service layer + */ +class ActiveOperationsStats +{ + uint64_t _size_samples; + uint64_t _total_size; + uint32_t _active_size; + std::optional<uint32_t> _min_size; + std::optional<uint32_t> _max_size; + uint64_t _latency_samples; + double _total_latency; + std::optional<double> _min_latency; + std::optional<double> _max_latency; + + void update_size() noexcept; +public: + ActiveOperationsStats() noexcept; + ~ActiveOperationsStats(); + ActiveOperationsStats& operator-=(const ActiveOperationsStats& rhs) noexcept; + void merge(const ActiveOperationsStats& rhs) noexcept; + void operation_started() noexcept; + void operation_done(double latency) noexcept; + void reset_min_max() noexcept; + uint64_t get_size_samples() const noexcept { return _size_samples; } + uint64_t get_latency_samples() const noexcept { return _latency_samples; } + uint64_t get_total_size() const noexcept { return _total_size; } + uint32_t get_active_size() const noexcept { return _active_size; } + double get_total_latency() const noexcept { return _total_latency; } + const std::optional<uint32_t>& get_min_size() const noexcept { return _min_size; } + const std::optional<uint32_t>& get_max_size() const noexcept { return _max_size; } + const std::optional<double>& get_min_latency() const noexcept { return _min_latency; } + const std::optional<double>& get_max_latency() const noexcept { return _max_latency; } +}; + +} diff --git a/storage/src/vespa/storage/persistence/filestorage/filestorhandler.h b/storage/src/vespa/storage/persistence/filestorage/filestorhandler.h index 70ed9845cb0..a980b5aa2e1 100644 --- a/storage/src/vespa/storage/persistence/filestorage/filestorhandler.h +++ b/storage/src/vespa/storage/persistence/filestorage/filestorhandler.h @@ -29,6 +29,7 @@ namespace framework { class HttpUrlPath; } +class ActiveOperationsStats; class FileStorHandlerImpl; struct 
FileStorMetrics; struct MessageSender; @@ -48,14 +49,29 @@ public: {} }; - class BucketLockInterface { + // Interface that is used for "early ACKing" a potentially longer-running async + // operation when the persistence thread processing the operation has completed + // the synchronous aspects of the operation (such as dispatching one or more + // async operations over the SPI). + class OperationSyncPhaseDoneNotifier { public: - using SP = std::shared_ptr<BucketLockInterface>; + virtual ~OperationSyncPhaseDoneNotifier() = default; + + // Informs the caller if the operation wants to know when the persistence thread is + // done with the synchronous aspects of the operation. Returning false allows the caller + // to optimize for the case where this does _not_ need to happen. + [[nodiscard]] virtual bool wants_sync_phase_done_notification() const noexcept = 0; + // Invoked at most once at the point where the persistence thread is done handling the synchronous + // aspects of the operation iff wants_sync_phase_done_notification() was initially true. 
+ virtual void signal_operation_sync_phase_done() noexcept = 0; + }; - virtual const document::Bucket &getBucket() const = 0; - virtual api::LockingRequirements lockingRequirements() const noexcept = 0; + class BucketLockInterface : public OperationSyncPhaseDoneNotifier { + public: + using SP = std::shared_ptr<BucketLockInterface>; - virtual ~BucketLockInterface() = default; + [[nodiscard]] virtual const document::Bucket &getBucket() const = 0; + [[nodiscard]] virtual api::LockingRequirements lockingRequirements() const noexcept = 0; }; using LockedMessage = std::pair<BucketLockInterface::SP, api::StorageMessage::SP>; @@ -233,6 +249,7 @@ public: virtual std::string dumpQueue() const = 0; + virtual ActiveOperationsStats get_active_operations_stats(bool reset_min_max) const = 0; }; } // storage diff --git a/storage/src/vespa/storage/persistence/filestorage/filestorhandlerimpl.cpp b/storage/src/vespa/storage/persistence/filestorage/filestorhandlerimpl.cpp index e395a7df9e0..c6991803b4d 100644 --- a/storage/src/vespa/storage/persistence/filestorage/filestorhandlerimpl.cpp +++ b/storage/src/vespa/storage/persistence/filestorage/filestorhandlerimpl.cpp @@ -55,7 +55,8 @@ FileStorHandlerImpl::FileStorHandlerImpl(uint32_t numThreads, uint32_t numStripe _bucketIdFactory(_component.getBucketIdFactory()), _getNextMessageTimeout(100ms), _max_active_merges_per_stripe(per_stripe_merge_limit(numThreads, numStripes)), - _paused(false) + _paused(false), + _last_active_operations_stats() { assert(numStripes > 0); _stripes.reserve(numStripes); @@ -297,6 +298,33 @@ FileStorHandlerImpl::abortQueuedOperations(const AbortBucketOperationsCommand& c } void +FileStorHandlerImpl::update_active_operations_metrics() +{ + auto& metrics = _metrics->active_operations; + auto stats = get_active_operations_stats(true); + auto& last_stats = _last_active_operations_stats; + auto delta_stats = stats; + if (last_stats.has_value()) { + delta_stats -= last_stats.value(); + } + last_stats = stats; + 
uint32_t size_samples = delta_stats.get_size_samples(); + if (size_samples != 0) { + double min_size = delta_stats.get_min_size().value_or(0); + double max_size = delta_stats.get_max_size().value_or(0); + double avg_size = ((double) delta_stats.get_total_size()) / size_samples; + metrics.size.addValueBatch(avg_size, size_samples, min_size, max_size); + } + uint32_t latency_samples = delta_stats.get_latency_samples(); + if (latency_samples != 0) { + double min_latency = delta_stats.get_min_latency().value_or(0.0); + double max_latency = delta_stats.get_max_latency().value_or(0.0); + double avg_latency = delta_stats.get_total_latency() / latency_samples; + metrics.latency.addValueBatch(avg_latency, latency_samples, min_latency, max_latency); + } +} + +void FileStorHandlerImpl::updateMetrics(const MetricLockGuard &) { std::lock_guard lockGuard(_mergeStatesLock); @@ -307,6 +335,7 @@ FileStorHandlerImpl::updateMetrics(const MetricLockGuard &) const auto & m = stripe->averageQueueWaitingTime; _metrics->averageQueueWaitingTime.addTotalValueWithCount(m.getTotal(), m.getCount()); } + update_active_operations_metrics(); } bool @@ -852,7 +881,8 @@ FileStorHandlerImpl::Stripe::Stripe(const FileStorHandlerImpl & owner, MessageSe _cond(std::make_unique<std::condition_variable>()), _queue(std::make_unique<PriorityQueue>()), _lockedBuckets(), - _active_merges(0) + _active_merges(0), + _active_operations_stats() {} FileStorHandler::LockedMessage @@ -1024,28 +1054,34 @@ message_type_is_merge_related(api::MessageType::Id msg_type_id) { void FileStorHandlerImpl::Stripe::release(const document::Bucket & bucket, api::LockingRequirements reqOfReleasedLock, - api::StorageMessage::Id lockMsgId) + api::StorageMessage::Id lockMsgId, + bool was_active_merge) { std::unique_lock guard(*_lock); auto iter = _lockedBuckets.find(bucket); assert(iter != _lockedBuckets.end()); auto& entry = iter->second; + Clock::time_point start_time; if (reqOfReleasedLock == api::LockingRequirements::Exclusive) { 
assert(entry._exclusiveLock); assert(entry._exclusiveLock->msgId == lockMsgId); - if (message_type_is_merge_related(entry._exclusiveLock->msgType)) { + if (was_active_merge) { assert(_active_merges > 0); --_active_merges; } + start_time = entry._exclusiveLock.value().timestamp; entry._exclusiveLock.reset(); } else { assert(!entry._exclusiveLock); auto shared_iter = entry._sharedLocks.find(lockMsgId); assert(shared_iter != entry._sharedLocks.end()); + start_time = shared_iter->second.timestamp; entry._sharedLocks.erase(shared_iter); } - + Clock::time_point now_ts = Clock::now(); + double latency = std::chrono::duration<double, std::milli>(now_ts - start_time).count(); + _active_operations_stats.operation_done(latency); if (!entry._exclusiveLock && entry._sharedLocks.empty()) { _lockedBuckets.erase(iter); // No more locks held } @@ -1054,13 +1090,27 @@ FileStorHandlerImpl::Stripe::release(const document::Bucket & bucket, } void +FileStorHandlerImpl::Stripe::decrease_active_sync_merges_counter() noexcept +{ + std::unique_lock guard(*_lock); + assert(_active_merges > 0); + const bool may_have_blocked_merge = (_active_merges == _owner._max_active_merges_per_stripe); + --_active_merges; + if (may_have_blocked_merge) { + guard.unlock(); + _cond->notify_all(); + } +} + +void FileStorHandlerImpl::Stripe::lock(const monitor_guard &, const document::Bucket & bucket, - api::LockingRequirements lockReq, const LockEntry & lockEntry) { + api::LockingRequirements lockReq, bool count_as_active_merge, + const LockEntry & lockEntry) { auto& entry = _lockedBuckets[bucket]; assert(!entry._exclusiveLock); if (lockReq == api::LockingRequirements::Exclusive) { assert(entry._sharedLocks.empty()); - if (message_type_is_merge_related(lockEntry.msgType)) { + if (count_as_active_merge) { ++_active_merges; } entry._exclusiveLock = lockEntry; @@ -1070,6 +1120,7 @@ FileStorHandlerImpl::Stripe::lock(const monitor_guard &, const document::Bucket (void) inserted; assert(inserted.second); } + 
_active_operations_stats.operation_started(); } bool @@ -1104,28 +1155,54 @@ FileStorHandlerImpl::Stripe::operationIsInhibited(const monitor_guard & guard, c return isLocked(guard, bucket, msg.lockingRequirements()); } -FileStorHandlerImpl::BucketLock::BucketLock(const monitor_guard & guard, Stripe& stripe, - const document::Bucket &bucket, uint8_t priority, +ActiveOperationsStats +FileStorHandlerImpl::Stripe::get_active_operations_stats(bool reset_min_max) const +{ + std::lock_guard guard(*_lock); + auto result = _active_operations_stats; + if (reset_min_max) { + _active_operations_stats.reset_min_max(); + } + return result; +} + +FileStorHandlerImpl::BucketLock::BucketLock(const monitor_guard& guard, Stripe& stripe, + const document::Bucket& bucket, uint8_t priority, api::MessageType::Id msgType, api::StorageMessage::Id msgId, api::LockingRequirements lockReq) : _stripe(stripe), _bucket(bucket), _uniqueMsgId(msgId), - _lockReq(lockReq) + _lockReq(lockReq), + _counts_towards_merge_limit(false) { if (_bucket.getBucketId().getRawId() != 0) { - _stripe.lock(guard, _bucket, lockReq, Stripe::LockEntry(priority, msgType, msgId)); + _counts_towards_merge_limit = message_type_is_merge_related(msgType); + _stripe.lock(guard, _bucket, lockReq, _counts_towards_merge_limit, Stripe::LockEntry(priority, msgType, msgId)); LOG(spam, "Locked bucket %s for message %" PRIu64 " with priority %u in mode %s", - bucket.getBucketId().toString().c_str(), msgId, priority, api::to_string(lockReq)); + bucket.toString().c_str(), msgId, priority, api::to_string(lockReq)); } } FileStorHandlerImpl::BucketLock::~BucketLock() { if (_bucket.getBucketId().getRawId() != 0) { - _stripe.release(_bucket, _lockReq, _uniqueMsgId); + _stripe.release(_bucket, _lockReq, _uniqueMsgId, _counts_towards_merge_limit); LOG(spam, "Unlocked bucket %s for message %" PRIu64 " in mode %s", - _bucket.getBucketId().toString().c_str(), _uniqueMsgId, api::to_string(_lockReq)); + _bucket.toString().c_str(), _uniqueMsgId, 
api::to_string(_lockReq)); + } +} + +void +FileStorHandlerImpl::BucketLock::signal_operation_sync_phase_done() noexcept +{ + // Not atomic, only destructor can read/write this other than this function, and since + // a strong ref must already be held to this object by the caller, we cannot race with it. + if (_counts_towards_merge_limit){ + LOG(spam, "Synchronous phase for bucket %s is done; reducing active count proactively", + _bucket.toString().c_str()); + _stripe.decrease_active_sync_merges_counter(); + _counts_towards_merge_limit = false; } } @@ -1231,10 +1308,10 @@ FileStorHandlerImpl::getStatus(std::ostream& out, const framework::HttpUrlPath& } std::lock_guard mergeGuard(_mergeStatesLock); - out << "<tr><td>Active merge operations</td><td>" << _mergeStates.size() << "</td></tr>\n"; + out << "<p>Active merge operations: " << _mergeStates.size() << "</p>\n"; if (verbose) { out << "<h4>Active merges</h4>\n"; - if (_mergeStates.size() == 0) { + if (_mergeStates.empty()) { out << "None\n"; } for (auto & entry : _mergeStates) { @@ -1243,6 +1320,17 @@ FileStorHandlerImpl::getStatus(std::ostream& out, const framework::HttpUrlPath& } } +ActiveOperationsStats +FileStorHandlerImpl::get_active_operations_stats(bool reset_min_max) const +{ + ActiveOperationsStats result; + for (const auto & stripe : _stripes) { + auto stats = stripe.get_active_operations_stats(reset_min_max); + result.merge(stats); + } + return result; +} + void FileStorHandlerImpl::waitUntilNoLocks() { diff --git a/storage/src/vespa/storage/persistence/filestorage/filestorhandlerimpl.h b/storage/src/vespa/storage/persistence/filestorage/filestorhandlerimpl.h index 5f212b18a7f..5d68be8a800 100644 --- a/storage/src/vespa/storage/persistence/filestorage/filestorhandlerimpl.h +++ b/storage/src/vespa/storage/persistence/filestorage/filestorhandlerimpl.h @@ -16,6 +16,7 @@ #pragma once #include "filestorhandler.h" +#include "active_operations_stats.h" #include <vespa/document/bucket/bucketid.h> #include 
<vespa/metrics/metrictimer.h> #include <vespa/storage/common/servicelayercomponent.h> @@ -114,7 +115,8 @@ public: return _queue->size(); } void release(const document::Bucket & bucket, api::LockingRequirements reqOfReleasedLock, - api::StorageMessage::Id lockMsgId); + api::StorageMessage::Id lockMsgId, bool was_active_merge); + void decrease_active_sync_merges_counter() noexcept; // Subsumes isLocked bool operationIsInhibited(const monitor_guard &, const document::Bucket&, @@ -123,7 +125,8 @@ public: api::LockingRequirements lockReq) const noexcept; void lock(const monitor_guard &, const document::Bucket & bucket, - api::LockingRequirements lockReq, const LockEntry & lockEntry); + api::LockingRequirements lockReq, bool count_as_active_merge, + const LockEntry & lockEntry); std::shared_ptr<FileStorHandler::BucketLockInterface> lock(const document::Bucket & bucket, api::LockingRequirements lockReq); void failOperations(const document::Bucket & bucket, const api::ReturnCode & code); @@ -136,6 +139,7 @@ public: PriorityQueue & exposeQueue() { return *_queue; } BucketIdx & exposeBucketIdx() { return bmi::get<2>(*_queue); } void setMetrics(FileStorStripeMetrics * metrics) { _metrics = metrics; } + ActiveOperationsStats get_active_operations_stats(bool reset_min_max) const; private: bool hasActive(monitor_guard & monitor, const AbortBucketOperationsCommand& cmd) const; FileStorHandler::LockedMessage get_next_async_message(monitor_guard& guard); @@ -153,6 +157,7 @@ public: std::unique_ptr<PriorityQueue> _queue; LockedBuckets _lockedBuckets; uint32_t _active_merges; + mutable ActiveOperationsStats _active_operations_stats; }; class BucketLock : public FileStorHandler::BucketLockInterface { @@ -165,12 +170,17 @@ public: const document::Bucket &getBucket() const override { return _bucket; } api::LockingRequirements lockingRequirements() const noexcept override { return _lockReq; } + void signal_operation_sync_phase_done() noexcept override; + bool 
wants_sync_phase_done_notification() const noexcept override { + return _counts_towards_merge_limit; + } private: Stripe & _stripe; - document::Bucket _bucket; + const document::Bucket _bucket; api::StorageMessage::Id _uniqueMsgId; api::LockingRequirements _lockReq; + bool _counts_towards_merge_limit; }; @@ -232,6 +242,9 @@ public: // Implements ResumeGuard::Callback void resume() override; + // Use only for testing + framework::MetricUpdateHook& get_metric_update_hook_for_testing() { return *this; } + private: ServiceLayerComponent _component; std::atomic<DiskState> _state; @@ -246,6 +259,7 @@ private: mutable std::mutex _pauseMonitor; mutable std::condition_variable _pauseCond; std::atomic<bool> _paused; + std::optional<ActiveOperationsStats> _last_active_operations_stats; // Returns the index in the targets array we are sending to, or -1 if none of them match. int calculateTargetBasedOnDocId(const api::StorageMessage& msg, std::vector<RemapInfo*>& targets); @@ -277,6 +291,8 @@ private: static std::unique_ptr<api::StorageReply> makeQueueTimeoutReply(api::StorageMessage& msg); static bool messageMayBeAborted(const api::StorageMessage& msg); + void update_active_operations_metrics(); + // Implements framework::MetricUpdateHook void updateMetrics(const MetricLockGuard &) override; @@ -322,6 +338,7 @@ private: return _stripes[stripeId].getNextMessage(timeout); } + ActiveOperationsStats get_active_operations_stats(bool reset_min_max) const override; }; } // storage diff --git a/storage/src/vespa/storage/persistence/filestorage/filestormanager.cpp b/storage/src/vespa/storage/persistence/filestorage/filestormanager.cpp index c32efb6aa66..2cfb3a2cffe 100644 --- a/storage/src/vespa/storage/persistence/filestorage/filestormanager.cpp +++ b/storage/src/vespa/storage/persistence/filestorage/filestormanager.cpp @@ -23,6 +23,7 @@ #include <vespa/vespalib/util/stringfmt.h> #include <vespa/vespalib/util/idestructorcallback.h> #include 
<vespa/vespalib/util/sequencedtaskexecutor.h> +#include <algorithm> #include <thread> #include <vespa/log/bufferedlogger.h> @@ -882,28 +883,67 @@ namespace { }; } +bool +FileStorManager::maintenance_in_all_spaces(const lib::Node& node) const noexcept +{ + for (auto& elem : _component.getBucketSpaceRepo()) { + ContentBucketSpace& bucket_space = *elem.second; + auto derived_cluster_state = bucket_space.getClusterState(); + if (!derived_cluster_state->getNodeState(node).getState().oneOf("m")) { + return false; + } + }; + return true; +} + +bool +FileStorManager::should_deactivate_buckets(const ContentBucketSpace& space, + bool node_up_in_space, + bool maintenance_in_all_spaces) noexcept +{ + // Important: this MUST match the semantics in proton::BucketHandler::notifyClusterStateChanged()! + // Otherwise, the content layer and proton will be out of sync in terms of bucket activation state. + if (maintenance_in_all_spaces) { + return false; + } + return ((space.getNodeUpInLastNodeStateSeenByProvider() && !node_up_in_space) + || space.getNodeMaintenanceInLastNodeStateSeenByProvider()); +} + +void +FileStorManager::maybe_log_received_cluster_state() +{ + if (LOG_WOULD_LOG(debug)) { + auto cluster_state_bundle = _component.getStateUpdater().getClusterStateBundle(); + auto baseline_state = cluster_state_bundle->getBaselineClusterState(); + LOG(debug, "FileStorManager received baseline cluster state '%s'", baseline_state->toString().c_str()); + } +} + void FileStorManager::updateState() { - auto clusterStateBundle = _component.getStateUpdater().getClusterStateBundle(); - lib::ClusterState::CSP state(clusterStateBundle->getBaselineClusterState()); - lib::Node node(_component.getNodeType(), _component.getIndex()); + maybe_log_received_cluster_state(); + const lib::Node node(_component.getNodeType(), _component.getIndex()); + const bool in_maintenance = maintenance_in_all_spaces(node); - LOG(debug, "FileStorManager received cluster state '%s'", state->toString().c_str()); for 
(const auto &elem : _component.getBucketSpaceRepo()) { BucketSpace bucketSpace(elem.first); - ContentBucketSpace &contentBucketSpace = *elem.second; + ContentBucketSpace& contentBucketSpace = *elem.second; auto derivedClusterState = contentBucketSpace.getClusterState(); - bool nodeUp = derivedClusterState->getNodeState(node).getState().oneOf("uir"); - // If edge where we go down - if (contentBucketSpace.getNodeUpInLastNodeStateSeenByProvider() && !nodeUp) { - LOG(debug, "Received cluster state where this node is down; de-activating all buckets in database for bucket space %s", bucketSpace.toString().c_str()); + const bool node_up_in_space = derivedClusterState->getNodeState(node).getState().oneOf("uir"); + if (should_deactivate_buckets(contentBucketSpace, node_up_in_space, in_maintenance)) { + LOG(debug, "Received cluster state where this node is down; de-activating all buckets " + "in database for bucket space %s", bucketSpace.toString().c_str()); Deactivator deactivator; contentBucketSpace.bucketDatabase().for_each_mutable_unordered( std::ref(deactivator), "FileStorManager::updateState"); } - contentBucketSpace.setNodeUpInLastNodeStateSeenByProvider(nodeUp); - spi::ClusterState spiState(*derivedClusterState, _component.getIndex(), *contentBucketSpace.getDistribution()); + contentBucketSpace.setNodeUpInLastNodeStateSeenByProvider(node_up_in_space); + contentBucketSpace.setNodeMaintenanceInLastNodeStateSeenByProvider(in_maintenance); + spi::ClusterState spiState(*derivedClusterState, _component.getIndex(), + *contentBucketSpace.getDistribution(), + in_maintenance); _provider->setClusterState(bucketSpace, spiState); } } diff --git a/storage/src/vespa/storage/persistence/filestorage/filestormanager.h b/storage/src/vespa/storage/persistence/filestorage/filestormanager.h index 76cb31f32d4..b7450de13d8 100644 --- a/storage/src/vespa/storage/persistence/filestorage/filestormanager.h +++ b/storage/src/vespa/storage/persistence/filestorage/filestormanager.h @@ -39,6 +39,7 
@@ namespace api { } namespace spi { struct PersistenceProvider; } +class ContentBucketSpace; struct FileStorManagerTest; class ReadBucketList; class BucketOwnershipNotifier; @@ -170,6 +171,11 @@ private: void onFlush(bool downwards) override; void reportHtmlStatus(std::ostream&, const framework::HttpUrlPath&) const override; void storageDistributionChanged() override; + [[nodiscard]] static bool should_deactivate_buckets(const ContentBucketSpace& space, + bool node_up_in_space, + bool maintenance_in_all_spaces) noexcept; + [[nodiscard]] bool maintenance_in_all_spaces(const lib::Node& node) const noexcept; + void maybe_log_received_cluster_state(); void updateState(); void propagateClusterStates(); void update_reported_state_after_db_init(); diff --git a/storage/src/vespa/storage/persistence/filestorage/filestormetrics.cpp b/storage/src/vespa/storage/persistence/filestorage/filestormetrics.cpp index cb7e141f5db..c119fdc4f69 100644 --- a/storage/src/vespa/storage/persistence/filestorage/filestormetrics.cpp +++ b/storage/src/vespa/storage/persistence/filestorage/filestormetrics.cpp @@ -206,7 +206,8 @@ FileStorDiskMetrics::FileStorDiskMetrics(const std::string& name, const std::str waitingForLockHitRate("waitingforlockrate", {}, "Amount of times a filestor thread has needed to wait for " "lock to take next message in queue.", this), - lockWaitTime("lockwaittime", {}, "Amount of time waiting used waiting for lock.", this) + lockWaitTime("lockwaittime", {}, "Amount of time waiting used waiting for lock.", this), + active_operations(this) { pendingMerges.unsetOnZeroValue(); waitingForLockHitRate.unsetOnZeroValue(); diff --git a/storage/src/vespa/storage/persistence/filestorage/filestormetrics.h b/storage/src/vespa/storage/persistence/filestorage/filestormetrics.h index 8f7f79add00..7543e6e0771 100644 --- a/storage/src/vespa/storage/persistence/filestorage/filestormetrics.h +++ b/storage/src/vespa/storage/persistence/filestorage/filestormetrics.h @@ -11,6 +11,7 @@ #pragma 
once #include "merge_handler_metrics.h" +#include "active_operations_metrics.h" #include <vespa/metrics/metricset.h> #include <vespa/metrics/summetric.h> @@ -147,7 +148,8 @@ public: metrics::LongAverageMetric queueSize; metrics::LongAverageMetric pendingMerges; metrics::DoubleAverageMetric waitingForLockHitRate; - metrics::DoubleAverageMetric lockWaitTime; + metrics::DoubleAverageMetric lockWaitTime; // unused + ActiveOperationsMetrics active_operations; FileStorDiskMetrics(const std::string& name, const std::string& description, MetricSet* owner); ~FileStorDiskMetrics() override; diff --git a/storage/src/vespa/storage/persistence/mergehandler.cpp b/storage/src/vespa/storage/persistence/mergehandler.cpp index 254a26aa454..0c9cecdb6a1 100644 --- a/storage/src/vespa/storage/persistence/mergehandler.cpp +++ b/storage/src/vespa/storage/persistence/mergehandler.cpp @@ -104,6 +104,29 @@ void check_apply_diff_sync(std::shared_ptr<ApplyBucketDiffState> async_results) } } +FileStorThreadMetrics::Op *get_op_metrics(FileStorThreadMetrics& metrics, const api::StorageReply &reply) { + switch (reply.getType().getId()) { + case api::MessageType::MERGEBUCKET_REPLY_ID: + return &metrics.mergeBuckets; + case api::MessageType::APPLYBUCKETDIFF_REPLY_ID: + return &metrics.applyBucketDiff; + default: + ; + } + return nullptr; +} + +void update_op_metrics(FileStorThreadMetrics& metrics, const api::StorageReply &reply, const framework::MilliSecTimer& start_time) { + auto op_metrics = get_op_metrics(metrics, reply); + if (op_metrics) { + if (reply.getResult().success()) { + op_metrics->latency.addValue(start_time.getElapsedTimeAsDouble()); + } else { + op_metrics->failed.inc(); + } + } +} + } // anonymous namespace void @@ -1223,6 +1246,7 @@ MergeHandler::handleGetBucketDiffReply(api::GetBucketDiffReply& reply, MessageSe } if (replyToSend.get()) { replyToSend->setResult(reply.getResult()); + update_op_metrics(_env._metrics, *replyToSend, s->startTime); sender.sendReply(replyToSend); } } @@ 
-1255,14 +1279,11 @@ MergeHandler::handleApplyBucketDiff(api::ApplyBucketDiffCommand& cmd, MessageTra _env._nodeIndex, index); } if (applyDiffHasLocallyNeededData(cmd.getDiff(), index)) { - framework::MilliSecTimer startTime(_clock); - async_results = ApplyBucketDiffState::create(*this, bucket, RetainGuard(*_monitored_ref_count)); + async_results = ApplyBucketDiffState::create(*this, _env._metrics.merge_handler_metrics, _clock, bucket, RetainGuard(*_monitored_ref_count)); applyDiffLocally(bucket, cmd.getDiff(), index, tracker->context(), async_results); if (!_async_apply_bucket_diff.load(std::memory_order_relaxed)) { check_apply_diff_sync(std::move(async_results)); } - _env._metrics.merge_handler_metrics.mergeDataWriteLatency.addValue( - startTime.getElapsedTimeAsDouble()); } else { LOG(spam, "Merge(%s): Didn't need fetched data on node %u (%u).", bucket.toString().c_str(), _env._nodeIndex, index); @@ -1365,13 +1386,11 @@ MergeHandler::handleApplyBucketDiffReply(api::ApplyBucketDiffReply& reply, Messa _env._metrics.merge_handler_metrics.mergeDataReadLatency.addValue(startTime.getElapsedTimeAsDouble()); } if (applyDiffHasLocallyNeededData(diff, index)) { - framework::MilliSecTimer startTime(_clock); - async_results = ApplyBucketDiffState::create(*this, bucket, RetainGuard(*_monitored_ref_count)); + async_results = ApplyBucketDiffState::create(*this, _env._metrics.merge_handler_metrics, _clock, bucket, RetainGuard(*_monitored_ref_count)); applyDiffLocally(bucket, diff, index, s->context, async_results); if (!_async_apply_bucket_diff.load(std::memory_order_relaxed)) { check_apply_diff_sync(std::move(async_results)); } - _env._metrics.merge_handler_metrics.mergeDataWriteLatency.addValue(startTime.getElapsedTimeAsDouble()); } else { LOG(spam, "Merge(%s): Didn't need fetched data on node %u (%u)", bucket.toString().c_str(), @@ -1416,7 +1435,11 @@ MergeHandler::handleApplyBucketDiffReply(api::ApplyBucketDiffReply& reply, Messa // We have sent something on and shouldn't 
reply now. clearState = false; } else { - _env._metrics.merge_handler_metrics.mergeLatencyTotal.addValue(s->startTime.getElapsedTimeAsDouble()); + if (async_results) { + async_results->set_merge_start_time(s->startTime); + } else { + _env._metrics.merge_handler_metrics.mergeLatencyTotal.addValue(s->startTime.getElapsedTimeAsDouble()); + } } } } else { @@ -1434,7 +1457,8 @@ MergeHandler::handleApplyBucketDiffReply(api::ApplyBucketDiffReply& reply, Messa if (async_results && replyToSend) { replyToSend->setResult(returnCode); - async_results->set_delayed_reply(std::move(tracker), sender, std::move(replyToSend)); + auto op_metrics = get_op_metrics(_env._metrics, *replyToSend); + async_results->set_delayed_reply(std::move(tracker), sender, op_metrics, s->startTime, std::move(replyToSend)); } if (clearState) { _env._fileStorHandler.clearMergeStatus(bucket.getBucket()); @@ -1442,6 +1466,7 @@ MergeHandler::handleApplyBucketDiffReply(api::ApplyBucketDiffReply& reply, Messa if (replyToSend.get()) { // Send on replyToSend->setResult(returnCode); + update_op_metrics(_env._metrics, *replyToSend, s->startTime); sender.sendReply(replyToSend); } } diff --git a/storage/src/vespa/storage/persistence/persistencehandler.cpp b/storage/src/vespa/storage/persistence/persistencehandler.cpp index 3c981e193f2..8b546771b71 100644 --- a/storage/src/vespa/storage/persistence/persistencehandler.cpp +++ b/storage/src/vespa/storage/persistence/persistencehandler.cpp @@ -29,9 +29,29 @@ PersistenceHandler::PersistenceHandler(vespalib::ISequencedTaskExecutor & sequen PersistenceHandler::~PersistenceHandler() = default; +// Guard that allows an operation that may be executed in an async fashion to +// be explicitly notified when the sync phase of the operation is done, i.e. +// when the persistence thread is no longer working on it. An operation that +// does not care about such notifications can safely return a nullptr notifier, +// in which case the guard is a no-op. 
+class OperationSyncPhaseTrackingGuard { + std::shared_ptr<FileStorHandler::OperationSyncPhaseDoneNotifier> _maybe_notifier; +public: + explicit OperationSyncPhaseTrackingGuard(const MessageTracker& tracker) + : _maybe_notifier(tracker.sync_phase_done_notifier_or_nullptr()) + {} + + ~OperationSyncPhaseTrackingGuard() { + if (_maybe_notifier) { + _maybe_notifier->signal_operation_sync_phase_done(); + } + } +}; + MessageTracker::UP PersistenceHandler::handleCommandSplitByType(api::StorageCommand& msg, MessageTracker::UP tracker) const { + OperationSyncPhaseTrackingGuard sync_guard(*tracker); switch (msg.getType().getId()) { case api::MessageType::GET_ID: return _simpleHandler.handleGet(static_cast<api::GetCommand&>(msg), std::move(tracker)); diff --git a/storage/src/vespa/storage/persistence/persistenceutil.cpp b/storage/src/vespa/storage/persistence/persistenceutil.cpp index 3f3d59c11aa..cbfc9463a8c 100644 --- a/storage/src/vespa/storage/persistence/persistenceutil.cpp +++ b/storage/src/vespa/storage/persistence/persistenceutil.cpp @@ -155,6 +155,15 @@ MessageTracker::generateReply(api::StorageCommand& cmd) } } +std::shared_ptr<FileStorHandler::OperationSyncPhaseDoneNotifier> +MessageTracker::sync_phase_done_notifier_or_nullptr() const +{ + if (_bucketLock->wants_sync_phase_done_notification()) { + return _bucketLock; + } + return {}; +} + PersistenceUtil::PersistenceUtil(const ServiceLayerComponent& component, FileStorHandler& fileStorHandler, FileStorThreadMetrics& metrics, spi::PersistenceProvider& provider) : _component(component), diff --git a/storage/src/vespa/storage/persistence/persistenceutil.h b/storage/src/vespa/storage/persistence/persistenceutil.h index 4cc2657ea56..4fd0e60c730 100644 --- a/storage/src/vespa/storage/persistence/persistenceutil.h +++ b/storage/src/vespa/storage/persistence/persistenceutil.h @@ -27,7 +27,7 @@ class PersistenceUtil; class MessageTracker : protected Types { public: - typedef std::unique_ptr<MessageTracker> UP; + using UP = 
std::unique_ptr<MessageTracker>; MessageTracker(const framework::MilliSecTimer & timer, const PersistenceUtil & env, MessageSender & replySender, FileStorHandler::BucketLockInterface::SP bucketLock, std::shared_ptr<api::StorageMessage> msg); @@ -81,6 +81,10 @@ public: bool checkForError(const spi::Result& response); + // Returns a non-nullptr notifier instance iff the underlying operation wants to be notified + // when the sync phase is complete. Otherwise returns a nullptr shared_ptr. + std::shared_ptr<FileStorHandler::OperationSyncPhaseDoneNotifier> sync_phase_done_notifier_or_nullptr() const; + static MessageTracker::UP createForTesting(const framework::MilliSecTimer & timer, PersistenceUtil & env, MessageSender & replySender, FileStorHandler::BucketLockInterface::SP bucketLock, std::shared_ptr<api::StorageMessage> msg); diff --git a/storage/src/vespa/storage/storageserver/servicelayernode.cpp b/storage/src/vespa/storage/storageserver/servicelayernode.cpp index d9b15fdac65..1cb2334803e 100644 --- a/storage/src/vespa/storage/storageserver/servicelayernode.cpp +++ b/storage/src/vespa/storage/storageserver/servicelayernode.cpp @@ -18,6 +18,7 @@ #include <vespa/persistence/spi/exceptions.h> #include <vespa/vespalib/util/exceptions.h> #include <vespa/messagebus/rpcmessagebus.h> +#include <vespa/config/common/exceptions.h> #include <vespa/log/log.h> LOG_SETUP(".node.servicelayer"); @@ -52,6 +53,9 @@ void ServiceLayerNode::init() } catch (spi::HandledException& e) { requestShutdown("Failed to initialize: " + e.getMessage()); throw; + } catch (const config::ConfigTimeoutException &e) { + LOG(warning, "Error subscribing to initial config: '%s'", e.what()); + throw; } catch (const vespalib::NetworkSetupFailureException & e) { LOG(warning, "Network failure: '%s'", e.what()); throw; diff --git a/storage/src/vespa/storage/storageserver/storagenode.cpp b/storage/src/vespa/storage/storageserver/storagenode.cpp index 2e199db1f07..c19fca8c58c 100644 --- 
a/storage/src/vespa/storage/storageserver/storagenode.cpp +++ b/storage/src/vespa/storage/storageserver/storagenode.cpp @@ -485,7 +485,7 @@ void StorageNode::configure(std::unique_ptr<StorDistributionConfig> config) { } } void -StorageNode::configure(std::unique_ptr<document::DocumenttypesConfig> config, +StorageNode::configure(std::unique_ptr<document::config::DocumenttypesConfig> config, bool hasChanged, int64_t generation) { log_config_received(*config); diff --git a/storage/src/vespa/storage/storageserver/storagenode.h b/storage/src/vespa/storage/storageserver/storagenode.h index 74a070cb105..c49737af78b 100644 --- a/storage/src/vespa/storage/storageserver/storagenode.h +++ b/storage/src/vespa/storage/storageserver/storagenode.h @@ -133,7 +133,7 @@ private: void configure(std::unique_ptr<StorServerConfig> config) override; void configure(std::unique_ptr<UpgradingConfig> config) override; void configure(std::unique_ptr<StorDistributionConfig> config) override; - virtual void configure(std::unique_ptr<document::DocumenttypesConfig> config, + virtual void configure(std::unique_ptr<document::config::DocumenttypesConfig> config, bool hasChanged, int64_t generation); void configure(std::unique_ptr<BucketspacesConfig>) override; void updateUpgradeFlag(const UpgradingConfig&); @@ -148,13 +148,13 @@ protected: std::unique_ptr<StorServerConfig> _serverConfig; std::unique_ptr<UpgradingConfig> _clusterConfig; std::unique_ptr<StorDistributionConfig> _distributionConfig; - std::unique_ptr<document::DocumenttypesConfig> _doctypesConfig; + std::unique_ptr<document::config::DocumenttypesConfig> _doctypesConfig; std::unique_ptr<BucketspacesConfig> _bucketSpacesConfig; // New configs gotten that has yet to have been handled std::unique_ptr<StorServerConfig> _newServerConfig; std::unique_ptr<UpgradingConfig> _newClusterConfig; std::unique_ptr<StorDistributionConfig> _newDistributionConfig; - std::unique_ptr<document::DocumenttypesConfig> _newDoctypesConfig; + 
std::unique_ptr<document::config::DocumenttypesConfig> _newDoctypesConfig; std::unique_ptr<BucketspacesConfig> _newBucketSpacesConfig; std::unique_ptr<StorageComponent> _component; std::unique_ptr<NodeIdentity> _node_identity; diff --git a/storageserver/src/vespa/storageserver/app/process.cpp b/storageserver/src/vespa/storageserver/app/process.cpp index 9ec1ed2bd10..8480f1427f2 100644 --- a/storageserver/src/vespa/storageserver/app/process.cpp +++ b/storageserver/src/vespa/storageserver/app/process.cpp @@ -21,7 +21,7 @@ Process::Process(const config::ConfigUri & configUri) void Process::setupConfig(milliseconds subscribeTimeout) { - _documentHandler = _configSubscriber.subscribe<document::DocumenttypesConfig>(_configUri.getConfigId(), subscribeTimeout); + _documentHandler = _configSubscriber.subscribe<document::config::DocumenttypesConfig>(_configUri.getConfigId(), subscribeTimeout); if (!_configSubscriber.nextConfig()) { throw vespalib::TimeoutException("Could not subscribe to document config within timeout"); } diff --git a/storageserver/src/vespa/storageserver/app/process.h b/storageserver/src/vespa/storageserver/app/process.h index 5f22906896b..407c8cf8881 100644 --- a/storageserver/src/vespa/storageserver/app/process.h +++ b/storageserver/src/vespa/storageserver/app/process.h @@ -34,7 +34,7 @@ protected: config::ConfigSubscriber _configSubscriber; private: - config::ConfigHandle<document::DocumenttypesConfig>::UP _documentHandler; + config::ConfigHandle<document::config::DocumenttypesConfig>::UP _documentHandler; std::vector<DocumentTypeRepoSP> _repos; public: diff --git a/tenant-base/pom.xml b/tenant-base/pom.xml index acf62b1d6e6..f4923bf79f1 100644 --- a/tenant-base/pom.xml +++ b/tenant-base/pom.xml @@ -40,7 +40,7 @@ <target_jdk_version>11</target_jdk_version> <maven-compiler-plugin.version>3.8.1</maven-compiler-plugin.version> <maven-surefire-plugin.version>2.22.0</maven-surefire-plugin.version> - <junit.version>5.7.0</junit.version> <!-- Keep in sync with 
hosted-tenant-base and tenant-cd until all direct use is removed --> + <junit.version>5.8.1</junit.version> <!-- Keep in sync with hosted-tenant-base and tenant-cd until all direct use is removed --> <endpoint>https://api.vespa-external.aws.oath.cloud:4443</endpoint> <test.categories>!integration</test.categories> </properties> diff --git a/tenant-cd-api/pom.xml b/tenant-cd-api/pom.xml index e45fd7e9586..60d862a8844 100644 --- a/tenant-cd-api/pom.xml +++ b/tenant-cd-api/pom.xml @@ -25,7 +25,7 @@ This version must match the string in all ExportPackage annotations in this module. It must also be in sync junit version specified in 'hosted-tenant-base'. --> - <hosted-tenant-base-junit-version>5.7.0</hosted-tenant-base-junit-version> + <hosted-tenant-base-junit-version>5.8.1</hosted-tenant-base-junit-version> </properties> @@ -58,6 +58,12 @@ <version>${hosted-tenant-base-junit-version}</version> <scope>compile</scope> </dependency> + <dependency> + <groupId>com.yahoo.vespa</groupId> + <artifactId>vespa-feed-client-api</artifactId> + <version>${project.version}</version> + <scope>compile</scope> + </dependency> </dependencies> <build> diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/servlet/package-info.java b/tenant-cd-api/src/main/java/ai/vespa/feed/client/package-info.java index 098506aa86f..3871dc1fa3d 100644 --- a/container-core/src/main/java/com/yahoo/jdisc/http/servlet/package-info.java +++ b/tenant-cd-api/src/main/java/ai/vespa/feed/client/package-info.java @@ -1,5 +1,5 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. @ExportPackage -package com.yahoo.jdisc.http.servlet; +package ai.vespa.feed.client; -import com.yahoo.osgi.annotation.ExportPackage; +import com.yahoo.osgi.annotation.ExportPackage;
\ No newline at end of file diff --git a/tenant-cd-api/src/main/java/org/junit/jupiter/api/condition/package-info.java b/tenant-cd-api/src/main/java/org/junit/jupiter/api/condition/package-info.java index 7d14bb7fd18..cc511f67528 100644 --- a/tenant-cd-api/src/main/java/org/junit/jupiter/api/condition/package-info.java +++ b/tenant-cd-api/src/main/java/org/junit/jupiter/api/condition/package-info.java @@ -2,7 +2,7 @@ /** * @author bjorncs */ -@ExportPackage(version = @Version(major = 5, minor = 7, micro = 0)) +@ExportPackage(version = @Version(major = 5, minor = 8, micro = 1)) package org.junit.jupiter.api.condition; import com.yahoo.osgi.annotation.ExportPackage; diff --git a/tenant-cd-api/src/main/java/org/junit/jupiter/api/extension/package-info.java b/tenant-cd-api/src/main/java/org/junit/jupiter/api/extension/package-info.java index 14080a1cb51..7ddb6761e06 100644 --- a/tenant-cd-api/src/main/java/org/junit/jupiter/api/extension/package-info.java +++ b/tenant-cd-api/src/main/java/org/junit/jupiter/api/extension/package-info.java @@ -2,7 +2,7 @@ /** * @author bjorncs */ -@ExportPackage(version = @Version(major = 5, minor = 7, micro = 0)) +@ExportPackage(version = @Version(major = 5, minor = 8, micro = 1)) package org.junit.jupiter.api.extension; import com.yahoo.osgi.annotation.ExportPackage; diff --git a/tenant-cd-api/src/main/java/org/junit/jupiter/api/function/package-info.java b/tenant-cd-api/src/main/java/org/junit/jupiter/api/function/package-info.java index cd0efd44e3c..72f3fd82347 100644 --- a/tenant-cd-api/src/main/java/org/junit/jupiter/api/function/package-info.java +++ b/tenant-cd-api/src/main/java/org/junit/jupiter/api/function/package-info.java @@ -2,7 +2,7 @@ /** * @author bjorncs */ -@ExportPackage(version = @Version(major = 5, minor = 7, micro = 0)) +@ExportPackage(version = @Version(major = 5, minor = 8, micro = 1)) package org.junit.jupiter.api.function; import com.yahoo.osgi.annotation.ExportPackage; diff --git 
a/tenant-cd-api/src/main/java/org/junit/jupiter/api/io/package-info.java b/tenant-cd-api/src/main/java/org/junit/jupiter/api/io/package-info.java index 84b43e8e243..374aa823308 100644 --- a/tenant-cd-api/src/main/java/org/junit/jupiter/api/io/package-info.java +++ b/tenant-cd-api/src/main/java/org/junit/jupiter/api/io/package-info.java @@ -2,7 +2,7 @@ /** * @author bjorncs */ -@ExportPackage(version = @Version(major = 5, minor = 7, micro = 0)) +@ExportPackage(version = @Version(major = 5, minor = 8, micro = 1)) package org.junit.jupiter.api.io; import com.yahoo.osgi.annotation.ExportPackage; diff --git a/tenant-cd-api/src/main/java/org/junit/jupiter/api/package-info.java b/tenant-cd-api/src/main/java/org/junit/jupiter/api/package-info.java index da5e4f19c1f..75e33914d6c 100644 --- a/tenant-cd-api/src/main/java/org/junit/jupiter/api/package-info.java +++ b/tenant-cd-api/src/main/java/org/junit/jupiter/api/package-info.java @@ -2,7 +2,7 @@ /** * @author bjorncs */ -@ExportPackage(version = @Version(major = 5, minor = 7, micro = 0)) +@ExportPackage(version = @Version(major = 5, minor = 8, micro = 1)) package org.junit.jupiter.api; import com.yahoo.osgi.annotation.ExportPackage; diff --git a/tenant-cd-api/src/main/java/org/junit/jupiter/api/parallel/package-info.java b/tenant-cd-api/src/main/java/org/junit/jupiter/api/parallel/package-info.java index be7342d2f29..2abd95827e4 100644 --- a/tenant-cd-api/src/main/java/org/junit/jupiter/api/parallel/package-info.java +++ b/tenant-cd-api/src/main/java/org/junit/jupiter/api/parallel/package-info.java @@ -2,7 +2,7 @@ /** * @author bjorncs */ -@ExportPackage(version = @Version(major = 5, minor = 7, micro = 0)) +@ExportPackage(version = @Version(major = 5, minor = 8, micro = 1)) package org.junit.jupiter.api.parallel; import com.yahoo.osgi.annotation.ExportPackage; diff --git a/tenant-cd-commons/pom.xml b/tenant-cd-commons/pom.xml index 1d76654fbf7..0ada43bca0b 100644 --- a/tenant-cd-commons/pom.xml +++ 
b/tenant-cd-commons/pom.xml @@ -42,6 +42,18 @@ <artifactId>config-provisioning</artifactId> <version>${project.version}</version> </dependency> + <dependency> + <groupId>com.yahoo.vespa</groupId> + <artifactId>vespa-feed-client</artifactId> + <version>${project.version}</version> + <exclusions> + <exclusion> + <groupId>com.yahoo.vespa</groupId> + <artifactId>vespa-feed-client-api</artifactId> + </exclusion> + </exclusions> + <scope>compile</scope> + </dependency> </dependencies> <build> diff --git a/tenant-cd-commons/src/main/java/ai/vespa/hosted/cd/commons/EndpointAuthenticator.java b/tenant-cd-commons/src/main/java/ai/vespa/hosted/cd/commons/EndpointAuthenticator.java index 90775ab76af..5317cf01e21 100644 --- a/tenant-cd-commons/src/main/java/ai/vespa/hosted/cd/commons/EndpointAuthenticator.java +++ b/tenant-cd-commons/src/main/java/ai/vespa/hosted/cd/commons/EndpointAuthenticator.java @@ -4,6 +4,7 @@ package ai.vespa.hosted.cd.commons; import javax.net.ssl.SSLContext; import java.net.http.HttpRequest; import java.security.NoSuchAlgorithmException; +import java.util.Map; /** * Adds environment dependent authentication to HTTP request against Vespa deployments. @@ -20,8 +21,7 @@ public interface EndpointAuthenticator { default SSLContext sslContext() { try { return SSLContext.getDefault(); - } - catch (NoSuchAlgorithmException e) { + } catch (NoSuchAlgorithmException e) { throw new RuntimeException(e); } } @@ -31,4 +31,7 @@ public interface EndpointAuthenticator { return request; } + default Map<String, String> authorizationHeaders() { + return Map.of(); + } } diff --git a/tenant-cd-commons/src/main/java/ai/vespa/hosted/cd/commons/FeedClientBuilder.java b/tenant-cd-commons/src/main/java/ai/vespa/hosted/cd/commons/FeedClientBuilder.java new file mode 100644 index 00000000000..892c8b63645 --- /dev/null +++ b/tenant-cd-commons/src/main/java/ai/vespa/hosted/cd/commons/FeedClientBuilder.java @@ -0,0 +1,25 @@ +// Copyright Yahoo. 
Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. + +package ai.vespa.hosted.cd.commons; + +import ai.vespa.feed.client.impl.FeedClientBuilderImpl; + +import java.util.Objects; +import java.util.concurrent.atomic.AtomicReference; + +/** + * @author mortent + */ +public class FeedClientBuilder extends FeedClientBuilderImpl { + + static AtomicReference<EndpointAuthenticator> endpointAuthenticator = new AtomicReference<>(); + + public static void setEndpointAuthenticator(EndpointAuthenticator authenticator) { + endpointAuthenticator.set(authenticator); + } + + public FeedClientBuilder() { + super.setSslContext(Objects.requireNonNull(endpointAuthenticator.get(), FeedClientBuilder.class.getName() + " is not initialized").sslContext()); + endpointAuthenticator.get().authorizationHeaders().forEach(super::addRequestHeader); + } +} diff --git a/tenant-cd-commons/src/main/resources/META-INF.services/ai.vespa.feed.client.FeedClientBuilder b/tenant-cd-commons/src/main/resources/META-INF.services/ai.vespa.feed.client.FeedClientBuilder new file mode 100644 index 00000000000..69073a3951c --- /dev/null +++ b/tenant-cd-commons/src/main/resources/META-INF.services/ai.vespa.feed.client.FeedClientBuilder @@ -0,0 +1,2 @@ +# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +ai.vespa.hosted.cd.commons.FeedClientBuilder
\ No newline at end of file diff --git a/valgrind-suppressions.txt b/valgrind-suppressions.txt index 75a7b256b35..63fd7857e76 100644 --- a/valgrind-suppressions.txt +++ b/valgrind-suppressions.txt @@ -2,6 +2,13 @@ NPTL keeps a cache of thread stacks, and metadata for thread local storage is not freed for threads in that cache Memcheck:Leak fun:calloc + fun:_dl_allocate_tls + fun:pthread_create@@GLIBC_2.17 +} +{ + NPTL keeps a cache of thread stacks, and metadata for thread local storage is not freed for threads in that cache + Memcheck:Leak + fun:calloc fun:UnknownInlinedFun fun:allocate_dtv fun:_dl_allocate_tls diff --git a/vbench/src/apps/vbench/vbench.cpp b/vbench/src/apps/vbench/vbench.cpp index 00499519dcc..edaa68b8838 100644 --- a/vbench/src/apps/vbench/vbench.cpp +++ b/vbench/src/apps/vbench/vbench.cpp @@ -8,6 +8,8 @@ using namespace vbench; +VESPA_THREAD_STACK_TAG(vbench_thread); + typedef vespalib::SignalHandler SIG; struct NotifyDone : public vespalib::Runnable { @@ -31,8 +33,7 @@ int run(const std::string &cfg_name) { return 1; } vespalib::Slime cfg; - vespalib::Memory mapped_cfg(cfg_file.get().data, - cfg_file.get().size); + vespalib::Memory mapped_cfg(cfg_file.get().data, cfg_file.get().size); if (!vespalib::slime::JsonFormat::decode(mapped_cfg, cfg)) { fprintf(stderr, "unable to parse config file: %s\n", cfg.toString().c_str()); @@ -43,7 +44,7 @@ int run(const std::string &cfg_name) { VBench vbench(cfg); NotifyDone notify(done); vespalib::RunnablePair runBoth(vbench, notify); - vespalib::Thread thread(runBoth); + vespalib::Thread thread(runBoth, vbench_thread); thread.start(); while (!SIG::INT.check() && !SIG::TERM.check() && !done.await(1s)) {} if (!done.await(vespalib::duration::zero())) { diff --git a/vbench/src/tests/dispatcher/dispatcher_test.cpp b/vbench/src/tests/dispatcher/dispatcher_test.cpp index b2c002e3e50..618940aab57 100644 --- a/vbench/src/tests/dispatcher/dispatcher_test.cpp +++ b/vbench/src/tests/dispatcher/dispatcher_test.cpp @@ 
-17,6 +17,9 @@ struct Fetcher : public vespalib::Runnable { void run() override { handler.handle(provider.provide()); } }; +VESPA_THREAD_STACK_TAG(fetcher1_thread); +VESPA_THREAD_STACK_TAG(fetcher2_thread); + TEST("dispatcher") { MyHandler dropped; MyHandler handler1; @@ -24,8 +27,8 @@ TEST("dispatcher") { Dispatcher<int> dispatcher(dropped); Fetcher fetcher1(dispatcher, handler1); Fetcher fetcher2(dispatcher, handler2); - vespalib::Thread thread1(fetcher1); - vespalib::Thread thread2(fetcher2); + vespalib::Thread thread1(fetcher1, fetcher1_thread); + vespalib::Thread thread2(fetcher2, fetcher2_thread); thread1.start(); EXPECT_TRUE(dispatcher.waitForThreads(1, 512)); thread2.start(); diff --git a/vbench/src/tests/handler_thread/handler_thread_test.cpp b/vbench/src/tests/handler_thread/handler_thread_test.cpp index fd7d630f705..97a12e82ac8 100644 --- a/vbench/src/tests/handler_thread/handler_thread_test.cpp +++ b/vbench/src/tests/handler_thread/handler_thread_test.cpp @@ -15,9 +15,11 @@ struct MyHandler : Handler<int> { MyHandler::~MyHandler() = default; +VESPA_THREAD_STACK_TAG(test_thread); + TEST("handler thread") { MyHandler handler; - HandlerThread<int> th(handler); + HandlerThread<int> th(handler, test_thread); th.handle(std::unique_ptr<int>(new int(1))); th.handle(std::unique_ptr<int>(new int(2))); th.handle(std::unique_ptr<int>(new int(3))); diff --git a/vbench/src/vbench/core/handler_thread.h b/vbench/src/vbench/core/handler_thread.h index b4aaf08eee8..402ecbeb0dc 100644 --- a/vbench/src/vbench/core/handler_thread.h +++ b/vbench/src/vbench/core/handler_thread.h @@ -33,7 +33,7 @@ private: void run() override; public: - HandlerThread(Handler<T> &next); + HandlerThread(Handler<T> &next, init_fun_t init_fun); ~HandlerThread(); void handle(std::unique_ptr<T> obj) override; void join() override; diff --git a/vbench/src/vbench/core/handler_thread.hpp b/vbench/src/vbench/core/handler_thread.hpp index 3d1dc423411..56cc0a7771d 100644 --- 
a/vbench/src/vbench/core/handler_thread.hpp +++ b/vbench/src/vbench/core/handler_thread.hpp @@ -23,12 +23,12 @@ HandlerThread<T>::run() } template <typename T> -HandlerThread<T>::HandlerThread(Handler<T> &next) +HandlerThread<T>::HandlerThread(Handler<T> &next, init_fun_t init_fun) : _lock(), _cond(), _queue(), _next(next), - _thread(*this), + _thread(*this, init_fun), _done(false) { _thread.start(); diff --git a/vbench/src/vbench/vbench/request_scheduler.cpp b/vbench/src/vbench/vbench/request_scheduler.cpp index 80aec6c308e..95d29181b1f 100644 --- a/vbench/src/vbench/vbench/request_scheduler.cpp +++ b/vbench/src/vbench/vbench/request_scheduler.cpp @@ -1,11 +1,13 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. #include "request_scheduler.h" - #include <vbench/core/timer.h> namespace vbench { +VESPA_THREAD_STACK_TAG(vbench_request_scheduler_thread); +VESPA_THREAD_STACK_TAG(vbench_handler_thread); + void RequestScheduler::run() { @@ -24,16 +26,16 @@ RequestScheduler::run() RequestScheduler::RequestScheduler(CryptoEngine::SP crypto, Handler<Request> &next, size_t numWorkers) : _timer(), - _proxy(next), + _proxy(next, vbench_handler_thread), _queue(10.0, 0.020), _droppedTagger(_proxy), _dispatcher(_droppedTagger), - _thread(*this), + _thread(*this, vbench_request_scheduler_thread), _connectionPool(std::move(crypto), _timer), _workers() { for (size_t i = 0; i < numWorkers; ++i) { - _workers.push_back(std::unique_ptr<Worker>(new Worker(_dispatcher, _proxy, _connectionPool, _timer))); + _workers.push_back(std::make_unique<Worker>(_dispatcher, _proxy, _connectionPool, _timer)); } _dispatcher.waitForThreads(numWorkers, 256); } diff --git a/vbench/src/vbench/vbench/vbench.cpp b/vbench/src/vbench/vbench/vbench.cpp index d636f7a1cd7..9a5adad262e 100644 --- a/vbench/src/vbench/vbench/vbench.cpp +++ b/vbench/src/vbench/vbench/vbench.cpp @@ -40,6 +40,8 @@ CryptoEngine::SP setup_crypto(const vespalib::slime::Inspector 
&tls) { } // namespace vbench::<unnamed> +VESPA_THREAD_STACK_TAG(vbench_inputchain_generator); + VBench::VBench(const vespalib::Slime &cfg) : _factory(), _analyzers(), @@ -76,7 +78,7 @@ VBench::VBench(const vespalib::Slime &cfg) } inputChain->generator = _factory.createGenerator(generator, *inputChain->taggers.back()); if (inputChain->generator.get() != 0) { - inputChain->thread.reset(new vespalib::Thread(*inputChain->generator)); + inputChain->thread.reset(new vespalib::Thread(*inputChain->generator, vbench_inputchain_generator)); _inputs.push_back(std::move(inputChain)); } } diff --git a/vbench/src/vbench/vbench/worker.cpp b/vbench/src/vbench/vbench/worker.cpp index a64956f710b..afccc7de39f 100644 --- a/vbench/src/vbench/vbench/worker.cpp +++ b/vbench/src/vbench/vbench/worker.cpp @@ -5,6 +5,8 @@ namespace vbench { +VESPA_THREAD_STACK_TAG(vbench_worker_thread); + void Worker::run() { @@ -22,7 +24,7 @@ Worker::run() Worker::Worker(Provider<Request> &provider, Handler<Request> &next, HttpConnectionPool &pool, Timer &timer) - : _thread(*this), + : _thread(*this, vbench_worker_thread), _provider(provider), _next(next), _pool(pool), diff --git a/vdslib/src/vespa/vdslib/container/documentsummary.h b/vdslib/src/vespa/vdslib/container/documentsummary.h index 2c23e51319b..bbfeb684559 100644 --- a/vdslib/src/vespa/vdslib/container/documentsummary.h +++ b/vdslib/src/vespa/vdslib/container/documentsummary.h @@ -35,7 +35,7 @@ public: private: class Summary { public: - Summary() : _docIdOffset(0), _summaryOffset(0), _summaryLen(0) { } + Summary() noexcept : _docIdOffset(0), _summaryOffset(0), _summaryLen(0) { } Summary(uint32_t docIdOffset, uint32_t summaryOffset, uint32_t summaryLen) : _docIdOffset(docIdOffset), _summaryOffset(summaryOffset), _summaryLen(summaryLen) { } const char * getDocId(const char * base) const { return base + _docIdOffset; } const void * getSummary(const char * base, size_t & sz) const { sz = _summaryLen; return base + _summaryOffset; } diff --git 
a/vdslib/src/vespa/vdslib/container/searchresult.h b/vdslib/src/vespa/vdslib/container/searchresult.h index 6b6e0aaae12..a777a4731e6 100644 --- a/vdslib/src/vespa/vdslib/container/searchresult.h +++ b/vdslib/src/vespa/vdslib/container/searchresult.h @@ -82,7 +82,7 @@ public: private: class Hit { public: - Hit() : _lid(0), _rank(0), _docIdOffset(0), _index(0) { } + Hit() noexcept : _lid(0), _rank(0), _docIdOffset(0), _index(0) { } Hit(uint32_t lid, RankType rank, size_t docIdOffset, size_t index) : _lid(lid), _rank(rank), _docIdOffset(docIdOffset), _index(index) { } const char * getDocId(const char * base) const { return base + getDocIdOffset(); } uint32_t getLid() const { return _lid; } diff --git a/vdslib/src/vespa/vdslib/distribution/redundancygroupdistribution.h b/vdslib/src/vespa/vdslib/distribution/redundancygroupdistribution.h index c7b1a3a05f7..dad7933ed2c 100644 --- a/vdslib/src/vespa/vdslib/distribution/redundancygroupdistribution.h +++ b/vdslib/src/vespa/vdslib/distribution/redundancygroupdistribution.h @@ -17,7 +17,7 @@ class RedundancyGroupDistribution : public document::Printable { std::vector<uint16_t> _values; public: - RedundancyGroupDistribution() {} + RedundancyGroupDistribution() noexcept {} /** * Create a group distribution spec from the serialized version. * Asterisk entries are represented as zero. 
diff --git a/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zms/DefaultZmsClient.java b/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zms/DefaultZmsClient.java index 4a3dc30d7ed..289a63a1128 100644 --- a/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zms/DefaultZmsClient.java +++ b/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zms/DefaultZmsClient.java @@ -300,7 +300,7 @@ public class DefaultZmsClient extends ClientBase implements ZmsClient { .filter(re -> AthenzIdentities.USER_PRINCIPAL_DOMAIN.equals(AthenzIdentities.from(re.memberName()).getDomain())) .collect(Collectors.toUnmodifiableMap( m -> (AthenzUser) AthenzIdentities.from(m.memberName()), - RoleEntity.Member::auditRef)); + m -> m.auditRef() != null ? m.auditRef() : "<no reason provided>")); } @Override @@ -385,6 +385,13 @@ public class DefaultZmsClient extends ClientBase implements ZmsClient { return Set.copyOf(listResponse.entity); } + @Override + public void deleteRole(AthenzRole role) { + URI uri = zmsUrl.resolve(String.format("domain/%s/role/%s", role.domain().getName(), role.roleName())); + HttpUriRequest request = RequestBuilder.delete(uri).build(); + execute(request, response -> readEntity(response, Void.class)); + } + private static Header createCookieHeaderWithOktaTokens(OktaIdentityToken identityToken, OktaAccessToken accessToken) { return new BasicHeader("Cookie", String.format("okta_at=%s; okta_it=%s", accessToken.token(), identityToken.token())); } diff --git a/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zms/ZmsClient.java b/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zms/ZmsClient.java index 823b5843115..aa038b5bb23 100644 --- a/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zms/ZmsClient.java +++ b/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zms/ZmsClient.java @@ -79,5 +79,7 @@ public interface ZmsClient extends AutoCloseable { Set<String> listPolicies(AthenzDomain domain); + void deleteRole(AthenzRole 
athenzRole); + void close(); } diff --git a/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/tls/AthenzX509CertificateUtils.java b/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/tls/AthenzX509CertificateUtils.java index 5f75ace6ac5..4b54b392d12 100644 --- a/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/tls/AthenzX509CertificateUtils.java +++ b/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/tls/AthenzX509CertificateUtils.java @@ -1,14 +1,20 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.vespa.athenz.tls; +import com.yahoo.security.SubjectAlternativeName; +import com.yahoo.security.X509CertificateUtils; import com.yahoo.vespa.athenz.api.AthenzIdentity; import com.yahoo.vespa.athenz.api.AthenzRole; import com.yahoo.vespa.athenz.utils.AthenzIdentities; +import java.net.URI; import java.security.cert.X509Certificate; import java.util.List; +import java.util.Optional; +import static com.yahoo.security.SubjectAlternativeName.Type.DNS_NAME; import static com.yahoo.security.SubjectAlternativeName.Type.RFC822_NAME; +import static com.yahoo.security.SubjectAlternativeName.Type.UNIFORM_RESOURCE_IDENTIFIER; /** * Utility methods for Athenz issued x509 certificates @@ -40,4 +46,44 @@ public class AthenzX509CertificateUtils { return AthenzIdentities.from(email.substring(0, separator)); } + /** @return Athenz unique instance id from an Athenz X.509 certificate (specified in the Subject Alternative Name extension) */ + public static Optional<String> getInstanceId(X509Certificate cert) { + return getInstanceId(X509CertificateUtils.getSubjectAlternativeNames(cert)); + } + + /** @return Athenz unique instance id from the Subject Alternative Name extension */ + public static Optional<String> getInstanceId(List<SubjectAlternativeName> sans) { + // Prefer instance id from SAN URI over the legacy DNS entry + return getAthenzUniqueInstanceIdFromSanUri(sans) + .or(() -> 
getAthenzUniqueInstanceIdFromSanDns(sans)); + } + + private static Optional<String> getAthenzUniqueInstanceIdFromSanUri(List<SubjectAlternativeName> sans) { + String uriPrefix = "athenz://instanceid/"; + return sans.stream() + .filter(san -> { + if (san.getType() != UNIFORM_RESOURCE_IDENTIFIER) return false; + return san.getValue().startsWith(uriPrefix); + }) + .map(san -> { + String uriPath = URI.create(san.getValue()).getPath(); + return uriPath.substring(uriPath.lastIndexOf('/') + 1); // last path segment contains instance id + }) + .findFirst(); + } + + private static Optional<String> getAthenzUniqueInstanceIdFromSanDns(List<SubjectAlternativeName> sans) { + String dnsNameDelimiter = ".instanceid.athenz."; + return sans.stream() + .filter(san -> { + if (san.getType() != DNS_NAME) return false; + return san.getValue().contains(dnsNameDelimiter); + }) + .map(san -> { + String dnsName = san.getValue(); + return dnsName.substring(0, dnsName.indexOf(dnsNameDelimiter)); + }) + .findFirst(); + } + } diff --git a/vespa-feed-client/abi-spec.json b/vespa-feed-client-api/abi-spec.json index 808fe152fee..a9047365a7a 100644 --- a/vespa-feed-client/abi-spec.json +++ b/vespa-feed-client-api/abi-spec.json @@ -1,20 +1,4 @@ { - "ai.vespa.feed.client.BenchmarkingCluster": { - "superClass": "java.lang.Object", - "interfaces": [ - "ai.vespa.feed.client.Cluster" - ], - "attributes": [ - "public" - ], - "methods": [ - "public void <init>(ai.vespa.feed.client.Cluster)", - "public void dispatch(ai.vespa.feed.client.HttpRequest, java.util.concurrent.CompletableFuture)", - "public ai.vespa.feed.client.OperationStats stats()", - "public void close()" - ], - "fields": [] - }, "ai.vespa.feed.client.DocumentId": { "superClass": "java.lang.Object", "interfaces": [], @@ -37,21 +21,6 @@ ], "fields": [] }, - "ai.vespa.feed.client.DynamicThrottler": { - "superClass": "ai.vespa.feed.client.StaticThrottler", - "interfaces": [], - "attributes": [ - "public" - ], - "methods": [ - "public void 
<init>(ai.vespa.feed.client.FeedClientBuilder)", - "public void sent(long, java.util.concurrent.CompletableFuture)", - "public void success()", - "public void throttled(long)", - "public long targetInflight()" - ], - "fields": [] - }, "ai.vespa.feed.client.FeedClient$CircuitBreaker$State": { "superClass": "java.lang.Enum", "interfaces": [], @@ -145,29 +114,34 @@ "superClass": "java.lang.Object", "interfaces": [], "attributes": [ - "public" + "public", + "interface", + "abstract" ], "methods": [ "public static ai.vespa.feed.client.FeedClientBuilder create(java.net.URI)", "public static ai.vespa.feed.client.FeedClientBuilder create(java.util.List)", - "public ai.vespa.feed.client.FeedClientBuilder setConnectionsPerEndpoint(int)", - "public ai.vespa.feed.client.FeedClientBuilder setMaxStreamPerConnection(int)", - "public ai.vespa.feed.client.FeedClientBuilder setSslContext(javax.net.ssl.SSLContext)", - "public ai.vespa.feed.client.FeedClientBuilder setHostnameVerifier(javax.net.ssl.HostnameVerifier)", - "public ai.vespa.feed.client.FeedClientBuilder noBenchmarking()", - "public ai.vespa.feed.client.FeedClientBuilder addRequestHeader(java.lang.String, java.lang.String)", - "public ai.vespa.feed.client.FeedClientBuilder addRequestHeader(java.lang.String, java.util.function.Supplier)", - "public ai.vespa.feed.client.FeedClientBuilder setRetryStrategy(ai.vespa.feed.client.FeedClient$RetryStrategy)", - "public ai.vespa.feed.client.FeedClientBuilder setCircuitBreaker(ai.vespa.feed.client.FeedClient$CircuitBreaker)", - "public ai.vespa.feed.client.FeedClientBuilder setCertificate(java.nio.file.Path, java.nio.file.Path)", - "public ai.vespa.feed.client.FeedClientBuilder setCertificate(java.util.Collection, java.security.PrivateKey)", - "public ai.vespa.feed.client.FeedClientBuilder setCertificate(java.security.cert.X509Certificate, java.security.PrivateKey)", - "public ai.vespa.feed.client.FeedClientBuilder setDryrun(boolean)", - "public ai.vespa.feed.client.FeedClientBuilder 
setCaCertificatesFile(java.nio.file.Path)", - "public ai.vespa.feed.client.FeedClientBuilder setCaCertificates(java.util.Collection)", - "public ai.vespa.feed.client.FeedClient build()" + "public abstract ai.vespa.feed.client.FeedClientBuilder setConnectionsPerEndpoint(int)", + "public abstract ai.vespa.feed.client.FeedClientBuilder setMaxStreamPerConnection(int)", + "public abstract ai.vespa.feed.client.FeedClientBuilder setSslContext(javax.net.ssl.SSLContext)", + "public abstract ai.vespa.feed.client.FeedClientBuilder setHostnameVerifier(javax.net.ssl.HostnameVerifier)", + "public abstract ai.vespa.feed.client.FeedClientBuilder noBenchmarking()", + "public abstract ai.vespa.feed.client.FeedClientBuilder addRequestHeader(java.lang.String, java.lang.String)", + "public abstract ai.vespa.feed.client.FeedClientBuilder addRequestHeader(java.lang.String, java.util.function.Supplier)", + "public abstract ai.vespa.feed.client.FeedClientBuilder setRetryStrategy(ai.vespa.feed.client.FeedClient$RetryStrategy)", + "public abstract ai.vespa.feed.client.FeedClientBuilder setCircuitBreaker(ai.vespa.feed.client.FeedClient$CircuitBreaker)", + "public abstract ai.vespa.feed.client.FeedClientBuilder setCertificate(java.nio.file.Path, java.nio.file.Path)", + "public abstract ai.vespa.feed.client.FeedClientBuilder setCertificate(java.util.Collection, java.security.PrivateKey)", + "public abstract ai.vespa.feed.client.FeedClientBuilder setCertificate(java.security.cert.X509Certificate, java.security.PrivateKey)", + "public abstract ai.vespa.feed.client.FeedClientBuilder setDryrun(boolean)", + "public abstract ai.vespa.feed.client.FeedClientBuilder setCaCertificatesFile(java.nio.file.Path)", + "public abstract ai.vespa.feed.client.FeedClientBuilder setCaCertificates(java.util.Collection)", + "public abstract ai.vespa.feed.client.FeedClientBuilder setEndpointUris(java.util.List)", + "public abstract ai.vespa.feed.client.FeedClient build()" ], - "fields": [] + "fields": [ + "public 
static final java.lang.String PREFERRED_IMPLEMENTATION_PROPERTY" + ] }, "ai.vespa.feed.client.FeedException": { "superClass": "java.lang.RuntimeException", @@ -186,20 +160,18 @@ ], "fields": [] }, - "ai.vespa.feed.client.GracePeriodCircuitBreaker": { + "ai.vespa.feed.client.HttpResponse": { "superClass": "java.lang.Object", - "interfaces": [ - "ai.vespa.feed.client.FeedClient$CircuitBreaker" - ], + "interfaces": [], "attributes": [ - "public" + "public", + "interface", + "abstract" ], "methods": [ - "public void <init>(java.time.Duration, java.time.Duration)", - "public void success()", - "public void failure(ai.vespa.feed.client.HttpResponse)", - "public void failure(java.lang.Throwable)", - "public ai.vespa.feed.client.FeedClient$CircuitBreaker$State state()" + "public abstract int code()", + "public abstract byte[] body()", + "public static ai.vespa.feed.client.HttpResponse of(int, byte[])" ], "fields": [] }, @@ -332,14 +304,15 @@ "superClass": "java.lang.Object", "interfaces": [], "attributes": [ - "public" + "public", + "interface", + "abstract" ], "methods": [ - "public ai.vespa.feed.client.Result$Type type()", - "public ai.vespa.feed.client.DocumentId documentId()", - "public java.util.Optional resultMessage()", - "public java.util.Optional traceMessage()", - "public java.lang.String toString()" + "public abstract ai.vespa.feed.client.Result$Type type()", + "public abstract ai.vespa.feed.client.DocumentId documentId()", + "public abstract java.util.Optional resultMessage()", + "public abstract java.util.Optional traceMessage()" ], "fields": [] }, @@ -366,25 +339,5 @@ "public void <init>(ai.vespa.feed.client.DocumentId, java.lang.Throwable)" ], "fields": [] - }, - "ai.vespa.feed.client.StaticThrottler": { - "superClass": "java.lang.Object", - "interfaces": [ - "ai.vespa.feed.client.Throttler" - ], - "attributes": [ - "public" - ], - "methods": [ - "public void <init>(ai.vespa.feed.client.FeedClientBuilder)", - "public void sent(long, 
java.util.concurrent.CompletableFuture)", - "public void success()", - "public void throttled(long)", - "public long targetInflight()" - ], - "fields": [ - "protected final long maxInflight", - "protected final long minInflight" - ] } }
\ No newline at end of file diff --git a/vespa-feed-client-api/pom.xml b/vespa-feed-client-api/pom.xml new file mode 100644 index 00000000000..df5fd531f06 --- /dev/null +++ b/vespa-feed-client-api/pom.xml @@ -0,0 +1,57 @@ +<?xml version="1.0"?> +<!-- Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. --> +<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd"> + <modelVersion>4.0.0</modelVersion> + <parent> + <groupId>com.yahoo.vespa</groupId> + <artifactId>parent</artifactId> + <version>7-SNAPSHOT</version> + <relativePath>../parent/pom.xml</relativePath> + </parent> + <artifactId>vespa-feed-client-api</artifactId> + <packaging>jar</packaging> + <version>7-SNAPSHOT</version> + + <dependencies> + <!-- compile scope --> + <dependency> + <groupId>com.yahoo.vespa</groupId> + <artifactId>annotations</artifactId> + <version>${project.version}</version> + </dependency> + <dependency> + <groupId>com.fasterxml.jackson.core</groupId> + <artifactId>jackson-core</artifactId> + <scope>compile</scope> + </dependency> + + <!-- test scope --> + <dependency> + <groupId>org.junit.jupiter</groupId> + <artifactId>junit-jupiter</artifactId> + <scope>test</scope> + </dependency> + </dependencies> + + <build> + <plugins> + <plugin> + <groupId>org.apache.maven.plugins</groupId> + <artifactId>maven-compiler-plugin</artifactId> + <configuration> + <release>${vespaClients.jdk.releaseVersion}</release> + <showDeprecation>true</showDeprecation> + <compilerArgs> + <arg>-Xlint:all</arg> + <arg>-Xlint:-serial</arg> + <arg>-Werror</arg> + </compilerArgs> + </configuration> + </plugin> + <plugin> + <groupId>com.yahoo.vespa</groupId> + <artifactId>abi-check-plugin</artifactId> + </plugin> + </plugins> + </build> +</project> diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/DocumentId.java 
b/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/DocumentId.java index 5474bcfda01..5474bcfda01 100644 --- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/DocumentId.java +++ b/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/DocumentId.java diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/FeedClient.java b/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/FeedClient.java index d463c611d6a..d463c611d6a 100644 --- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/FeedClient.java +++ b/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/FeedClient.java diff --git a/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/FeedClientBuilder.java b/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/FeedClientBuilder.java new file mode 100644 index 00000000000..05bc608df27 --- /dev/null +++ b/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/FeedClientBuilder.java @@ -0,0 +1,138 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +package ai.vespa.feed.client; + +import javax.net.ssl.HostnameVerifier; +import javax.net.ssl.SSLContext; +import java.lang.reflect.Constructor; +import java.lang.reflect.InvocationTargetException; +import java.net.URI; +import java.nio.file.Path; +import java.security.PrivateKey; +import java.security.cert.X509Certificate; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; +import java.util.ServiceLoader; +import java.util.function.Supplier; + +/** + * Builder for creating a {@link FeedClient} instance. 
+ * + * @author bjorncs + * @author jonmv + */ +public interface FeedClientBuilder { + + String PREFERRED_IMPLEMENTATION_PROPERTY = "vespa.feed.client.builder.implementation"; + + /** Creates a builder for a single container endpoint **/ + static FeedClientBuilder create(URI endpoint) { return create(Collections.singletonList(endpoint)); } + + /** Creates a builder for multiple container endpoints **/ + static FeedClientBuilder create(List<URI> endpoints) { + String defaultImplementation = "ai.vespa.feed.client.impl.FeedClientBuilderImpl"; + String preferredImplementation = System.getProperty(PREFERRED_IMPLEMENTATION_PROPERTY, defaultImplementation); + Iterator<FeedClientBuilder> iterator = ServiceLoader.load(FeedClientBuilder.class).iterator(); + if (iterator.hasNext()) { + List<FeedClientBuilder> builders = new ArrayList<>(); + iterator.forEachRemaining(builders::add); + return builders.stream() + .filter(builder -> preferredImplementation.equals(builder.getClass().getName())) + .findFirst() + .orElse(builders.get(0)); + } else { + try { + Class<?> aClass = Class.forName(preferredImplementation); + for (Constructor<?> constructor : aClass.getConstructors()) { + if (constructor.getParameterTypes().length==0) { + return ((FeedClientBuilder)constructor.newInstance()).setEndpointUris(endpoints); + } + } + throw new RuntimeException("Could not find Feed client builder implementation"); + } catch (ClassNotFoundException | InvocationTargetException | InstantiationException | IllegalAccessException e) { + throw new RuntimeException(e); + } + } + } + + /** + * Sets the number of connections this client will use per endpoint. + * + * A reasonable value here is a value that lets all feed clients (if more than one) + * collectively have a number of connections which is a small multiple of the numbers + * of containers in the cluster to feed, so load can be balanced across these containers. 
+ * In general, this value should be kept as low as possible, but poor connectivity + * between feeder and cluster may also warrant a higher number of connections. + */ + FeedClientBuilder setConnectionsPerEndpoint(int max); + + /** + * Sets the maximum number of streams per HTTP/2 connection for this client. + * + * This determines the maximum number of concurrent, inflight requests for this client, + * which is {@code maxConnections * maxStreamsPerConnection}. Prefer more streams over + * more connections, when possible. + * The feed client automatically throttles load to achieve the best throughput, and the + * actual number of streams per connection is usually lower than the maximum. + */ + FeedClientBuilder setMaxStreamPerConnection(int max); + + /** Sets {@link SSLContext} instance. */ + FeedClientBuilder setSslContext(SSLContext context); + + /** Sets {@link HostnameVerifier} instance (e.g for disabling default SSL hostname verification). */ + FeedClientBuilder setHostnameVerifier(HostnameVerifier verifier); + + /** Turns off benchmarking. Attempting to get {@link FeedClient#stats()} will result in an exception. */ + FeedClientBuilder noBenchmarking(); + + /** Adds HTTP request header to all client requests. */ + FeedClientBuilder addRequestHeader(String name, String value); + + /** + * Adds HTTP request header to all client requests. Value {@link Supplier} is invoked for each HTTP request, + * i.e. value can be dynamically updated during a feed. + */ + FeedClientBuilder addRequestHeader(String name, Supplier<String> valueSupplier); + + /** + * Overrides default retry strategy. + * @see FeedClient.RetryStrategy + */ + FeedClientBuilder setRetryStrategy(FeedClient.RetryStrategy strategy); + + /** + * Overrides default circuit breaker. 
+ * @see FeedClient.CircuitBreaker + */ + FeedClientBuilder setCircuitBreaker(FeedClient.CircuitBreaker breaker); + + /** Sets path to client SSL certificate/key PEM files */ + FeedClientBuilder setCertificate(Path certificatePemFile, Path privateKeyPemFile); + + /** Sets client SSL certificates/key */ + FeedClientBuilder setCertificate(Collection<X509Certificate> certificate, PrivateKey privateKey); + + /** Sets client SSL certificate/key */ + FeedClientBuilder setCertificate(X509Certificate certificate, PrivateKey privateKey); + + FeedClientBuilder setDryrun(boolean enabled); + + /** + * Overrides JVM default SSL truststore + * @param caCertificatesFile Path to PEM encoded file containing trusted certificates + */ + FeedClientBuilder setCaCertificatesFile(Path caCertificatesFile); + + /** Overrides JVM default SSL truststore */ + FeedClientBuilder setCaCertificates(Collection<X509Certificate> caCertificates); + + /** Overrides endpoint URIs for this client */ + FeedClientBuilder setEndpointUris(List<URI> endpoints); + + /** Constructs instance of {@link FeedClient} from builder configuration */ + FeedClient build(); + +} diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/FeedException.java b/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/FeedException.java index 1936eb09418..1936eb09418 100644 --- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/FeedException.java +++ b/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/FeedException.java diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpResponse.java b/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/HttpResponse.java index 07fdb2d7257..62850fef32d 100644 --- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpResponse.java +++ b/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/HttpResponse.java @@ -1,7 +1,7 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package ai.vespa.feed.client; -interface HttpResponse { +public interface HttpResponse { int code(); byte[] body(); diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/JsonFeeder.java b/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/JsonFeeder.java index 2d7caea9f26..41b432449df 100644 --- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/JsonFeeder.java +++ b/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/JsonFeeder.java @@ -387,13 +387,13 @@ public class JsonFeeder implements Closeable { CompletableFuture<Result> next() throws IOException { JsonToken token = parser.nextToken(); - if (multipleOperations && ! arrayPrefixParsed && token == START_ARRAY) { + if (multipleOperations && ! arrayPrefixParsed && token == JsonToken.START_ARRAY) { arrayPrefixParsed = true; token = parser.nextToken(); } - if (token == END_ARRAY && multipleOperations) return null; + if (token == JsonToken.END_ARRAY && multipleOperations) return null; else if (token == null && ! 
arrayPrefixParsed) return null; - else if (token != START_OBJECT) throw parseException("Unexpected token '" + parser.currentToken() + "'"); + else if (token != JsonToken.START_OBJECT) throw parseException("Unexpected token '" + parser.currentToken() + "'"); long start = 0, end = -1; OperationType type = null; DocumentId id = null; @@ -459,8 +459,8 @@ public class JsonFeeder implements Closeable { private String readString() throws IOException { String value = parser.nextTextValue(); if (value == null) - throw new OperationParseException("Expected '" + VALUE_STRING + "' at offset " + parser.getTokenLocation().getByteOffset() + - ", but found '" + parser.currentToken() + "' (" + parser.getText() + ")"); + throw new OperationParseException("Expected '" + JsonToken.VALUE_STRING + "' at offset " + parser.getTokenLocation().getByteOffset() + + ", but found '" + parser.currentToken() + "' (" + parser.getText() + ")"); return value; } @@ -468,8 +468,8 @@ public class JsonFeeder implements Closeable { private boolean readBoolean() throws IOException { Boolean value = parser.nextBooleanValue(); if (value == null) - throw new OperationParseException("Expected '" + VALUE_FALSE + "' or '" + VALUE_TRUE + "' at offset " + parser.getTokenLocation().getByteOffset() + - ", but found '" + parser.currentToken() + "' (" + parser.getText() + ")"); + throw new OperationParseException("Expected '" + JsonToken.VALUE_FALSE + "' or '" + JsonToken.VALUE_TRUE + "' at offset " + parser.getTokenLocation().getByteOffset() + + ", but found '" + parser.currentToken() + "' (" + parser.getText() + ")"); return value; diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/OperationParameters.java b/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/OperationParameters.java index 0ec40e114df..0ec40e114df 100644 --- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/OperationParameters.java +++ b/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/OperationParameters.java diff 
--git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/OperationParseException.java b/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/OperationParseException.java index f60368dd67f..4404462be2e 100644 --- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/OperationParseException.java +++ b/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/OperationParseException.java @@ -1,6 +1,8 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package ai.vespa.feed.client; +import ai.vespa.feed.client.FeedException; + /** * Signals that supplied JSON for a document/operation is invalid * diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/OperationStats.java b/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/OperationStats.java index ab2faf245d8..ab2faf245d8 100644 --- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/OperationStats.java +++ b/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/OperationStats.java diff --git a/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/Result.java b/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/Result.java new file mode 100644 index 00000000000..fa114f6a183 --- /dev/null +++ b/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/Result.java @@ -0,0 +1,23 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +package ai.vespa.feed.client; + +import java.util.Optional; + +/** + * Result for a document operation which completed normally. 
+ * + * @author bjorncs + * @author jonmv + */ +public interface Result { + + enum Type { + success, + conditionNotMet + } + + Type type(); + DocumentId documentId(); + Optional<String> resultMessage(); + Optional<String> traceMessage(); +} diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/ResultException.java b/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/ResultException.java index d9eaff40d74..27803898c01 100644 --- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/ResultException.java +++ b/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/ResultException.java @@ -1,6 +1,10 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package ai.vespa.feed.client; +import ai.vespa.feed.client.DocumentId; +import ai.vespa.feed.client.FeedException; +import ai.vespa.feed.client.OperationParameters; + import java.util.Optional; /** diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/ResultParseException.java b/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/ResultParseException.java index 947ab9f0560..f149b13196b 100644 --- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/ResultParseException.java +++ b/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/ResultParseException.java @@ -1,6 +1,9 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package ai.vespa.feed.client; +import ai.vespa.feed.client.DocumentId; +import ai.vespa.feed.client.FeedException; + /** * Signals that the client was unable to obtain a proper response/result from container * diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/package-info.java b/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/package-info.java index daab16a9ff2..daab16a9ff2 100644 --- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/package-info.java +++ b/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/package-info.java diff --git a/vespa-feed-client/src/test/java/ai/vespa/feed/client/JsonFeederTest.java b/vespa-feed-client-api/src/test/java/ai/vespa/feed/client/JsonFeederTest.java index e4fb5cb5bef..d795678db39 100644 --- a/vespa-feed-client/src/test/java/ai/vespa/feed/client/JsonFeederTest.java +++ b/vespa-feed-client-api/src/test/java/ai/vespa/feed/client/JsonFeederTest.java @@ -1,6 +1,7 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package ai.vespa.feed.client; +import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; import java.io.ByteArrayInputStream; @@ -14,6 +15,7 @@ import java.util.Collection; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; import java.util.concurrent.atomic.AtomicBoolean; @@ -148,7 +150,7 @@ class JsonFeederTest { " }\n" + " }\n"; Result result = feeder.feedSingle(json).get(); - assertEquals(DocumentId.of("id:ns:type::abc1"), result.documentId()); + Assertions.assertEquals(DocumentId.of("id:ns:type::abc1"), result.documentId()); assertEquals(Result.Type.success, result.type()); assertEquals("success", result.resultMessage().get()); client.assertPutOperation("abc1", "{\"fields\":{\n \"lul\":\"lal\"\n }}"); @@ -188,7 +190,12 @@ class JsonFeederTest { public void close(boolean graceful) { } private CompletableFuture<Result> createSuccessResult(DocumentId documentId) { - return CompletableFuture.completedFuture(new Result(Result.Type.success, documentId, "success", null)); + return CompletableFuture.completedFuture(new Result(){ + @Override public Type type() { return Type.success; } + @Override public DocumentId documentId() { return documentId; } + @Override public Optional<String> resultMessage() { return Optional.of("success"); } + @Override public Optional<String> traceMessage() { return Optional.empty(); } + }); } void assertDocumentIds(Collection<DocumentId> keys, String... 
expectedUserSpecificIds) { diff --git a/vespa-feed-client/src/test/java/ai/vespa/feed/client/examples/JsonFileFeederExample.java b/vespa-feed-client-api/src/test/java/ai/vespa/feed/client/examples/JsonFileFeederExample.java index b951fb62fb5..b951fb62fb5 100644 --- a/vespa-feed-client/src/test/java/ai/vespa/feed/client/examples/JsonFileFeederExample.java +++ b/vespa-feed-client-api/src/test/java/ai/vespa/feed/client/examples/JsonFileFeederExample.java diff --git a/vespa-feed-client/src/test/java/ai/vespa/feed/client/examples/JsonStreamFeederExample.java b/vespa-feed-client-api/src/test/java/ai/vespa/feed/client/examples/JsonStreamFeederExample.java index 3d4ce150fcf..3d4ce150fcf 100644 --- a/vespa-feed-client/src/test/java/ai/vespa/feed/client/examples/JsonStreamFeederExample.java +++ b/vespa-feed-client-api/src/test/java/ai/vespa/feed/client/examples/JsonStreamFeederExample.java diff --git a/vespa-feed-client/src/test/java/ai/vespa/feed/client/examples/SimpleExample.java b/vespa-feed-client-api/src/test/java/ai/vespa/feed/client/examples/SimpleExample.java index 4e6473a6568..4e6473a6568 100644 --- a/vespa-feed-client/src/test/java/ai/vespa/feed/client/examples/SimpleExample.java +++ b/vespa-feed-client-api/src/test/java/ai/vespa/feed/client/examples/SimpleExample.java diff --git a/vespa-feed-client-cli/pom.xml b/vespa-feed-client-cli/pom.xml index aff625fe3a4..16d6f8827f2 100644 --- a/vespa-feed-client-cli/pom.xml +++ b/vespa-feed-client-cli/pom.xml @@ -74,7 +74,7 @@ <attach>false</attach> <archive> <manifest> - <mainClass>ai.vespa.feed.client.CliClient</mainClass> + <mainClass>ai.vespa.feed.client.impl.CliClient</mainClass> </manifest> </archive> <descriptorRefs> diff --git a/vespa-feed-client-cli/src/main/java/ai/vespa/feed/client/CliArguments.java b/vespa-feed-client-cli/src/main/java/ai/vespa/feed/client/impl/CliArguments.java index 0de81d2de36..2fc7e5af7b4 100644 --- a/vespa-feed-client-cli/src/main/java/ai/vespa/feed/client/CliArguments.java +++ 
b/vespa-feed-client-cli/src/main/java/ai/vespa/feed/client/impl/CliArguments.java @@ -1,5 +1,5 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -package ai.vespa.feed.client; +package ai.vespa.feed.client.impl; import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.CommandLineParser; diff --git a/vespa-feed-client-cli/src/main/java/ai/vespa/feed/client/CliClient.java b/vespa-feed-client-cli/src/main/java/ai/vespa/feed/client/impl/CliClient.java index e40b543f26a..7e036b8dec3 100644 --- a/vespa-feed-client-cli/src/main/java/ai/vespa/feed/client/CliClient.java +++ b/vespa-feed-client-cli/src/main/java/ai/vespa/feed/client/impl/CliClient.java @@ -1,7 +1,14 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -package ai.vespa.feed.client; +package ai.vespa.feed.client.impl; +import ai.vespa.feed.client.FeedClient; +import ai.vespa.feed.client.FeedClientBuilder; +import ai.vespa.feed.client.FeedException; +import ai.vespa.feed.client.JsonFeeder; import ai.vespa.feed.client.JsonFeeder.ResultCallback; +import ai.vespa.feed.client.OperationStats; +import ai.vespa.feed.client.Result; +import ai.vespa.feed.client.ResultException; import com.fasterxml.jackson.core.JsonFactory; import com.fasterxml.jackson.core.JsonGenerator; diff --git a/vespa-feed-client-cli/src/main/sh/vespa-feed-client-standalone.sh b/vespa-feed-client-cli/src/main/sh/vespa-feed-client-standalone.sh index b236a516691..c4e70c362b0 100755 --- a/vespa-feed-client-cli/src/main/sh/vespa-feed-client-standalone.sh +++ b/vespa-feed-client-cli/src/main/sh/vespa-feed-client-standalone.sh @@ -6,4 +6,4 @@ exec java \ -Xms128m -Xmx2048m \ --add-opens=java.base/sun.security.ssl=ALL-UNNAMED \ -Djava.util.logging.config.file=`dirname $0`/logging.properties \ --cp `dirname $0`/vespa-feed-client-cli-jar-with-dependencies.jar ai.vespa.feed.client.CliClient "$@" +-cp `dirname 
$0`/vespa-feed-client-cli-jar-with-dependencies.jar ai.vespa.feed.client.impl.CliClient "$@" diff --git a/vespa-feed-client-cli/src/main/sh/vespa-feed-client.sh b/vespa-feed-client-cli/src/main/sh/vespa-feed-client.sh index fbd172e7423..7dbdc056524 100755 --- a/vespa-feed-client-cli/src/main/sh/vespa-feed-client.sh +++ b/vespa-feed-client-cli/src/main/sh/vespa-feed-client.sh @@ -81,4 +81,4 @@ exec java \ -Xms128m -Xmx2048m $(getJavaOptionsIPV46) \ --add-opens=java.base/sun.security.ssl=ALL-UNNAMED \ -Djava.util.logging.config.file=${VESPA_HOME}/conf/vespa-feed-client/logging.properties \ --cp ${VESPA_HOME}/lib/jars/vespa-feed-client-cli-jar-with-dependencies.jar ai.vespa.feed.client.CliClient "$@" +-cp ${VESPA_HOME}/lib/jars/vespa-feed-client-cli-jar-with-dependencies.jar ai.vespa.feed.client.impl.CliClient "$@" diff --git a/vespa-feed-client-cli/src/test/java/ai/vespa/feed/client/CliArgumentsTest.java b/vespa-feed-client-cli/src/test/java/ai/vespa/feed/client/impl/CliArgumentsTest.java index 622956db530..19b93c3172b 100644 --- a/vespa-feed-client-cli/src/test/java/ai/vespa/feed/client/CliArgumentsTest.java +++ b/vespa-feed-client-cli/src/test/java/ai/vespa/feed/client/impl/CliArgumentsTest.java @@ -1,6 +1,7 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
-package ai.vespa.feed.client; +package ai.vespa.feed.client.impl; +import ai.vespa.feed.client.impl.CliArguments; import org.junit.jupiter.api.Test; import java.io.ByteArrayOutputStream; diff --git a/vespa-feed-client/pom.xml b/vespa-feed-client/pom.xml index 68c9e4b4b7c..a53e7f78b20 100644 --- a/vespa-feed-client/pom.xml +++ b/vespa-feed-client/pom.xml @@ -34,6 +34,11 @@ <artifactId>jackson-core</artifactId> <scope>compile</scope> </dependency> + <dependency> + <groupId>com.yahoo.vespa</groupId> + <artifactId>vespa-feed-client-api</artifactId> + <version>${project.version}</version> + </dependency> <!-- test scope --> <dependency> @@ -72,17 +77,13 @@ <executable>src/main/sh/vespa-version-generator.sh</executable> <arguments> <argument>${project.basedir}/../dist/vtag.map</argument> - <argument>${project.build.directory}/generated-sources/vespa-version/ai/vespa/feed/client/Vespa.java</argument> + <argument>${project.build.directory}/generated-sources/vespa-version/ai/vespa/feed/client/impl/Vespa.java</argument> </arguments> <sourceRoot>${project.build.directory}/generated-sources/vespa-version</sourceRoot> </configuration> </execution> </executions> </plugin> - <plugin> - <groupId>com.yahoo.vespa</groupId> - <artifactId>abi-check-plugin</artifactId> - </plugin> </plugins> </build> </project> diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/ApacheCluster.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/ApacheCluster.java index 52d7af2fb31..1874bd42e16 100644 --- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/ApacheCluster.java +++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/ApacheCluster.java @@ -1,6 +1,7 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
-package ai.vespa.feed.client; +package ai.vespa.feed.client.impl; +import ai.vespa.feed.client.HttpResponse; import org.apache.hc.client5.http.async.methods.SimpleHttpRequest; import org.apache.hc.client5.http.async.methods.SimpleHttpResponse; import org.apache.hc.client5.http.config.RequestConfig; @@ -18,11 +19,14 @@ import org.apache.hc.core5.util.Timeout; import javax.net.ssl.SSLContext; import java.io.IOException; import java.net.URI; -import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.concurrent.CompletableFuture; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import static java.nio.charset.StandardCharsets.UTF_8; @@ -40,10 +44,12 @@ class ApacheCluster implements Cluster { private final RequestConfig defaultConfig = RequestConfig.custom() .setConnectTimeout(Timeout.ofSeconds(10)) .setConnectionRequestTimeout(Timeout.DISABLED) - .setResponseTimeout(Timeout.ofMinutes(5)) + .setResponseTimeout(Timeout.ofSeconds(190)) .build(); - ApacheCluster(FeedClientBuilder builder) throws IOException { + private final ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor(t -> new Thread(t, "request-timeout-thread")); + + ApacheCluster(FeedClientBuilderImpl builder) throws IOException { for (URI endpoint : builder.endpoints) for (int i = 0; i < builder.connectionsPerEndpoint; i++) endpoints.add(new Endpoint(createHttpClient(builder), endpoint)); @@ -59,6 +65,7 @@ class ApacheCluster implements Cluster { min = endpoints.get(i).inflight.get(); } Endpoint endpoint = endpoints.get(index); + endpoint.inflight.incrementAndGet(); try { SimpleHttpRequest request = new SimpleHttpRequest(wrapped.method(), wrapped.path()); @@ -70,13 +77,15 @@ class ApacheCluster implements Cluster { if (wrapped.body() != 
null) request.setBody(wrapped.body(), ContentType.APPLICATION_JSON); - endpoint.inflight.incrementAndGet(); - endpoint.client.execute(request, - new FutureCallback<SimpleHttpResponse>() { - @Override public void completed(SimpleHttpResponse response) { vessel.complete(new ApacheHttpResponse(response)); } - @Override public void failed(Exception ex) { vessel.completeExceptionally(ex); } - @Override public void cancelled() { vessel.cancel(false); } - }); + Future<?> future = endpoint.client.execute(request, + new FutureCallback<SimpleHttpResponse>() { + @Override public void completed(SimpleHttpResponse response) { vessel.complete(new ApacheHttpResponse(response)); } + @Override public void failed(Exception ex) { vessel.completeExceptionally(ex); } + @Override public void cancelled() { vessel.cancel(false); } + }); + long timeoutMillis = wrapped.timeout() == null ? 200_000 : wrapped.timeout().toMillis() * 11 / 10 + 1_000; + Future<?> cancellation = executor.schedule(() -> { future.cancel(true); vessel.cancel(true); }, timeoutMillis, TimeUnit.MILLISECONDS); + vessel.whenComplete((__, ___) -> cancellation.cancel(true)); } catch (Throwable thrown) { vessel.completeExceptionally(thrown); @@ -87,7 +96,7 @@ class ApacheCluster implements Cluster { @Override public void close() { Throwable thrown = null; - for (Endpoint endpoint : endpoints) + for (Endpoint endpoint : endpoints) { try { endpoint.client.close(); } @@ -95,6 +104,8 @@ class ApacheCluster implements Cluster { if (thrown == null) thrown = t; else thrown.addSuppressed(t); } + } + executor.shutdownNow().forEach(Runnable::run); if (thrown != null) throw new RuntimeException(thrown); } @@ -114,7 +125,7 @@ class ApacheCluster implements Cluster { } - private static CloseableHttpAsyncClient createHttpClient(FeedClientBuilder builder) throws IOException { + private static CloseableHttpAsyncClient createHttpClient(FeedClientBuilderImpl builder) throws IOException { SSLContext sslContext = builder.constructSslContext(); 
String[] allowedCiphers = excludeH2Blacklisted(excludeWeak(sslContext.getSupportedSSLParameters().getCipherSuites())); if (allowedCiphers.length == 0) diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/BenchmarkingCluster.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/BenchmarkingCluster.java index 05ff6e99308..40049bad217 100644 --- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/BenchmarkingCluster.java +++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/BenchmarkingCluster.java @@ -1,5 +1,8 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -package ai.vespa.feed.client; +package ai.vespa.feed.client.impl; + +import ai.vespa.feed.client.HttpResponse; +import ai.vespa.feed.client.OperationStats; import java.util.HashMap; import java.util.Map; diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/Cluster.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/Cluster.java index 57c028426fe..ee9188fdc2b 100644 --- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/Cluster.java +++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/Cluster.java @@ -1,8 +1,10 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -package ai.vespa.feed.client; +package ai.vespa.feed.client.impl; + +import ai.vespa.feed.client.HttpResponse; +import ai.vespa.feed.client.OperationStats; import java.io.Closeable; -import java.util.Collections; import java.util.concurrent.CompletableFuture; /** diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/DryrunCluster.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/DryrunCluster.java index 282e4e14285..96cf7998681 100644 --- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/DryrunCluster.java +++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/DryrunCluster.java @@ -1,5 +1,7 @@ // Copyright Yahoo. 
Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -package ai.vespa.feed.client; +package ai.vespa.feed.client.impl; + +import ai.vespa.feed.client.HttpResponse; import java.nio.charset.StandardCharsets; import java.time.Duration; diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/DynamicThrottler.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/DynamicThrottler.java index a379a8b066b..5969fe267c0 100644 --- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/DynamicThrottler.java +++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/DynamicThrottler.java @@ -1,7 +1,8 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -package ai.vespa.feed.client; +package ai.vespa.feed.client.impl; + +import ai.vespa.feed.client.HttpResponse; -import java.util.Arrays; import java.util.concurrent.CompletableFuture; import java.util.concurrent.atomic.AtomicLong; @@ -25,7 +26,7 @@ public class DynamicThrottler extends StaticThrottler { private long startNanos = System.nanoTime(); private long sent = 0; - public DynamicThrottler(FeedClientBuilder builder) { + public DynamicThrottler(FeedClientBuilderImpl builder) { super(builder); targetInflight = new AtomicLong(8 * minInflight); } diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/FeedClientBuilder.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/FeedClientBuilderImpl.java index cdf55f0ba7e..7dafeb0b541 100644 --- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/FeedClientBuilder.java +++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/FeedClientBuilderImpl.java @@ -1,5 +1,8 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
-package ai.vespa.feed.client; +package ai.vespa.feed.client.impl; + +import ai.vespa.feed.client.FeedClient; +import ai.vespa.feed.client.FeedClientBuilder; import javax.net.ssl.HostnameVerifier; import javax.net.ssl.SSLContext; @@ -16,6 +19,7 @@ import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.function.Supplier; import static java.util.Objects.requireNonNull; @@ -26,18 +30,18 @@ import static java.util.Objects.requireNonNull; * @author bjorncs * @author jonmv */ -public class FeedClientBuilder { +public class FeedClientBuilderImpl implements FeedClientBuilder { static final FeedClient.RetryStrategy defaultRetryStrategy = new FeedClient.RetryStrategy() { }; - final List<URI> endpoints; + List<URI> endpoints; final Map<String, Supplier<String>> requestHeaders = new HashMap<>(); SSLContext sslContext; HostnameVerifier hostnameVerifier; int connectionsPerEndpoint = 4; int maxStreamsPerConnection = 4096; FeedClient.RetryStrategy retryStrategy = defaultRetryStrategy; - FeedClient.CircuitBreaker circuitBreaker = new GracePeriodCircuitBreaker(Duration.ofSeconds(10), Duration.ofMinutes(10)); + FeedClient.CircuitBreaker circuitBreaker = new GracePeriodCircuitBreaker(Duration.ofSeconds(10)); Path certificateFile; Path privateKeyFile; Path caCertificatesFile; @@ -47,72 +51,65 @@ public class FeedClientBuilder { boolean benchmark = true; boolean dryrun = false; - /** Creates a builder for a single container endpoint **/ - public static FeedClientBuilder create(URI endpoint) { return new FeedClientBuilder(Collections.singletonList(endpoint)); } - /** Creates a builder for multiple container endpoints **/ - public static FeedClientBuilder create(List<URI> endpoints) { return new FeedClientBuilder(endpoints); } - private FeedClientBuilder(List<URI> endpoints) { + public FeedClientBuilderImpl() { + } + + FeedClientBuilderImpl(List<URI> endpoints) { + this(); + 
setEndpointUris(endpoints); + } + + @Override + public FeedClientBuilder setEndpointUris(List<URI> endpoints) { if (endpoints.isEmpty()) throw new IllegalArgumentException("At least one endpoint must be provided"); for (URI endpoint : endpoints) requireNonNull(endpoint.getHost()); - this.endpoints = new ArrayList<>(endpoints); + return this; } - /** - * Sets the number of connections this client will use per endpoint. - * - * A reasonable value here is a value that lets all feed clients (if more than one) - * collectively have a number of connections which is a small multiple of the numbers - * of containers in the cluster to feed, so load can be balanced across these containers. - * In general, this value should be kept as low as possible, but poor connectivity - * between feeder and cluster may also warrant a higher number of connections. - */ - public FeedClientBuilder setConnectionsPerEndpoint(int max) { + @Override + public FeedClientBuilderImpl setConnectionsPerEndpoint(int max) { if (max < 1) throw new IllegalArgumentException("Max connections must be at least 1, but was " + max); this.connectionsPerEndpoint = max; return this; } - /** - * Sets the maximum number of streams per HTTP/2 connection for this client. - * - * This determines the maximum number of concurrent, inflight requests for this client, - * which is {@code maxConnections * maxStreamsPerConnection}. Prefer more streams over - * more connections, when possible. - * The feed client automatically throttles load to achieve the best throughput, and the - * actual number of streams per connection is usually lower than the maximum. - */ - public FeedClientBuilder setMaxStreamPerConnection(int max) { + @Override + public FeedClientBuilderImpl setMaxStreamPerConnection(int max) { if (max < 1) throw new IllegalArgumentException("Max streams per connection must be at least 1, but was " + max); this.maxStreamsPerConnection = max; return this; } /** Sets {@link SSLContext} instance. 
*/ - public FeedClientBuilder setSslContext(SSLContext context) { + @Override + public FeedClientBuilderImpl setSslContext(SSLContext context) { this.sslContext = requireNonNull(context); return this; } /** Sets {@link HostnameVerifier} instance (e.g for disabling default SSL hostname verification). */ - public FeedClientBuilder setHostnameVerifier(HostnameVerifier verifier) { + @Override + public FeedClientBuilderImpl setHostnameVerifier(HostnameVerifier verifier) { this.hostnameVerifier = requireNonNull(verifier); return this; } /** Turns off benchmarking. Attempting to get {@link FeedClient#stats()} will result in an exception. */ - public FeedClientBuilder noBenchmarking() { + @Override + public FeedClientBuilderImpl noBenchmarking() { this.benchmark = false; return this; } /** Adds HTTP request header to all client requests. */ - public FeedClientBuilder addRequestHeader(String name, String value) { + @Override + public FeedClientBuilderImpl addRequestHeader(String name, String value) { return addRequestHeader(name, () -> requireNonNull(value)); } @@ -120,7 +117,8 @@ public class FeedClientBuilder { * Adds HTTP request header to all client requests. Value {@link Supplier} is invoked for each HTTP request, * i.e. value can be dynamically updated during a feed. */ - public FeedClientBuilder addRequestHeader(String name, Supplier<String> valueSupplier) { + @Override + public FeedClientBuilderImpl addRequestHeader(String name, Supplier<String> valueSupplier) { this.requestHeaders.put(requireNonNull(name), requireNonNull(valueSupplier)); return this; } @@ -129,7 +127,8 @@ public class FeedClientBuilder { * Overrides default retry strategy. 
* @see FeedClient.RetryStrategy */ - public FeedClientBuilder setRetryStrategy(FeedClient.RetryStrategy strategy) { + @Override + public FeedClientBuilderImpl setRetryStrategy(FeedClient.RetryStrategy strategy) { this.retryStrategy = requireNonNull(strategy); return this; } @@ -138,31 +137,36 @@ public class FeedClientBuilder { * Overrides default circuit breaker. * @see FeedClient.CircuitBreaker */ - public FeedClientBuilder setCircuitBreaker(FeedClient.CircuitBreaker breaker) { + @Override + public FeedClientBuilderImpl setCircuitBreaker(FeedClient.CircuitBreaker breaker) { this.circuitBreaker = requireNonNull(breaker); return this; } /** Sets path to client SSL certificate/key PEM files */ - public FeedClientBuilder setCertificate(Path certificatePemFile, Path privateKeyPemFile) { + @Override + public FeedClientBuilderImpl setCertificate(Path certificatePemFile, Path privateKeyPemFile) { this.certificateFile = certificatePemFile; this.privateKeyFile = privateKeyPemFile; return this; } /** Sets client SSL certificates/key */ - public FeedClientBuilder setCertificate(Collection<X509Certificate> certificate, PrivateKey privateKey) { + @Override + public FeedClientBuilderImpl setCertificate(Collection<X509Certificate> certificate, PrivateKey privateKey) { this.certificate = certificate; this.privateKey = privateKey; return this; } /** Sets client SSL certificate/key */ - public FeedClientBuilder setCertificate(X509Certificate certificate, PrivateKey privateKey) { + @Override + public FeedClientBuilderImpl setCertificate(X509Certificate certificate, PrivateKey privateKey) { return setCertificate(Collections.singletonList(certificate), privateKey); } - public FeedClientBuilder setDryrun(boolean enabled) { + @Override + public FeedClientBuilderImpl setDryrun(boolean enabled) { this.dryrun = enabled; return this; } @@ -171,18 +175,21 @@ public class FeedClientBuilder { * Overrides JVM default SSL truststore * @param caCertificatesFile Path to PEM encoded file containing 
trusted certificates */ - public FeedClientBuilder setCaCertificatesFile(Path caCertificatesFile) { + @Override + public FeedClientBuilderImpl setCaCertificatesFile(Path caCertificatesFile) { this.caCertificatesFile = caCertificatesFile; return this; } /** Overrides JVM default SSL truststore */ - public FeedClientBuilder setCaCertificates(Collection<X509Certificate> caCertificates) { + @Override + public FeedClientBuilderImpl setCaCertificates(Collection<X509Certificate> caCertificates) { this.caCertificates = caCertificates; return this; } /** Constructs instance of {@link ai.vespa.feed.client.FeedClient} from builder configuration */ + @Override public FeedClient build() { try { validateConfiguration(); @@ -209,6 +216,9 @@ public class FeedClientBuilder { } private void validateConfiguration() { + if (endpoints == null) { + throw new IllegalArgumentException("At least one endpoint must be provided"); + } if (sslContext != null && ( certificateFile != null || caCertificatesFile != null || privateKeyFile != null || certificate != null || caCertificates != null || privateKey != null)) { diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/GracePeriodCircuitBreaker.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/GracePeriodCircuitBreaker.java index b878840d70f..b223fce7cab 100644 --- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/GracePeriodCircuitBreaker.java +++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/GracePeriodCircuitBreaker.java @@ -1,5 +1,8 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
-package ai.vespa.feed.client; +package ai.vespa.feed.client.impl; + +import ai.vespa.feed.client.FeedClient; +import ai.vespa.feed.client.HttpResponse; import java.time.Duration; import java.util.concurrent.atomic.AtomicBoolean; @@ -31,20 +34,32 @@ public class GracePeriodCircuitBreaker implements FeedClient.CircuitBreaker { private final long graceMillis; private final long doomMillis; + /** + * Creates a new circuit breaker with the given grace periods. + * @param grace the period of consecutive failures before state changes to half-open. + */ + public GracePeriodCircuitBreaker(Duration grace) { + this(System::currentTimeMillis, grace, null); + } + + /** + * Creates a new circuit breaker with the given grace periods. + * @param grace the period of consecutive failures before state changes to half-open. + * @param doom the period of consecutive failures before shutting down. + */ public GracePeriodCircuitBreaker(Duration grace, Duration doom) { this(System::currentTimeMillis, grace, doom); + if (doom.isNegative()) + throw new IllegalArgumentException("Doom delay must be non-negative"); } GracePeriodCircuitBreaker(LongSupplier clock, Duration grace, Duration doom) { if (grace.isNegative()) throw new IllegalArgumentException("Grace delay must be non-negative"); - if (doom.isNegative()) - throw new IllegalArgumentException("Doom delay must be non-negative"); - this.clock = requireNonNull(clock); this.graceMillis = grace.toMillis(); - this.doomMillis = doom.toMillis(); + this.doomMillis = doom == null ? -1 : doom.toMillis(); } @Override @@ -74,11 +89,11 @@ public class GracePeriodCircuitBreaker implements FeedClient.CircuitBreaker { long failingMillis = clock.getAsLong() - failingSinceMillis.get(); if (failingMillis > graceMillis && halfOpen.compareAndSet(false, true)) log.log(INFO, "Circuit breaker is now half-open, as no requests have succeeded for the " + - "last " + failingMillis + "ms. 
The server will be pinged to see if it recovers, " + - "but this client will give up if no successes are observed within " + doomMillis + "ms. " + - "First failure was '" + detail.get() + "'."); + "last " + failingMillis + "ms. The server will be pinged to see if it recovers" + + (doomMillis >= 0 ? ", but this client will give up if no successes are observed within " + doomMillis + "ms" : "") + + ". First failure was '" + detail.get() + "'."); - if (failingMillis > doomMillis && open.compareAndSet(false, true)) + if (doomMillis >= 0 && failingMillis > doomMillis && open.compareAndSet(false, true)) log.log(WARNING, "Circuit breaker is now open, after " + doomMillis + "ms of failing request, " + "and this client will give up and abort its remaining feed operations."); diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpFeedClient.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/HttpFeedClient.java index eb818ba1d48..c136d697a0b 100644 --- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpFeedClient.java +++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/HttpFeedClient.java @@ -1,6 +1,15 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
-package ai.vespa.feed.client; - +package ai.vespa.feed.client.impl; + +import ai.vespa.feed.client.DocumentId; +import ai.vespa.feed.client.FeedClient; +import ai.vespa.feed.client.FeedException; +import ai.vespa.feed.client.HttpResponse; +import ai.vespa.feed.client.OperationParameters; +import ai.vespa.feed.client.OperationStats; +import ai.vespa.feed.client.Result; +import ai.vespa.feed.client.ResultException; +import ai.vespa.feed.client.ResultParseException; import com.fasterxml.jackson.core.JsonFactory; import com.fasterxml.jackson.core.JsonParser; import com.fasterxml.jackson.core.JsonToken; @@ -33,11 +42,11 @@ class HttpFeedClient implements FeedClient { private final RequestStrategy requestStrategy; private final AtomicBoolean closed = new AtomicBoolean(); - HttpFeedClient(FeedClientBuilder builder) throws IOException { + HttpFeedClient(FeedClientBuilderImpl builder) throws IOException { this(builder, new HttpRequestStrategy(builder)); } - HttpFeedClient(FeedClientBuilder builder, RequestStrategy requestStrategy) { + HttpFeedClient(FeedClientBuilderImpl builder, RequestStrategy requestStrategy) { this.requestHeaders = new HashMap<>(builder.requestHeaders); this.requestStrategy = requestStrategy; } @@ -83,7 +92,8 @@ class HttpFeedClient implements FeedClient { HttpRequest request = new HttpRequest(method, getPath(documentId) + getQuery(params), requestHeaders, - operationJson == null ? null : operationJson.getBytes(UTF_8)); // TODO: make it bytes all the way? + operationJson == null ? null : operationJson.getBytes(UTF_8), // TODO: make it bytes all the way? 
+ params.timeout().orElse(null)); CompletableFuture<Result> promise = new CompletableFuture<>(); requestStrategy.enqueue(documentId, request) @@ -173,7 +183,7 @@ class HttpFeedClient implements FeedClient { if (outcome == Outcome.vespaFailure) throw new ResultException(documentId, message, trace); - return new Result(toResultType(outcome), documentId, message, trace); + return new ResultImpl(toResultType(outcome), documentId, message, trace); } static String getPath(DocumentId documentId) { diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpRequest.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/HttpRequest.java index 48defd71ea8..0ad7b82347e 100644 --- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpRequest.java +++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/HttpRequest.java @@ -1,6 +1,7 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -package ai.vespa.feed.client; +package ai.vespa.feed.client.impl; +import java.time.Duration; import java.util.Map; import java.util.function.Supplier; @@ -10,12 +11,14 @@ class HttpRequest { private final String path; private final Map<String, Supplier<String>> headers; private final byte[] body; + private final Duration timeout; - public HttpRequest(String method, String path, Map<String, Supplier<String>> headers, byte[] body) { + public HttpRequest(String method, String path, Map<String, Supplier<String>> headers, byte[] body, Duration timeout) { this.method = method; this.path = path; this.headers = headers; this.body = body; + this.timeout = timeout; } public String method() { @@ -34,6 +37,10 @@ class HttpRequest { return body; } + public Duration timeout() { + return timeout; + } + @Override public String toString() { return method + " " + path; diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpRequestStrategy.java 
b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/HttpRequestStrategy.java index cf65a874f3b..6fec0029bc3 100644 --- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpRequestStrategy.java +++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/HttpRequestStrategy.java @@ -1,8 +1,13 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -package ai.vespa.feed.client; +package ai.vespa.feed.client.impl; +import ai.vespa.feed.client.DocumentId; +import ai.vespa.feed.client.FeedClient; import ai.vespa.feed.client.FeedClient.CircuitBreaker; import ai.vespa.feed.client.FeedClient.RetryStrategy; +import ai.vespa.feed.client.FeedException; +import ai.vespa.feed.client.HttpResponse ; +import ai.vespa.feed.client.OperationStats; import java.io.IOException; import java.nio.channels.CancelledKeyException; @@ -62,11 +67,11 @@ class HttpRequestStrategy implements RequestStrategy { return thread; }); - HttpRequestStrategy(FeedClientBuilder builder) throws IOException { + HttpRequestStrategy(FeedClientBuilderImpl builder) throws IOException { this(builder, builder.dryrun ? new DryrunCluster() : new ApacheCluster(builder)); } - HttpRequestStrategy(FeedClientBuilder builder, Cluster cluster) { + HttpRequestStrategy(FeedClientBuilderImpl builder, Cluster cluster) { this.cluster = builder.benchmark ? new BenchmarkingCluster(cluster) : cluster; this.strategy = builder.retryStrategy; this.breaker = builder.circuitBreaker; diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/RequestStrategy.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/RequestStrategy.java index 9a97f7daa66..e3b6b594593 100644 --- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/RequestStrategy.java +++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/RequestStrategy.java @@ -1,7 +1,10 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
-package ai.vespa.feed.client; +package ai.vespa.feed.client.impl; +import ai.vespa.feed.client.DocumentId; import ai.vespa.feed.client.FeedClient.CircuitBreaker.State; +import ai.vespa.feed.client.HttpResponse; +import ai.vespa.feed.client.OperationStats; import java.util.concurrent.CompletableFuture; diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/Result.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/ResultImpl.java index 5ff3fd0a219..dabf76cba34 100644 --- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/Result.java +++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/ResultImpl.java @@ -1,5 +1,8 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -package ai.vespa.feed.client; +package ai.vespa.feed.client.impl; + +import ai.vespa.feed.client.DocumentId; +import ai.vespa.feed.client.Result; import java.util.Optional; @@ -9,29 +12,24 @@ import java.util.Optional; * @author bjorncs * @author jonmv */ -public class Result { +public class ResultImpl implements Result { private final Type type; private final DocumentId documentId; private final String resultMessage; private final String traceMessage; - Result(Type type, DocumentId documentId, String resultMessage, String traceMessage) { + ResultImpl(Type type, DocumentId documentId, String resultMessage, String traceMessage) { this.type = type; this.documentId = documentId; this.resultMessage = resultMessage; this.traceMessage = traceMessage; } - public enum Type { - success, - conditionNotMet - } - - public Type type() { return type; } - public DocumentId documentId() { return documentId; } - public Optional<String> resultMessage() { return Optional.ofNullable(resultMessage); } - public Optional<String> traceMessage() { return Optional.ofNullable(traceMessage); } + @Override public Type type() { return type; } + @Override public DocumentId documentId() { return documentId; } + @Override public 
Optional<String> resultMessage() { return Optional.ofNullable(resultMessage); } + @Override public Optional<String> traceMessage() { return Optional.ofNullable(traceMessage); } @Override public String toString() { diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/SslContextBuilder.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/SslContextBuilder.java index f5e13eccd56..2ca4577abe6 100644 --- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/SslContextBuilder.java +++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/SslContextBuilder.java @@ -1,5 +1,5 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -package ai.vespa.feed.client; +package ai.vespa.feed.client.impl; import org.bouncycastle.asn1.ASN1ObjectIdentifier; import org.bouncycastle.asn1.pkcs.PKCSObjectIdentifiers; diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/StaticThrottler.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/StaticThrottler.java index 5137a18d923..1f9cf8e5155 100644 --- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/StaticThrottler.java +++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/StaticThrottler.java @@ -1,5 +1,7 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
-package ai.vespa.feed.client; +package ai.vespa.feed.client.impl; + +import ai.vespa.feed.client.HttpResponse; import java.util.concurrent.CompletableFuture; import java.util.concurrent.atomic.AtomicLong; @@ -18,7 +20,7 @@ public class StaticThrottler implements Throttler { protected final long minInflight; private final AtomicLong targetX10; - public StaticThrottler(FeedClientBuilder builder) { + public StaticThrottler(FeedClientBuilderImpl builder) { minInflight = 16L * builder.connectionsPerEndpoint * builder.endpoints.size(); maxInflight = 256 * minInflight; // 4096 max streams per connection on the server side. targetX10 = new AtomicLong(10 * maxInflight); // 10x the actual value to allow for smaller updates. diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/Throttler.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/Throttler.java index f2453c27879..700a6f6f805 100644 --- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/Throttler.java +++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/Throttler.java @@ -1,5 +1,7 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -package ai.vespa.feed.client; +package ai.vespa.feed.client.impl; + +import ai.vespa.feed.client.HttpResponse; import java.util.concurrent.CompletableFuture; diff --git a/vespa-feed-client/src/main/resources/META-INF.services/ai.vespa.feed.client.FeedClientBuilder b/vespa-feed-client/src/main/resources/META-INF.services/ai.vespa.feed.client.FeedClientBuilder new file mode 100644 index 00000000000..b6e28b1806c --- /dev/null +++ b/vespa-feed-client/src/main/resources/META-INF.services/ai.vespa.feed.client.FeedClientBuilder @@ -0,0 +1,2 @@ +# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +ai.vespa.feed.client.impl.FeedClientBuilderImpl
\ No newline at end of file diff --git a/vespa-feed-client/src/main/sh/vespa-version-generator.sh b/vespa-feed-client/src/main/sh/vespa-version-generator.sh index 5aafb3e2bf7..44fb7d167db 100755 --- a/vespa-feed-client/src/main/sh/vespa-version-generator.sh +++ b/vespa-feed-client/src/main/sh/vespa-version-generator.sh @@ -16,7 +16,7 @@ mkdir -p $destinationDir versionNumber=$(cat $source | grep V_TAG_COMPONENT | awk '{print $2}' ) cat > $destination <<- END -package ai.vespa.feed.client; +package ai.vespa.feed.client.impl; class Vespa { static final String VERSION = "$versionNumber"; diff --git a/vespa-feed-client/src/test/java/ai/vespa/feed/client/DocumentIdTest.java b/vespa-feed-client/src/test/java/ai/vespa/feed/client/impl/DocumentIdTest.java index df790056309..61526b80fe7 100644 --- a/vespa-feed-client/src/test/java/ai/vespa/feed/client/DocumentIdTest.java +++ b/vespa-feed-client/src/test/java/ai/vespa/feed/client/impl/DocumentIdTest.java @@ -1,6 +1,8 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
-package ai.vespa.feed.client; +package ai.vespa.feed.client.impl; +import ai.vespa.feed.client.DocumentId; +import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -14,8 +16,8 @@ class DocumentIdTest { @Test void testParsing() { - assertEquals("id:ns:type::user", - DocumentId.of("id:ns:type::user").toString()); + Assertions.assertEquals("id:ns:type::user", + DocumentId.of("id:ns:type::user").toString()); assertEquals("id:ns:type:n=123:user", DocumentId.of("id:ns:type:n=123:user").toString()); diff --git a/vespa-feed-client/src/test/java/ai/vespa/feed/client/GracePeriodCircuitBreakerTest.java b/vespa-feed-client/src/test/java/ai/vespa/feed/client/impl/GracePeriodCircuitBreakerTest.java index 8eaffc3e9be..b7dac5ce52e 100644 --- a/vespa-feed-client/src/test/java/ai/vespa/feed/client/GracePeriodCircuitBreakerTest.java +++ b/vespa-feed-client/src/test/java/ai/vespa/feed/client/impl/GracePeriodCircuitBreakerTest.java @@ -1,5 +1,5 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -package ai.vespa.feed.client; +package ai.vespa.feed.client.impl; import ai.vespa.feed.client.FeedClient.CircuitBreaker; import org.junit.jupiter.api.Test; diff --git a/vespa-feed-client/src/test/java/ai/vespa/feed/client/HttpFeedClientTest.java b/vespa-feed-client/src/test/java/ai/vespa/feed/client/impl/HttpFeedClientTest.java index d92958a5838..5353ab92fb6 100644 --- a/vespa-feed-client/src/test/java/ai/vespa/feed/client/HttpFeedClientTest.java +++ b/vespa-feed-client/src/test/java/ai/vespa/feed/client/impl/HttpFeedClientTest.java @@ -1,10 +1,19 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
-package ai.vespa.feed.client; +package ai.vespa.feed.client.impl; +import ai.vespa.feed.client.DocumentId; +import ai.vespa.feed.client.FeedClient; +import ai.vespa.feed.client.FeedClientBuilder; +import ai.vespa.feed.client.HttpResponse; +import ai.vespa.feed.client.OperationParameters; +import ai.vespa.feed.client.OperationStats; +import ai.vespa.feed.client.Result; +import ai.vespa.feed.client.ResultException; import org.junit.jupiter.api.Test; import java.net.URI; import java.time.Duration; +import java.util.Collections; import java.util.Optional; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; @@ -33,7 +42,7 @@ class HttpFeedClientTest { @Override public void await() { throw new UnsupportedOperationException(); } @Override public CompletableFuture<HttpResponse> enqueue(DocumentId documentId, HttpRequest request) { return dispatch.get().apply(documentId, request); } } - FeedClient client = new HttpFeedClient(FeedClientBuilder.create(URI.create("https://dummy:123")), new MockRequestStrategy()); + FeedClient client = new HttpFeedClient(new FeedClientBuilderImpl(Collections.singletonList(URI.create("https://dummy:123"))), new MockRequestStrategy()); // Update is a PUT, and 200 OK is a success. dispatch.set((documentId, request) -> { diff --git a/vespa-feed-client/src/test/java/ai/vespa/feed/client/HttpRequestStrategyTest.java b/vespa-feed-client/src/test/java/ai/vespa/feed/client/impl/HttpRequestStrategyTest.java index 11b844f2c69..d7be4ead078 100644 --- a/vespa-feed-client/src/test/java/ai/vespa/feed/client/HttpRequestStrategyTest.java +++ b/vespa-feed-client/src/test/java/ai/vespa/feed/client/impl/HttpRequestStrategyTest.java @@ -1,19 +1,23 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
-package ai.vespa.feed.client; +package ai.vespa.feed.client.impl; +import ai.vespa.feed.client.DocumentId; +import ai.vespa.feed.client.FeedClient; import ai.vespa.feed.client.FeedClient.CircuitBreaker; -import org.apache.hc.core5.http.ContentType; +import ai.vespa.feed.client.FeedException; +import ai.vespa.feed.client.HttpResponse; +import ai.vespa.feed.client.OperationStats; import org.junit.jupiter.api.Test; import java.io.IOException; import java.net.URI; import java.time.Duration; +import java.util.Collections; import java.util.HashMap; import java.util.Map; import java.util.concurrent.CompletableFuture; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; -import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.Phaser; import java.util.concurrent.ScheduledExecutorService; @@ -37,12 +41,12 @@ class HttpRequestStrategyTest { @Test void testConcurrency() { int documents = 1 << 16; - HttpRequest request = new HttpRequest("PUT", "/", null, null); + HttpRequest request = new HttpRequest("PUT", "/", null, null, null); HttpResponse response = HttpResponse.of(200, "{}".getBytes(UTF_8)); ScheduledExecutorService executor = Executors.newScheduledThreadPool(1); Cluster cluster = new BenchmarkingCluster((__, vessel) -> executor.schedule(() -> vessel.complete(response), (int) (Math.random() * 2 * 10), TimeUnit.MILLISECONDS)); - HttpRequestStrategy strategy = new HttpRequestStrategy(FeedClientBuilder.create(URI.create("https://dummy.com:123")) + HttpRequestStrategy strategy = new HttpRequestStrategy( new FeedClientBuilderImpl(Collections.singletonList(URI.create("https://dummy.com:123"))) .setConnectionsPerEndpoint(1 << 10) .setMaxStreamPerConnection(1 << 12), cluster); @@ -82,7 +86,7 @@ class HttpRequestStrategyTest { MockCluster cluster = new MockCluster(); AtomicLong now = new AtomicLong(0); CircuitBreaker breaker = new GracePeriodCircuitBreaker(now::get, 
Duration.ofSeconds(1), Duration.ofMinutes(10)); - HttpRequestStrategy strategy = new HttpRequestStrategy(FeedClientBuilder.create(URI.create("https://dummy.com:123")) + HttpRequestStrategy strategy = new HttpRequestStrategy(new FeedClientBuilderImpl(Collections.singletonList(URI.create("https://dummy.com:123"))) .setRetryStrategy(new FeedClient.RetryStrategy() { @Override public boolean retry(FeedClient.OperationType type) { return type == FeedClient.OperationType.PUT; } @Override public int retries() { return 1; } @@ -95,7 +99,7 @@ class HttpRequestStrategyTest { DocumentId id1 = DocumentId.of("ns", "type", "1"); DocumentId id2 = DocumentId.of("ns", "type", "2"); - HttpRequest request = new HttpRequest("POST", "/", null, null); + HttpRequest request = new HttpRequest("POST", "/", null, null, null); // Runtime exception is not retried. cluster.expect((__, vessel) -> vessel.completeExceptionally(new RuntimeException("boom"))); @@ -136,8 +140,8 @@ class HttpRequestStrategyTest { else vessel.complete(success); }); CompletableFuture<HttpResponse> delayed = strategy.enqueue(id1, request); - CompletableFuture<HttpResponse> serialised = strategy.enqueue(id1, new HttpRequest("PUT", "/", null, null)); - assertEquals(success, strategy.enqueue(id2, new HttpRequest("DELETE", "/", null, null)).get()); + CompletableFuture<HttpResponse> serialised = strategy.enqueue(id1, new HttpRequest("PUT", "/", null, null, null)); + assertEquals(success, strategy.enqueue(id2, new HttpRequest("DELETE", "/", null, null, null)).get()); latch.await(); assertEquals(8, strategy.stats().requests()); // 3 attempts at throttled and one at id2. now.set(4000); @@ -155,7 +159,7 @@ class HttpRequestStrategyTest { // Error responses are not retried when not of appropriate type. 
cluster.expect((__, vessel) -> vessel.complete(serverError)); - assertEquals(serverError, strategy.enqueue(id1, new HttpRequest("PUT", "/", null, null)).get()); + assertEquals(serverError, strategy.enqueue(id1, new HttpRequest("PUT", "/", null, null, null)).get()); assertEquals(12, strategy.stats().requests()); // Some error responses are not retried. @@ -189,7 +193,7 @@ class HttpRequestStrategyTest { MockCluster cluster = new MockCluster(); AtomicLong now = new AtomicLong(0); CircuitBreaker breaker = new GracePeriodCircuitBreaker(now::get, Duration.ofSeconds(1), Duration.ofMinutes(10)); - HttpRequestStrategy strategy = new HttpRequestStrategy(FeedClientBuilder.create(URI.create("https://dummy.com:123")) + HttpRequestStrategy strategy = new HttpRequestStrategy(new FeedClientBuilderImpl(Collections.singletonList(URI.create("https://dummy.com:123"))) .setRetryStrategy(new FeedClient.RetryStrategy() { @Override public int retries() { return 1; } }) @@ -201,23 +205,23 @@ class HttpRequestStrategyTest { DocumentId id2 = DocumentId.of("ns", "type", "2"); DocumentId id3 = DocumentId.of("ns", "type", "3"); DocumentId id4 = DocumentId.of("ns", "type", "4"); - HttpRequest failing = new HttpRequest("POST", "/", null, null); - HttpRequest request = new HttpRequest("POST", "/", null, null); - HttpRequest blocking = new HttpRequest("POST", "/", null, null); + HttpRequest failing = new HttpRequest("POST", "/", null, null, null); + HttpRequest request = new HttpRequest("POST", "/", null, null, null); + HttpRequest blocking = new HttpRequest("POST", "/", null, null, null); // Enqueue some operations to the same id, which are serialised, and then shut down while operations are in flight. Phaser phaser = new Phaser(2); Phaser blocker = new Phaser(2); - AtomicReference<CompletableFuture<HttpResponse>> completion = new AtomicReference<>(); cluster.expect((req, vessel) -> { if (req == blocking) { - phaser.arriveAndAwaitAdvance(); // Synchronise with tst main thread, and then ... 
+ phaser.arriveAndAwaitAdvance(); // Synchronise with test main thread, and then ... blocker.arriveAndAwaitAdvance(); // ... block dispatch thread, so we get something in the queue. throw new RuntimeException("armageddon"); // Dispatch thread should die, tearing down everything. } else if (req == failing) { phaser.arriveAndAwaitAdvance(); // Let test thread enqueue more ops before failing (and retrying) this. vessel.completeExceptionally(new IOException("failed")); + phaser.arriveAndAwaitAdvance(); // Ensure a retry is scheduled before test thread is allowed to continue. } else phaser.arriveAndAwaitAdvance(); // Don't complete from mock cluster, but require destruction to do this. }); @@ -228,7 +232,8 @@ class HttpRequestStrategyTest { CompletableFuture<HttpResponse> blocked = strategy.enqueue(id3, blocking); CompletableFuture<HttpResponse> delayed = strategy.enqueue(id4, request); phaser.arriveAndAwaitAdvance(); // inflight completes dispatch, but causes no response. - phaser.arriveAndAwaitAdvance(); // failed completes dispatch, and a retry is enqueued. + phaser.arriveAndAwaitAdvance(); // failed is allowed to dispatch ... + phaser.arriveAndAwaitAdvance(); // ... and a retry is enqueued. phaser.arriveAndAwaitAdvance(); // blocked starts dispatch, and hangs, blocking dispatch thread. // Current state: inflight is "inflight to cluster", serialised1/2 are waiting completion of it; diff --git a/vespa-feed-client/src/test/java/ai/vespa/feed/client/SslContextBuilderTest.java b/vespa-feed-client/src/test/java/ai/vespa/feed/client/impl/SslContextBuilderTest.java index a74f63f5cd2..f7c1b4d2b03 100644 --- a/vespa-feed-client/src/test/java/ai/vespa/feed/client/SslContextBuilderTest.java +++ b/vespa-feed-client/src/test/java/ai/vespa/feed/client/impl/SslContextBuilderTest.java @@ -1,5 +1,5 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
-package ai.vespa.feed.client; +package ai.vespa.feed.client.impl; import org.bouncycastle.cert.jcajce.JcaX509CertificateConverter; import org.bouncycastle.cert.jcajce.JcaX509v3CertificateBuilder; @@ -8,6 +8,7 @@ import org.bouncycastle.operator.ContentSigner; import org.bouncycastle.operator.OperatorCreationException; import org.bouncycastle.operator.jcajce.JcaContentSignerBuilder; import org.bouncycastle.util.io.pem.PemObject; +import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; @@ -52,7 +53,7 @@ class SslContextBuilderTest { @Test void successfully_constructs_sslcontext_from_pem_files() { - SSLContext sslContext = assertDoesNotThrow(() -> + SSLContext sslContext = Assertions.assertDoesNotThrow(() -> new SslContextBuilder() .withCaCertificates(certificateFile) .withCertificateAndKey(certificateFile, privateKeyFile) @@ -62,13 +63,13 @@ class SslContextBuilderTest { @Test void successfully_constructs_sslcontext_when_no_builder_parameter_given() { - SSLContext sslContext = assertDoesNotThrow(() -> new SslContextBuilder().build()); + SSLContext sslContext = Assertions.assertDoesNotThrow(() -> new SslContextBuilder().build()); assertEquals("TLS", sslContext.getProtocol()); } @Test void successfully_constructs_sslcontext_with_only_certificate_file() { - SSLContext sslContext = assertDoesNotThrow(() -> + SSLContext sslContext = Assertions.assertDoesNotThrow(() -> new SslContextBuilder() .withCertificateAndKey(certificateFile, privateKeyFile) .build()); @@ -77,7 +78,7 @@ class SslContextBuilderTest { @Test void successfully_constructs_sslcontext_with_only_ca_certificate_file() { - SSLContext sslContext = assertDoesNotThrow(() -> + SSLContext sslContext = Assertions.assertDoesNotThrow(() -> new SslContextBuilder() .withCaCertificates(certificateFile) .build()); diff --git a/vespa-hadoop/src/main/java/ai/vespa/feed/client/DryrunResult.java 
b/vespa-hadoop/src/main/java/ai/vespa/feed/client/DryrunResult.java deleted file mode 100644 index 74baf9f1065..00000000000 --- a/vespa-hadoop/src/main/java/ai/vespa/feed/client/DryrunResult.java +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -package ai.vespa.feed.client; - -import ai.vespa.feed.client.Result.Type; - -/** - * Workaround for package-private {@link Result} constructor. - * - * @author bjorncs - */ -public class DryrunResult { - - private DryrunResult() {} - - public static Result create(Type type, DocumentId documentId, String resultMessage, String traceMessage) { - return new Result(type, documentId, resultMessage, traceMessage); - } -} diff --git a/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/config/FeedParams.java b/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/config/FeedParams.java index 19a28bbacaf..01f314a7e36 100644 --- a/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/config/FeedParams.java +++ b/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/config/FeedParams.java @@ -1,8 +1,6 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.vespa.http.client.config; -import com.google.common.annotations.Beta; - import java.util.concurrent.TimeUnit; /** @@ -54,7 +52,6 @@ public final class FeedParams { * @param silentUpgrade true for reducing "false" 4xx/5xx. * @return this, for chaining */ - @Beta public Builder setSilentUpgrade(boolean silentUpgrade) { this.silentUpgrade = silentUpgrade; return this; @@ -184,7 +181,6 @@ public final class FeedParams { /** * Set what frequency to poll for async responses. 
Default is 10hz (every 0.1s), but 1000hz when using SyncFeedClient */ - @Beta public Builder setIdlePollFrequency(Double idlePollFrequency) { this.idlePollFrequency = idlePollFrequency; return this; diff --git a/vespa-osgi-testrunner/pom.xml b/vespa-osgi-testrunner/pom.xml index 6ec70b08d39..de1b5b4024b 100644 --- a/vespa-osgi-testrunner/pom.xml +++ b/vespa-osgi-testrunner/pom.xml @@ -25,7 +25,7 @@ <dependency> <groupId>org.junit.jupiter</groupId> <artifactId>junit-jupiter-engine</artifactId> - <version>5.7.0</version> + <version>5.8.1</version> <exclusions> <exclusion> <groupId>org.junit.jupiter</groupId> @@ -36,7 +36,7 @@ <dependency> <groupId>org.junit.platform</groupId> <artifactId>junit-platform-launcher</artifactId> - <version>1.6.2</version> + <version>1.8.1</version> <exclusions> <exclusion> <groupId>org.junit.jupiter</groupId> @@ -58,6 +58,12 @@ <scope>compile</scope> </dependency> <dependency> + <groupId>org.fusesource.jansi</groupId> + <artifactId>jansi</artifactId> + <version>1.18</version> + <scope>compile</scope> + </dependency> + <dependency> <groupId>com.yahoo.vespa</groupId> <artifactId>config-provisioning</artifactId> <version>${project.version}</version> diff --git a/vespa-osgi-testrunner/src/main/java/com/yahoo/vespa/testrunner/HtmlLogger.java b/vespa-osgi-testrunner/src/main/java/com/yahoo/vespa/testrunner/HtmlLogger.java new file mode 100644 index 00000000000..aa1900b8446 --- /dev/null +++ b/vespa-osgi-testrunner/src/main/java/com/yahoo/vespa/testrunner/HtmlLogger.java @@ -0,0 +1,35 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+package com.yahoo.vespa.testrunner; + +import org.fusesource.jansi.HtmlAnsiOutputStream; + +import java.io.ByteArrayOutputStream; +import java.io.PrintStream; +import java.util.logging.Level; +import java.util.logging.LogRecord; + +import static java.nio.charset.StandardCharsets.UTF_8; + +/** + * Converts ANSI output to HTML-safe log records + * + * @author jonmv + */ +public class HtmlLogger { + + public static final Level HTML = new Level("html", 1) { }; + + private final ByteArrayOutputStream buffer = new ByteArrayOutputStream(); + + public LogRecord toLog(String line) { + if (line.length() > 1 << 13) + line = line.substring(0, 1 << 13) + " ... (" + (line.length() - (1 << 13)) + " bytes truncated due to size)"; + + buffer.reset(); + try (PrintStream formatter = new PrintStream(new HtmlAnsiOutputStream(buffer))) { + formatter.print(line); + } + return new LogRecord(HTML, buffer.toString(UTF_8)); + } + +} diff --git a/vespa-osgi-testrunner/src/main/java/com/yahoo/vespa/testrunner/VespaCliTestRunner.java b/vespa-osgi-testrunner/src/main/java/com/yahoo/vespa/testrunner/VespaCliTestRunner.java index 1f9216192fc..f131292597c 100644 --- a/vespa-osgi-testrunner/src/main/java/com/yahoo/vespa/testrunner/VespaCliTestRunner.java +++ b/vespa-osgi-testrunner/src/main/java/com/yahoo/vespa/testrunner/VespaCliTestRunner.java @@ -28,6 +28,7 @@ import static com.yahoo.vespa.testrunner.TestRunner.Status.ERROR; import static com.yahoo.vespa.testrunner.TestRunner.Status.FAILURE; import static com.yahoo.vespa.testrunner.TestRunner.Status.RUNNING; import static com.yahoo.vespa.testrunner.TestRunner.Status.SUCCESS; +import static com.yahoo.yolean.Exceptions.uncheck; import static java.nio.charset.StandardCharsets.UTF_8; /** @@ -39,15 +40,19 @@ public class VespaCliTestRunner implements TestRunner { private final SortedMap<Long, LogRecord> log = new ConcurrentSkipListMap<>(); private final Path artifactsPath; - private AtomicReference<Status> status = new 
AtomicReference<>(Status.NOT_STARTED); + private final Path testsPath; + private final AtomicReference<Status> status = new AtomicReference<>(Status.NOT_STARTED); + + private Path vespaCliHome = null; @Inject public VespaCliTestRunner(VespaCliTestRunnerConfig config) { - this(config.artifactsPath()); + this(config.artifactsPath(), config.testsPath()); } - VespaCliTestRunner(Path artifactsPath) { + VespaCliTestRunner(Path artifactsPath, Path testsPath) { this.artifactsPath = artifactsPath; + this.testsPath = testsPath; } @Override @@ -70,7 +75,8 @@ public class VespaCliTestRunner implements TestRunner { @Override public boolean isSupported() { - return getChildDirectory(artifactsPath, "tests").isPresent(); + return Stream.of(Suite.SYSTEM_TEST, Suite.STAGING_SETUP_TEST, Suite.STAGING_TEST) + .anyMatch(suite -> getChildDirectory(testsPath, toSuiteDirectoryName(suite)).isPresent()); } void runTests(Suite suite, byte[] config) { @@ -78,13 +84,10 @@ public class VespaCliTestRunner implements TestRunner { try { TestConfig testConfig = TestConfig.fromJson(config); process = testRunProcessBuilder(suite, testConfig).start(); - BufferedReader in = new BufferedReader(new InputStreamReader(process.getInputStream())); - in.lines().forEach(line -> { - if (line.length() > 1 << 13) - line = line.substring(0, 1 << 13) + " ... (this log entry was truncated due to size)"; - log(Level.INFO, line, null); - }); + HtmlLogger htmlLogger = new HtmlLogger(); + BufferedReader in = new BufferedReader(new InputStreamReader(process.getInputStream())); + in.lines().forEach(line -> log(htmlLogger.toLog(line))); status.set(process.waitFor() == 0 ? SUCCESS : process.waitFor() == 3 ? 
FAILURE : ERROR); } catch (Exception e) { @@ -96,17 +99,27 @@ public class VespaCliTestRunner implements TestRunner { } } + private Path ensureHomeDirectoryForVespaCli() { + if (vespaCliHome == null) { + vespaCliHome = uncheck(() -> Files.createTempDirectory(VespaCliTestRunner.class.getSimpleName())); + vespaCliHome.toFile().deleteOnExit(); + } + return vespaCliHome; + } + ProcessBuilder testRunProcessBuilder(Suite suite, TestConfig config) throws IOException { - Path suitePath = getChildDirectory(artifactsPath, "tests") - .flatMap(testsPath -> getChildDirectory(testsPath, toSuiteDirectoryName(suite))) + Path suitePath = getChildDirectory(testsPath, toSuiteDirectoryName(suite)) .orElseThrow(() -> new IllegalStateException("No tests found, for suite '" + suite + "'")); ProcessBuilder builder = new ProcessBuilder("vespa", "test", suitePath.toAbsolutePath().toString(), "--application", config.application().toFullString(), - "--endpoints", toEndpointsConfig(config), - "--data-plane-public-cert", artifactsPath.resolve("cert").toAbsolutePath().toString(), - "--data-plane-private-key", artifactsPath.resolve("key").toAbsolutePath().toString()); + "--zone", config.zone().value(), + "--target", "cloud"); builder.redirectErrorStream(true); + builder.environment().put("VESPA_CLI_HOME", ensureHomeDirectoryForVespaCli().toString()); + builder.environment().put("VESPA_CLI_ENDPOINTS", toEndpointsConfig(config)); + builder.environment().put("VESPA_CLI_DATA_PLANE_KEY_FILE", artifactsPath.resolve("key").toAbsolutePath().toString()); + builder.environment().put("VESPA_CLI_DATA_PLANE_CERT_FILE", artifactsPath.resolve("cert").toAbsolutePath().toString()); return builder; } @@ -115,6 +128,7 @@ public class VespaCliTestRunner implements TestRunner { case SYSTEM_TEST: return "system-test"; case STAGING_SETUP_TEST: return "staging-setup"; case STAGING_TEST: return "staging-test"; + case PRODUCTION_TEST: return "production-test"; default: throw new IllegalArgumentException("Unsupported test 
suite '" + suite + "'"); } } @@ -122,6 +136,10 @@ public class VespaCliTestRunner implements TestRunner { private void log(Level level, String message, Throwable thrown) { LogRecord record = new LogRecord(level, message); record.setThrown(thrown); + log(record); + } + + private void log(LogRecord record) { logger.log(record); log.put(record.getSequenceNumber(), record); } diff --git a/vespa-osgi-testrunner/src/main/resources/configdefinitions/vespa-cli-test-runner.def b/vespa-osgi-testrunner/src/main/resources/configdefinitions/vespa-cli-test-runner.def index 7671096477e..b23d98e66ee 100644 --- a/vespa-osgi-testrunner/src/main/resources/configdefinitions/vespa-cli-test-runner.def +++ b/vespa-osgi-testrunner/src/main/resources/configdefinitions/vespa-cli-test-runner.def @@ -1,5 +1,11 @@ # Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package=com.yahoo.vespa.testrunner +# Location of artifacts required for test runtime artifactsPath path + +# Location of HTTP tests +testsPath path + +# Whether credentials are from Athenz useAthenzCredentials bool default=false
\ No newline at end of file diff --git a/vespa-osgi-testrunner/src/test/java/com/yahoo/vespa/testrunner/HtmlLoggerTest.java b/vespa-osgi-testrunner/src/test/java/com/yahoo/vespa/testrunner/HtmlLoggerTest.java new file mode 100644 index 00000000000..cddb07dc4a6 --- /dev/null +++ b/vespa-osgi-testrunner/src/test/java/com/yahoo/vespa/testrunner/HtmlLoggerTest.java @@ -0,0 +1,32 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +package com.yahoo.vespa.testrunner; + +import org.fusesource.jansi.Ansi; +import org.junit.jupiter.api.Test; + +import java.util.List; +import java.util.logging.LogRecord; + +import static org.junit.jupiter.api.Assertions.assertEquals; + +/** + * @author jonmv + */ +class HtmlLoggerTest { + + @Test + void testConversionToHtml() { + String splitMessage = Ansi.ansi().fg(Ansi.Color.RED).a("</body>Hello!\ncontinued").reset().toString(); + List<String> messages = List.of(splitMessage.split("\n")); + LogRecord html0 = new HtmlLogger().toLog(messages.get(0)); + assertEquals("html", html0.getLevel().getName()); + assertEquals("<span style=\"color: red;\"></body>Hello!</span>", + html0.getMessage()); + + LogRecord html1 = new HtmlLogger().toLog(messages.get(1)); + assertEquals("html", html1.getLevel().getName()); + assertEquals("continued", + html1.getMessage()); + } + +} diff --git a/vespa-osgi-testrunner/src/test/java/com/yahoo/vespa/testrunner/VespaCliTestRunnerTest.java b/vespa-osgi-testrunner/src/test/java/com/yahoo/vespa/testrunner/VespaCliTestRunnerTest.java index be00f28d2c9..9ba8dfdc4dc 100644 --- a/vespa-osgi-testrunner/src/test/java/com/yahoo/vespa/testrunner/VespaCliTestRunnerTest.java +++ b/vespa-osgi-testrunner/src/test/java/com/yahoo/vespa/testrunner/VespaCliTestRunnerTest.java @@ -44,23 +44,29 @@ class VespaCliTestRunnerTest { void testSetup() throws IOException { Path temp = Files.createTempDirectory("vespa-cli-test-runner-test-"); temp.toFile().deleteOnExit(); - 
VespaCliTestRunner runner = new VespaCliTestRunner(temp); + Path tests = Files.createDirectory(temp.resolve("tests")); + Path artifacts = Files.createDirectory(temp.resolve("artifacts")); + VespaCliTestRunner runner = new VespaCliTestRunner(artifacts, tests); assertFalse(runner.isSupported()); - Path tests = Files.createDirectory(temp.resolve("tests")); + Path systemTests = Files.createDirectory(tests.resolve("system-test")); assertTrue(runner.isSupported()); IllegalStateException ise = assertThrows(IllegalStateException.class, - () -> runner.testRunProcessBuilder(TestRunner.Suite.SYSTEM_TEST, testConfig)); - assertEquals("No tests found, for suite 'SYSTEM_TEST'", ise.getMessage()); + () -> runner.testRunProcessBuilder(TestRunner.Suite.STAGING_TEST, testConfig)); + assertEquals("No tests found, for suite 'STAGING_TEST'", ise.getMessage()); - Path systemTests = Files.createDirectory(tests.resolve("system-test")); ProcessBuilder builder = runner.testRunProcessBuilder(TestRunner.Suite.SYSTEM_TEST, testConfig); assertEquals(List.of("vespa", "test", systemTests.toAbsolutePath().toString(), "--application", "t.a.i", - "--endpoints", "{\"endpoints\":[{\"cluster\":\"default\",\"url\":\"https://dev.endpoint:443/\"}]}", - "--data-plane-public-cert", temp.resolve("cert").toAbsolutePath().toString(), - "--data-plane-private-key", temp.resolve("key").toAbsolutePath().toString()), + "--zone", "dev.aws-us-east-1c", + "--target", "cloud"), builder.command()); + assertEquals("{\"endpoints\":[{\"cluster\":\"default\",\"url\":\"https://dev.endpoint:443/\"}]}", + builder.environment().get("VESPA_CLI_ENDPOINTS")); + assertEquals(artifacts.resolve("key").toAbsolutePath().toString(), + builder.environment().get("VESPA_CLI_DATA_PLANE_KEY_FILE")); + assertEquals(artifacts.resolve("cert").toAbsolutePath().toString(), + builder.environment().get("VESPA_CLI_DATA_PLANE_CERT_FILE")); } } diff --git a/vespa-testrunner-components/pom.xml b/vespa-testrunner-components/pom.xml index 
cb84f321f87..4b4b0cfa92c 100644 --- a/vespa-testrunner-components/pom.xml +++ b/vespa-testrunner-components/pom.xml @@ -42,12 +42,6 @@ </dependency> <dependency> - <groupId>org.fusesource.jansi</groupId> - <artifactId>jansi</artifactId> - <version>1.11</version> - </dependency> - - <dependency> <groupId>junit</groupId> <artifactId>junit</artifactId> <scope>test</scope> diff --git a/vespa-testrunner-components/src/main/java/com/yahoo/vespa/hosted/testrunner/PomXmlGenerator.java b/vespa-testrunner-components/src/main/java/com/yahoo/vespa/hosted/testrunner/PomXmlGenerator.java index ff66b31dfa8..e232e523cbf 100644 --- a/vespa-testrunner-components/src/main/java/com/yahoo/vespa/hosted/testrunner/PomXmlGenerator.java +++ b/vespa-testrunner-components/src/main/java/com/yahoo/vespa/hosted/testrunner/PomXmlGenerator.java @@ -37,7 +37,7 @@ public class PomXmlGenerator { " <version>1.0.0</version>\n" + "\n" + " <properties>\n" + - " <junit_version>5.7.0</junit_version>\n" + + " <junit_version>5.8.1</junit_version>\n" + " <surefire_version>2.22.0</surefire_version>\n" + "%PROPERTIES%" + " </properties>\n" + diff --git a/vespa-testrunner-components/src/main/java/com/yahoo/vespa/hosted/testrunner/TestRunner.java b/vespa-testrunner-components/src/main/java/com/yahoo/vespa/hosted/testrunner/TestRunner.java index 6f12535c317..69296c23fa0 100644 --- a/vespa-testrunner-components/src/main/java/com/yahoo/vespa/hosted/testrunner/TestRunner.java +++ b/vespa-testrunner-components/src/main/java/com/yahoo/vespa/hosted/testrunner/TestRunner.java @@ -3,16 +3,11 @@ package com.yahoo.vespa.hosted.testrunner; import com.google.inject.Inject; import com.yahoo.vespa.defaults.Defaults; -import org.fusesource.jansi.AnsiOutputStream; -import org.fusesource.jansi.HtmlAnsiOutputStream; +import com.yahoo.vespa.testrunner.HtmlLogger; -import java.io.BufferedOutputStream; import java.io.BufferedReader; -import java.io.ByteArrayOutputStream; -import java.io.FileOutputStream; import java.io.IOException; 
import java.io.InputStreamReader; -import java.io.PrintStream; import java.io.UncheckedIOException; import java.nio.file.Files; import java.nio.file.Path; @@ -30,7 +25,6 @@ import java.util.logging.Logger; import java.util.stream.Collectors; import java.util.stream.Stream; -import static java.nio.charset.StandardCharsets.UTF_8; import static java.util.logging.Level.SEVERE; /** @@ -40,7 +34,6 @@ import static java.util.logging.Level.SEVERE; public class TestRunner implements com.yahoo.vespa.testrunner.TestRunner { private static final Logger logger = Logger.getLogger(TestRunner.class.getName()); - private static final Level HTML = new Level("html", 1) { }; private static final Path vespaHome = Paths.get(Defaults.getDefaults().vespaHome()); private static final String settingsXml = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n" + "<settings xmlns=\"http://maven.apache.org/SETTINGS/1.0.0\"\n" + @@ -57,7 +50,6 @@ public class TestRunner implements com.yahoo.vespa.testrunner.TestRunner { private final Path artifactsPath; private final Path testPath; - private final Path logFile; private final Path configFile; private final Path settingsFile; private final Function<TestProfile, ProcessBuilder> testBuilder; @@ -69,16 +61,14 @@ public class TestRunner implements com.yahoo.vespa.testrunner.TestRunner { public TestRunner(TestRunnerConfig config) { this(config.artifactsPath(), vespaHome.resolve("tmp/test"), - vespaHome.resolve("logs/vespa/maven.log"), vespaHome.resolve("tmp/config.json"), vespaHome.resolve("tmp/settings.xml"), profile -> mavenProcessFrom(profile, config)); } - TestRunner(Path artifactsPath, Path testPath, Path logFile, Path configFile, Path settingsFile, Function<TestProfile, ProcessBuilder> testBuilder) { + TestRunner(Path artifactsPath, Path testPath, Path configFile, Path settingsFile, Function<TestProfile, ProcessBuilder> testBuilder) { this.artifactsPath = artifactsPath; this.testPath = testPath; - this.logFile = logFile; this.configFile = configFile; 
this.settingsFile = settingsFile; this.testBuilder = testBuilder; @@ -154,29 +144,17 @@ public class TestRunner implements com.yahoo.vespa.testrunner.TestRunner { } boolean success; - // The AnsiOutputStream filters out ANSI characters, leaving the file contents pure. - try (PrintStream fileStream = new PrintStream(new AnsiOutputStream(new BufferedOutputStream(new FileOutputStream(logFile.toFile())))); - ByteArrayOutputStream logBuffer = new ByteArrayOutputStream(); - PrintStream logPlainFormatter = new PrintStream(new AnsiOutputStream(logBuffer)); - PrintStream logFormatter = new PrintStream(new HtmlAnsiOutputStream(logBuffer))){ + try { writeTestApplicationPom(testProfile); Files.write(configFile, testConfig); Files.write(settingsFile, settingsXml.getBytes()); Process mavenProcess = builder.start(); BufferedReader in = new BufferedReader(new InputStreamReader(mavenProcess.getInputStream())); + HtmlLogger htmlLogger = new HtmlLogger(); in.lines().forEach(line -> { - fileStream.println(line); - logFormatter.print(line); - String message = logBuffer.toString(UTF_8); - if (message.length() > 1 << 13) { - logBuffer.reset(); - logPlainFormatter.print(line); // Avoid HTML since we don't know what we'll strip here. - message = logBuffer.toString(UTF_8).substring(0, 1 << 13) + " ... 
(this log entry was truncated due to size)"; - } - LogRecord record = new LogRecord(HTML, message); - log.put(record.getSequenceNumber(), record); - logBuffer.reset(); + LogRecord html = htmlLogger.toLog(line); + log.put(html.getSequenceNumber(), html); }); success = mavenProcess.waitFor() == 0; } @@ -185,11 +163,6 @@ public class TestRunner implements com.yahoo.vespa.testrunner.TestRunner { record.setThrown(exception); logger.log(record); log.put(record.getSequenceNumber(), record); - try (PrintStream file = new PrintStream(new FileOutputStream(logFile.toFile(), true))) { - file.println(record.getMessage()); - exception.printStackTrace(file); - } - catch (IOException ignored) { } status = exception instanceof NoTestsException ? Status.FAILURE : Status.ERROR; return; } diff --git a/vespa-testrunner-components/src/test/java/com/yahoo/vespa/hosted/testrunner/TestRunnerTest.java b/vespa-testrunner-components/src/test/java/com/yahoo/vespa/hosted/testrunner/TestRunnerTest.java index b513dfba8b5..2b2c30790c9 100644 --- a/vespa-testrunner-components/src/test/java/com/yahoo/vespa/hosted/testrunner/TestRunnerTest.java +++ b/vespa-testrunner-components/src/test/java/com/yahoo/vespa/hosted/testrunner/TestRunnerTest.java @@ -30,7 +30,6 @@ public class TestRunnerTest { private Path artifactsPath; private Path testPath; - private Path logFile; private Path configFile; private Path settingsFile; @@ -40,31 +39,14 @@ public class TestRunnerTest { Files.createFile(artifactsPath.resolve("my-tests.jar")); Files.createFile(artifactsPath.resolve("my-fat-test.jar")); testPath = tmp.newFolder("testData").toPath(); - logFile = tmp.newFile("maven.log").toPath(); configFile = tmp.newFile("testConfig.json").toPath(); settingsFile = tmp.newFile("settings.xml").toPath(); } @Test - public void ansiCodesAreConvertedToHtml() throws InterruptedException { - TestRunner runner = new TestRunner(artifactsPath, testPath, logFile, configFile, settingsFile, - __ -> new ProcessBuilder("echo", 
Ansi.ansi().fg(Ansi.Color.RED).a("Hello!").reset().toString())); - runner.test(SYSTEM_TEST, new byte[0]); - while (runner.getStatus() == TestRunner.Status.RUNNING) { - Thread.sleep(10); - } - Iterator<LogRecord> log = runner.getLog(-1).iterator(); - log.next(); - LogRecord record = log.next(); - assertEquals("<span style=\"color: red;\">Hello!</span>", record.getMessage()); - assertEquals(0, runner.getLog(record.getSequenceNumber()).size()); - assertEquals(TestRunner.Status.SUCCESS, runner.getStatus()); - } - - @Test public void noTestJarIsAFailure() throws InterruptedException, IOException { Files.delete(artifactsPath.resolve("my-tests.jar")); - TestRunner runner = new TestRunner(artifactsPath, testPath, logFile, configFile, settingsFile, + TestRunner runner = new TestRunner(artifactsPath, testPath, configFile, settingsFile, __ -> new ProcessBuilder("This is a command that doesn't exist, for sure!")); runner.test(SYSTEM_TEST, new byte[0]); while (runner.getStatus() == TestRunner.Status.RUNNING) { @@ -80,7 +62,7 @@ public class TestRunnerTest { @Test public void errorLeadsToError() throws InterruptedException { - TestRunner runner = new TestRunner(artifactsPath, testPath, logFile, configFile, settingsFile, + TestRunner runner = new TestRunner(artifactsPath, testPath, configFile, settingsFile, __ -> new ProcessBuilder("false")); runner.test(SYSTEM_TEST, new byte[0]); while (runner.getStatus() == TestRunner.Status.RUNNING) { @@ -92,7 +74,7 @@ public class TestRunnerTest { @Test public void failureLeadsToFailure() throws InterruptedException { - TestRunner runner = new TestRunner(artifactsPath, testPath, logFile, configFile, settingsFile, + TestRunner runner = new TestRunner(artifactsPath, testPath, configFile, settingsFile, __ -> new ProcessBuilder("false")); runner.test(SYSTEM_TEST, new byte[0]); while (runner.getStatus() == TestRunner.Status.RUNNING) { @@ -104,7 +86,7 @@ public class TestRunnerTest { @Test public void filesAreGenerated() throws 
InterruptedException, IOException { - TestRunner runner = new TestRunner(artifactsPath, testPath, logFile, configFile, settingsFile, + TestRunner runner = new TestRunner(artifactsPath, testPath, configFile, settingsFile, __ -> new ProcessBuilder("echo", "Hello!")); runner.test(SYSTEM_TEST, "config".getBytes()); while (runner.getStatus() == TestRunner.Status.RUNNING) { @@ -113,12 +95,11 @@ public class TestRunnerTest { assertEquals("config", new String(Files.readAllBytes(configFile))); assertTrue(Files.exists(testPath.resolve("pom.xml"))); assertTrue(Files.exists(settingsFile)); - assertEquals("Hello!\n", new String(Files.readAllBytes(logFile))); } @Test public void runnerCanBeReused() throws InterruptedException, IOException { - TestRunner runner = new TestRunner(artifactsPath, testPath, logFile, configFile, settingsFile, + TestRunner runner = new TestRunner(artifactsPath, testPath, configFile, settingsFile, __ -> new ProcessBuilder("sleep", "0.1")); runner.test(SYSTEM_TEST, "config".getBytes()); assertEquals(TestRunner.Status.RUNNING, runner.getStatus()); diff --git a/vespa-testrunner-components/src/test/resources/pom.xml_system_tests b/vespa-testrunner-components/src/test/resources/pom.xml_system_tests index 269bbd2a3c3..b07bc9a26cc 100644 --- a/vespa-testrunner-components/src/test/resources/pom.xml_system_tests +++ b/vespa-testrunner-components/src/test/resources/pom.xml_system_tests @@ -6,7 +6,7 @@ <version>1.0.0</version> <properties> - <junit_version>5.7.0</junit_version> + <junit_version>5.8.1</junit_version> <surefire_version>2.22.0</surefire_version> <my-comp.jar.path>components/my-comp.jar</my-comp.jar.path> <main.jar.path>main.jar</main.jar.path> diff --git a/vespa_feed_perf/src/main/java/com/yahoo/vespa/feed/perf/SimpleFeeder.java b/vespa_feed_perf/src/main/java/com/yahoo/vespa/feed/perf/SimpleFeeder.java index 167e9b338a9..850513fb990 100644 --- a/vespa_feed_perf/src/main/java/com/yahoo/vespa/feed/perf/SimpleFeeder.java +++ 
b/vespa_feed_perf/src/main/java/com/yahoo/vespa/feed/perf/SimpleFeeder.java @@ -349,6 +349,9 @@ public class SimpleFeeder implements ReplyHandler { } return new JsonDestination(params.getDumpStream(), failure, numReplies); } + + + @SuppressWarnings("deprecation") SimpleFeeder(FeederParams params) { inputStreams = params.getInputStreams(); out = params.getStdOut(); diff --git a/vespa_feed_perf/src/test/java/com/yahoo/vespa/feed/perf/SimpleServer.java b/vespa_feed_perf/src/test/java/com/yahoo/vespa/feed/perf/SimpleServer.java index c2b3e9e4680..10184b35e4c 100644 --- a/vespa_feed_perf/src/test/java/com/yahoo/vespa/feed/perf/SimpleServer.java +++ b/vespa_feed_perf/src/test/java/com/yahoo/vespa/feed/perf/SimpleServer.java @@ -28,6 +28,7 @@ public class SimpleServer { private final MessageBus mbus; private final DestinationSession session; + @SuppressWarnings("deprecation") public SimpleServer(String configDir, MessageHandler msgHandler) throws IOException, ListenFailedException { slobrok = new Slobrok(); documentMgr = new DocumentTypeManager(); @@ -53,6 +54,7 @@ public class SimpleServer { writer.close(); } + @SuppressWarnings("deprecation") public final void close() { session.destroy(); mbus.destroy(); diff --git a/vespa_feed_perf/src/test/resources/documentmanager.cfg b/vespa_feed_perf/src/test/resources/documentmanager.cfg index ebb6e767bef..e224aeea1aa 100644 --- a/vespa_feed_perf/src/test/resources/documentmanager.cfg +++ b/vespa_feed_perf/src/test/resources/documentmanager.cfg @@ -34,7 +34,7 @@ datatype[1].structtype[0].field[0].name "my_str" datatype[1].structtype[0].field[0].id[0] datatype[1].structtype[0].inherits[0] datatype[1].weightedsettype[0] -datatype[2].id -1668955062 +datatype[2].id 485659380 datatype[2].annotationreftype[0] datatype[2].arraytype[0] datatype[2].documenttype[1] diff --git a/vespabase/src/common-env.sh b/vespabase/src/common-env.sh index cd8c7c604ad..f75ffdbd13f 100755 --- a/vespabase/src/common-env.sh +++ b/vespabase/src/common-env.sh 
@@ -295,6 +295,10 @@ log_warning_message () { get_numa_ctl_cmd () { if ! type numactl &> /dev/null; then + if test "$(uname -s)" = Darwin + then + return 0 + fi echo "FATAL: Could not find required program numactl." exit 1 fi diff --git a/vespaclient-container-plugin/src/main/java/com/yahoo/vespa/http/server/FeedReplyReader.java b/vespaclient-container-plugin/src/main/java/com/yahoo/vespa/http/server/FeedReplyReader.java index 11fd97f2a1d..50f79c0a828 100644 --- a/vespaclient-container-plugin/src/main/java/com/yahoo/vespa/http/server/FeedReplyReader.java +++ b/vespaclient-container-plugin/src/main/java/com/yahoo/vespa/http/server/FeedReplyReader.java @@ -1,21 +1,21 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.vespa.http.server; -import java.util.Set; -import java.util.logging.Logger; - import com.yahoo.documentapi.messagebus.protocol.DocumentProtocol; import com.yahoo.documentapi.metrics.DocumentApiMetrics; import com.yahoo.documentapi.metrics.DocumentOperationStatus; import com.yahoo.documentapi.metrics.DocumentOperationType; import com.yahoo.jdisc.Metric; -import java.util.logging.Level; import com.yahoo.messagebus.Reply; import com.yahoo.messagebus.ReplyHandler; import com.yahoo.messagebus.Trace; import com.yahoo.vespa.http.client.core.ErrorCode; import com.yahoo.vespa.http.client.core.OperationStatus; +import java.util.Map; +import java.util.logging.Level; +import java.util.logging.Logger; + /** * Catch message bus replies and make the available to a given session. 
* @@ -26,10 +26,12 @@ public class FeedReplyReader implements ReplyHandler { private static final Logger log = Logger.getLogger(FeedReplyReader.class.getName()); private final Metric metric; private final DocumentApiMetrics metricsHelper; + private final Metric.Context testAndSetMetricCtx; public FeedReplyReader(Metric metric, DocumentApiMetrics metricsHelper) { this.metric = metric; this.metricsHelper = metricsHelper; + this.testAndSetMetricCtx = metric.createContext(Map.of("operationType", "testAndSet")); } @Override @@ -42,20 +44,26 @@ public class FeedReplyReader implements ReplyHandler { final double latencyInSeconds = (System.currentTimeMillis() - context.creationTime) / 1000.0d; metric.set(MetricNames.LATENCY, latencyInSeconds, null); - if (reply.hasErrors()) { - Set<Integer> errorCodes = reply.getErrorCodes(); - metricsHelper.reportFailure(DocumentOperationType.fromMessage(reply.getMessage()), - DocumentOperationStatus.fromMessageBusErrorCodes(errorCodes)); + DocumentOperationType type = DocumentOperationType.fromMessage(reply.getMessage()); + boolean conditionMet = conditionMet(reply); + if (reply.hasErrors() && conditionMet) { + DocumentOperationStatus status = DocumentOperationStatus.fromMessageBusErrorCodes(reply.getErrorCodes()); + metricsHelper.reportFailure(type, status); metric.add(MetricNames.FAILED, 1, null); - enqueue(context, reply.getError(0).getMessage(), ErrorCode.ERROR, - reply.getError(0).getCode() == DocumentProtocol.ERROR_TEST_AND_SET_CONDITION_FAILED, reply.getTrace()); + enqueue(context, reply.getError(0).getMessage(), ErrorCode.ERROR, false, reply.getTrace()); } else { - metricsHelper.reportSuccessful(DocumentOperationType.fromMessage(reply.getMessage()), latencyInSeconds); + metricsHelper.reportSuccessful(type, latencyInSeconds); metric.add(MetricNames.SUCCEEDED, 1, null); - enqueue(context, "Document processed.", ErrorCode.OK, false, reply.getTrace()); + if (!conditionMet) + metric.add(MetricNames.CONDITION_NOT_MET, 1, 
testAndSetMetricCtx); + enqueue(context, "Document processed.", ErrorCode.OK, !conditionMet, reply.getTrace()); } } + private static boolean conditionMet(Reply reply) { + return !reply.hasErrors() || reply.getError(0).getCode() != DocumentProtocol.ERROR_TEST_AND_SET_CONDITION_FAILED; + } + private void enqueue(ReplyContext context, String message, ErrorCode status, boolean isConditionNotMet, Trace trace) { try { String traceMessage = (trace != null && trace.getLevel() > 0) ? trace.toString() : ""; diff --git a/vespaclient-container-plugin/src/main/java/com/yahoo/vespa/http/server/MetricNames.java b/vespaclient-container-plugin/src/main/java/com/yahoo/vespa/http/server/MetricNames.java index 8cd628a83d9..6ded410ff68 100644 --- a/vespaclient-container-plugin/src/main/java/com/yahoo/vespa/http/server/MetricNames.java +++ b/vespaclient-container-plugin/src/main/java/com/yahoo/vespa/http/server/MetricNames.java @@ -18,6 +18,7 @@ public final class MetricNames { public static final String OPERATIONS_PER_SEC = PREFIX + "ops_per_sec"; public static final String LATENCY = PREFIX + "latency"; public static final String FAILED = PREFIX + "failed"; + public static final String CONDITION_NOT_MET = PREFIX + "condition_not_met"; public static final String PARSE_ERROR = PREFIX + "parse_error"; public static final String SUCCEEDED = PREFIX + "succeeded"; public static final String PENDING = PREFIX + "pending"; diff --git a/vespaclient/src/vespa/vespaclient/vesparoute/application.cpp b/vespaclient/src/vespa/vespaclient/vesparoute/application.cpp index aaa588c0353..7a697941d88 100644 --- a/vespaclient/src/vespa/vespaclient/vesparoute/application.cpp +++ b/vespaclient/src/vespa/vespaclient/vesparoute/application.cpp @@ -2,6 +2,7 @@ #include "application.h" +#include <vespa/document/config/documenttypes_config_fwd.h> #include <vespa/document/config/config-documenttypes.h> #include <vespa/document/repo/documenttyperepo.h> #include <vespa/documentapi/messagebus/documentprotocol.h> @@ 
-18,7 +19,6 @@ #include <vespa/fnet/frt/supervisor.h> using config::ConfigGetter; -using document::DocumenttypesConfig; using messagebus::MessagebusConfig; using document::DocumentTypeRepo; diff --git a/vespajlib/src/main/java/com/yahoo/concurrent/CachedThreadPoolWithFallback.java b/vespajlib/src/main/java/com/yahoo/concurrent/CachedThreadPoolWithFallback.java index ddd7882aee6..13d2463cf2c 100644 --- a/vespajlib/src/main/java/com/yahoo/concurrent/CachedThreadPoolWithFallback.java +++ b/vespajlib/src/main/java/com/yahoo/concurrent/CachedThreadPoolWithFallback.java @@ -11,18 +11,20 @@ import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; /** - * An executor that will first try a bounded cached threadpool before falling back to a unbounded - * single threaded threadpool that will take over dispatching to the primary pool. + * An executor that will first try a bounded cached thread pool before falling back to an unbounded + * single threaded thread pool that will take over dispatching to the primary pool. 
* */ public class CachedThreadPoolWithFallback implements AutoCloseable, Executor { private final ExecutorService primary; private final ExecutorService secondary; - public CachedThreadPoolWithFallback(String baseName, int corePoolSize, int maximumPoolSize, long keepAlimeTime, TimeUnit timeUnit) { - primary = new ThreadPoolExecutor(corePoolSize, maximumPoolSize, keepAlimeTime, timeUnit, + + public CachedThreadPoolWithFallback(String baseName, int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit timeUnit) { + primary = new ThreadPoolExecutor(corePoolSize, maximumPoolSize, keepAliveTime, timeUnit, new SynchronousQueue<>(), ThreadFactoryFactory.getDaemonThreadFactory(baseName + ".primary")); secondary = Executors.newSingleThreadExecutor(ThreadFactoryFactory.getDaemonThreadFactory(baseName + ".secondary")); } + @Override public void execute(Runnable command) { try { @@ -31,6 +33,7 @@ public class CachedThreadPoolWithFallback implements AutoCloseable, Executor { secondary.execute(() -> retryForever(command)); } } + private void retryForever(Runnable command) { while (true) { try { @@ -51,6 +54,7 @@ public class CachedThreadPoolWithFallback implements AutoCloseable, Executor { primary.shutdown(); join(primary); } + private static void join(ExecutorService executor) { while (true) { try { @@ -60,4 +64,5 @@ public class CachedThreadPoolWithFallback implements AutoCloseable, Executor { } catch (InterruptedException e) {} } } + } diff --git a/vespajlib/src/main/java/com/yahoo/concurrent/CompletableFutures.java b/vespajlib/src/main/java/com/yahoo/concurrent/CompletableFutures.java index 2dab634d8be..a1235c3821d 100644 --- a/vespajlib/src/main/java/com/yahoo/concurrent/CompletableFutures.java +++ b/vespajlib/src/main/java/com/yahoo/concurrent/CompletableFutures.java @@ -1,8 +1,15 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.concurrent; +import com.google.common.util.concurrent.ListenableFuture; +import com.google.common.util.concurrent.SettableFuture; +import com.yahoo.yolean.UncheckedInterruptedException; + +import java.util.ArrayList; import java.util.List; +import java.util.concurrent.CancellationException; import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; /** * Helper for {@link java.util.concurrent.CompletableFuture} / {@link java.util.concurrent.CompletionStage}. @@ -64,4 +71,64 @@ public class CompletableFutures { return combiner.combined; } + /** Similar to {@link CompletableFuture#allOf(CompletableFuture[])} but returns a list of the results */ + public static <T> CompletableFuture<List<T>> allOf(List<CompletableFuture<T>> futures) { + return CompletableFuture.allOf(futures.toArray(CompletableFuture[]::new)) + .thenApply(__ -> { + List<T> results = new ArrayList<>(); + for (CompletableFuture<T> f : futures) { + try { + results.add(f.get()); + } catch (InterruptedException | ExecutionException e) { + // Should not happen since all futures are completed without exception + throw new IllegalStateException(e); + } + } + return results; + }); + } + + /** + * Helper for migrating from {@link ListenableFuture} to {@link CompletableFuture} in Vespa public apis + * @deprecated to be removed in Vespa 8 + */ + @SuppressWarnings("unchecked") + @Deprecated(forRemoval = true, since = "7") + public static <V> ListenableFuture<V> toGuavaListenableFuture(CompletableFuture<V> future) { + if (future instanceof ListenableFuture) { + return ((ListenableFuture<V>) future); + } + SettableFuture<V> guavaFuture = SettableFuture.create(); + future.whenComplete((result, error) -> { + if (result != null) guavaFuture.set(result); + else if (error instanceof CancellationException) guavaFuture.setException(error); + else guavaFuture.cancel(true); + }); + return guavaFuture; + } + + /** + * Helper for migrating from {@link ListenableFuture} to 
{@link CompletableFuture} in Vespa public apis + * @deprecated to be removed in Vespa 8 + */ + @Deprecated(forRemoval = true, since = "7") + public static <V> CompletableFuture<V> toCompletableFuture(ListenableFuture<V> guavaFuture) { + CompletableFuture<V> future = new CompletableFuture<>(); + guavaFuture.addListener( + () -> { + if (guavaFuture.isCancelled()) future.cancel(true); + try { + V value = guavaFuture.get(); + future.complete(value); + } catch (InterruptedException e) { + // Should not happens since listener is invoked after future is complete + throw new UncheckedInterruptedException(e); + } catch (ExecutionException e) { + future.completeExceptionally(e.getCause()); + } + }, + Runnable::run); + return future; + } + } diff --git a/vespajlib/src/main/java/com/yahoo/geo/DistanceParser.java b/vespajlib/src/main/java/com/yahoo/geo/DistanceParser.java index 13bff614d83..acb7ed95597 100644 --- a/vespajlib/src/main/java/com/yahoo/geo/DistanceParser.java +++ b/vespajlib/src/main/java/com/yahoo/geo/DistanceParser.java @@ -2,7 +2,7 @@ package com.yahoo.geo; -import com.google.common.annotations.Beta; +import com.yahoo.api.annotations.Beta; /** * Utility for parsing a geographical distance with unit. diff --git a/vespajlib/src/main/java/com/yahoo/io/reader/NamedReader.java b/vespajlib/src/main/java/com/yahoo/io/reader/NamedReader.java index 3129ddba638..f54caa225b0 100644 --- a/vespajlib/src/main/java/com/yahoo/io/reader/NamedReader.java +++ b/vespajlib/src/main/java/com/yahoo/io/reader/NamedReader.java @@ -1,7 +1,7 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.io.reader; -import com.google.common.annotations.Beta; +import com.yahoo.api.annotations.Beta; import java.io.IOException; import java.io.Reader; diff --git a/vespajlib/src/main/java/com/yahoo/path/Path.java b/vespajlib/src/main/java/com/yahoo/path/Path.java index 3254c081f31..12f93d15737 100644 --- a/vespajlib/src/main/java/com/yahoo/path/Path.java +++ b/vespajlib/src/main/java/com/yahoo/path/Path.java @@ -1,7 +1,7 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.path; -import com.google.common.annotations.Beta; +import com.yahoo.api.annotations.Beta; import com.google.common.collect.ImmutableList; import java.io.File; diff --git a/vespajlib/src/main/java/com/yahoo/slime/ArrayValue.java b/vespajlib/src/main/java/com/yahoo/slime/ArrayValue.java index 6450982540f..dbd9771afe9 100644 --- a/vespajlib/src/main/java/com/yahoo/slime/ArrayValue.java +++ b/vespajlib/src/main/java/com/yahoo/slime/ArrayValue.java @@ -1,6 +1,9 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.slime; +/** + * @author havardpe + */ final class ArrayValue extends Value { private int capacity = 16; diff --git a/vespajlib/src/main/java/com/yahoo/slime/BoolValue.java b/vespajlib/src/main/java/com/yahoo/slime/BoolValue.java index 00f3adf82a1..5f40050a7df 100644 --- a/vespajlib/src/main/java/com/yahoo/slime/BoolValue.java +++ b/vespajlib/src/main/java/com/yahoo/slime/BoolValue.java @@ -1,13 +1,18 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.slime; +/** + * @author havardpe + */ final class BoolValue extends Value { + private static final BoolValue trueValue = new BoolValue(true); private static final BoolValue falseValue = new BoolValue(false); private final boolean value; private BoolValue(boolean value) { this.value = value; } - final public Type type() { return Type.BOOL; } - final public boolean asBool() { return this.value; } - public final void accept(Visitor v) { v.visitBool(value); } + public Type type() { return Type.BOOL; } + public boolean asBool() { return this.value; } + public void accept(Visitor v) { v.visitBool(value); } public static BoolValue instance(boolean bit) { return (bit ? trueValue : falseValue); } + } diff --git a/vespajlib/src/main/java/com/yahoo/slime/Cursor.java b/vespajlib/src/main/java/com/yahoo/slime/Cursor.java index 2696e923bd5..e6493a2ba4c 100644 --- a/vespajlib/src/main/java/com/yahoo/slime/Cursor.java +++ b/vespajlib/src/main/java/com/yahoo/slime/Cursor.java @@ -16,270 +16,298 @@ package com.yahoo.slime; * not connected to an array value (for add methods), or it's not * connected to an object (for set methods). Also note that you can * only set() a field once; you cannot overwrite the field in any way. - **/ + * + * @author havardpe + */ public interface Cursor extends Inspector { /** - * Access an array entry. + * Accesses an array entry. * * If the current Cursor doesn't connect to an array value, * or the given array index is out of bounds, the returned * Cursor will be invalid. - * @param idx array index. - * @return a new Cursor for the entry value. - **/ + * + * @param idx array index + * @return a new Cursor for the entry value + */ @Override - public Cursor entry(int idx); + Cursor entry(int idx); /** - * Access an field in an object by symbol id. + * Accesses a field in an object by symbol id. 
* * If the current Cursor doesn't connect to an object value, or * the object value does not contain a field with the given symbol * id, the returned Cursor will be invalid. - * @param sym symbol id. - * @return a new Cursor for the field value. - **/ + * + * @param sym symbol id + * @return a new Cursor for the field value + */ @Override - public Cursor field(int sym); + Cursor field(int sym); /** - * Access an field in an object by symbol name. + * Accesses a field in an object by symbol name. * * If the current Cursor doesn't connect to an object value, or * the object value does not contain a field with the given symbol * name, the returned Cursor will be invalid. - * @param name symbol name. - * @return a new Cursor for the field value. - **/ + * + * @param name symbol name + * @return a new Cursor for the field value + */ @Override - public Cursor field(String name); + Cursor field(String name); /** - * Append an array entry containing a new value of NIX type. + * Appends an array entry containing a new value of NIX type. * Returns an invalid Cursor if unsuccessful. - * @return a valid Cursor referencing the new entry value if successful. - **/ - public Cursor addNix(); + * + * @return a valid Cursor referencing the new entry value if successful + */ + Cursor addNix(); /** - * Append an array entry containing a new value of BOOL type. + * Appends an array entry containing a new value of BOOL type. * Returns an invalid Cursor if unsuccessful. - * @param bit the actual boolean value for initializing a new BoolValue. - * @return a valid Cursor referencing the new entry value if successful. - **/ - public Cursor addBool(boolean bit); + * + * @param bit the actual boolean value for initializing a new BoolValue + * @return a valid Cursor referencing the new entry value if successful + */ + Cursor addBool(boolean bit); - /** add a new entry of LONG type to an array */ - public Cursor addLong(long l); + /** Adds a new entry of LONG type to an array. 
*/ + Cursor addLong(long l); - /** add a new entry of DOUBLE type to an array */ - public Cursor addDouble(double d); + /** Adds a new entry of DOUBLE type to an array. */ + Cursor addDouble(double d); - /** add a new entry of STRING type to an array */ - public Cursor addString(String str); + /** Add a new entry of STRING type to an array. */ + Cursor addString(String str); - /** add a new entry of STRING type to an array */ - public Cursor addString(byte[] utf8); + /** Add a new entry of STRING type to an array. */ + Cursor addString(byte[] utf8); - /** add a new entry of DATA type to an array */ - public Cursor addData(byte[] data); + /** Add a new entry of DATA type to an array. */ + Cursor addData(byte[] data); /** - * Append an array entry containing a new value of ARRAY type. + * Appends an array entry containing a new value of ARRAY type. * Returns a valid Cursor (thay may again be used for adding new * sub-array entries) referencing the new entry value if * successful; otherwise returns an invalid Cursor. - * @return new Cursor for the new entry value - **/ - public Cursor addArray(); + * + * @return a new Cursor for the new entry value + */ + Cursor addArray(); /** - * Append an array entry containing a new value of OBJECT type. + * Appends an array entry containing a new value of OBJECT type. * Returns a valid Cursor (thay may again be used for setting * sub-fields inside the new object) referencing the new entry * value if successful; otherwise returns an invalid Cursor. - * @return new Cursor for the new entry value - **/ - public Cursor addObject(); + * + * @return a new Cursor for the new entry value + */ + Cursor addObject(); /** - * Set a field (identified with a symbol id) to contain a new + * Sets a field (identified with a symbol id) to contain a new * value of NIX type. Returns a valid Cursor referencing the new * field value if successful; otherwise returns an invalid Cursor. 
+ * * @param sym symbol id for the field to be set * @return new Cursor for the new field value - **/ - public Cursor setNix(int sym); + */ + Cursor setNix(int sym); /** - * Set a field (identified with a symbol id) to contain a new + * Sets a field (identified with a symbol id) to contain a new * value of BOOL type. Returns a valid Cursor referencing the new * field value if successful; otherwise returns an invalid Cursor. + * * @param sym symbol id for the field to be set * @param bit the actual boolean value for the new field * @return new Cursor for the new field value - **/ - public Cursor setBool(int sym, boolean bit); + */ + Cursor setBool(int sym, boolean bit); /** - * Set a field (identified with a symbol id) to contain a new + * Sets a field (identified with a symbol id) to contain a new * value of BOOL type. Returns a valid Cursor referencing the new * field value if successful; otherwise returns an invalid Cursor. + * * @param sym symbol id for the field to be set * @param l the actual long value for the new field * @return new Cursor for the new field value - **/ - public Cursor setLong(int sym, long l); + */ + Cursor setLong(int sym, long l); /** - * Set a field (identified with a symbol id) to contain a new + * Sets a field (identified with a symbol id) to contain a new * value of BOOL type. Returns a valid Cursor referencing the new * field value if successful; otherwise returns an invalid Cursor. + * * @param sym symbol id for the field to be set * @param d the actual double value for the new field * @return new Cursor for the new field value - **/ - public Cursor setDouble(int sym, double d); + */ + Cursor setDouble(int sym, double d); /** - * Set a field (identified with a symbol id) to contain a new + * Sets a field (identified with a symbol id) to contain a new * value of BOOL type. Returns a valid Cursor referencing the new * field value if successful; otherwise returns an invalid Cursor. 
+ * * @param sym symbol id for the field to be set * @param str the actual string for the new field - * @return new Cursor for the new field value - **/ - public Cursor setString(int sym, String str); + * @return a new Cursor for the new field value + */ + Cursor setString(int sym, String str); /** - * Set a field (identified with a symbol id) to contain a new + * Sets a field (identified with a symbol id) to contain a new * value of BOOL type. Returns a valid Cursor referencing the new * field value if successful; otherwise returns an invalid Cursor. + * * @param sym symbol id for the field to be set * @param utf8 the actual string (encoded as UTF-8 data) for the new field - * @return new Cursor for the new field value - **/ - public Cursor setString(int sym, byte[] utf8); + * @return a new Cursor for the new field value + */ + Cursor setString(int sym, byte[] utf8); /** - * Set a field (identified with a symbol id) to contain a new + * Sets a field (identified with a symbol id) to contain a new * value of BOOL type. Returns a valid Cursor referencing the new * field value if successful; otherwise returns an invalid Cursor. + * * @param sym symbol id for the field to be set * @param data the actual data to be put into the new field - * @return new Cursor for the new field value - **/ - public Cursor setData(int sym, byte[] data); + * @return a new Cursor for the new field value + */ + Cursor setData(int sym, byte[] data); /** - * Set a field (identified with a symbol id) to contain a new + * Sets a field (identified with a symbol id) to contain a new * value of ARRAY type. Returns a valid Cursor (thay may again be * used for adding new array entries) referencing the new field * value if successful; otherwise returns an invalid Cursor. 
+ * * @param sym symbol id for the field to be set - * @return new Cursor for the new field value - **/ - public Cursor setArray(int sym); + * @return a new Cursor for the new field value + */ + Cursor setArray(int sym); /** - * Set a field (identified with a symbol id) to contain a new + * Sets a field (identified with a symbol id) to contain a new * value of OBJECT type. Returns a valid Cursor (thay may again * be used for setting sub-fields inside the new object) * referencing the new field value if successful; otherwise * returns an invalid Cursor. + * * @param sym symbol id for the field to be set - * @return new Cursor for the new field value - **/ - public Cursor setObject(int sym); + * @return a new Cursor for the new field value + */ + Cursor setObject(int sym); /** - * Set a field (identified with a symbol name) to contain a new + * Sets a field (identified with a symbol name) to contain a new * value of NIX type. Returns a valid Cursor referencing the new * field value if successful; otherwise returns an invalid Cursor. + * * @param name symbol name for the field to be set - * @return new Cursor for the new field value - **/ - public Cursor setNix(String name); + * @return a new Cursor for the new field value + */ + Cursor setNix(String name); /** - * Set a field (identified with a symbol name) to contain a new + * Sets a field (identified with a symbol name) to contain a new * value of BOOL type. Returns a valid Cursor referencing the new * field value if successful; otherwise returns an invalid Cursor. 
+ * * @param name symbol name for the field to be set * @param bit the actual boolean value for the new field - * @return new Cursor for the new field value - **/ - public Cursor setBool(String name, boolean bit); + * @return a new Cursor for the new field value + */ + Cursor setBool(String name, boolean bit); /** - * Set a field (identified with a symbol name) to contain a new + * Sets a field (identified with a symbol name) to contain a new * value of LONG type. Returns a valid Cursor referencing the new * field value if successful; otherwise returns an invalid Cursor. + * * @param name symbol name for the field to be set * @param l the actual long value for the new field - * @return new Cursor for the new field value - **/ - public Cursor setLong(String name, long l); + * @return a new Cursor for the new field value + */ + Cursor setLong(String name, long l); /** - * Set a field (identified with a symbol name) to contain a new + * Sets a field (identified with a symbol name) to contain a new * value of DOUBLE type. Returns a valid Cursor referencing the new * field value if successful; otherwise returns an invalid Cursor. + * * @param name symbol name for the field to be set * @param d the actual double value for the new field - * @return new Cursor for the new field value - **/ - public Cursor setDouble(String name, double d); + * @return a new Cursor for the new field value + */ + Cursor setDouble(String name, double d); /** - * Set a field (identified with a symbol name) to contain a new + * Sets a field (identified with a symbol name) to contain a new * value of STRING type. Returns a valid Cursor referencing the new * field value if successful; otherwise returns an invalid Cursor. 
+ * * @param name symbol name for the field to be set * @param str the actual string for the new field - * @return new Cursor for the new field value - **/ - public Cursor setString(String name, String str); + * @return a new Cursor for the new field value + */ + Cursor setString(String name, String str); /** - * Set a field (identified with a symbol name) to contain a new + * Sets a field (identified with a symbol name) to contain a new * value of STRING type. Returns a valid Cursor referencing the new * field value if successful; otherwise returns an invalid Cursor. + * * @param name symbol name for the field to be set * @param utf8 the actual string (encoded as UTF-8 data) for the new field - * @return new Cursor for the new field value - **/ - public Cursor setString(String name, byte[] utf8); + * @return a new Cursor for the new field value + */ + Cursor setString(String name, byte[] utf8); /** - * Set a field (identified with a symbol name) to contain a new + * Sets a field (identified with a symbol name) to contain a new * value of DATA type. Returns a valid Cursor referencing the new * field value if successful; otherwise returns an invalid Cursor. + * * @param name symbol name for the field to be set * @param data the actual data to be put into the new field - * @return new Cursor for the new field value - **/ - public Cursor setData(String name, byte[] data); + * @return a new Cursor for the new field value + */ + Cursor setData(String name, byte[] data); /** - * Set a field (identified with a symbol name) to contain a new + * Sets a field (identified with a symbol name) to contain a new * value of ARRAY type. Returns a valid Cursor (thay may again be * used for adding new array entries) referencing the new field * value if successful; otherwise returns an invalid Cursor. 
+ * * @param name symbol name for the field to be set - * @return new Cursor for the new field value - **/ - public Cursor setArray(String name); + * @return a new Cursor for the new field value + */ + Cursor setArray(String name); /** - * Set a field (identified with a symbol name) to contain a new + * Sets a field (identified with a symbol name) to contain a new * value of OBJECT type. Returns a valid Cursor (thay may again * be used for setting sub-fields inside the new object) * referencing the new field value if successful; otherwise * returns an invalid Cursor. + * * @param name symbol name for the field to be set - * @return new Cursor for the new field value - **/ - public Cursor setObject(String name); + * @return a new Cursor for the new field value + */ + Cursor setObject(String name); + } diff --git a/vespajlib/src/main/java/com/yahoo/slime/DataValue.java b/vespajlib/src/main/java/com/yahoo/slime/DataValue.java index 5081b3fdbc7..91f20335eb1 100644 --- a/vespajlib/src/main/java/com/yahoo/slime/DataValue.java +++ b/vespajlib/src/main/java/com/yahoo/slime/DataValue.java @@ -1,7 +1,11 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.slime; +/** + * @author havardpe + */ final class DataValue extends Value { + private final byte[] value; private DataValue(byte[] value) { this.value = value; } public static Value create(byte[] value) { @@ -11,7 +15,8 @@ final class DataValue extends Value { return new DataValue(value); } } - public final Type type() { return Type.DATA; } - public final byte[] asData() { return this.value; } - public final void accept(Visitor v) { v.visitData(value); } + public Type type() { return Type.DATA; } + public byte[] asData() { return this.value; } + public void accept(Visitor v) { v.visitData(value); } + } diff --git a/vespajlib/src/main/java/com/yahoo/slime/DoubleValue.java b/vespajlib/src/main/java/com/yahoo/slime/DoubleValue.java index 22b685d5419..23f636f126d 100644 --- a/vespajlib/src/main/java/com/yahoo/slime/DoubleValue.java +++ b/vespajlib/src/main/java/com/yahoo/slime/DoubleValue.java @@ -1,11 +1,16 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.slime; +/** + * @author havardpe + */ final class DoubleValue extends Value { + private final double value; public DoubleValue(double value) { this.value = value; } - public final Type type() { return Type.DOUBLE; } - public final long asLong() { return (long)this.value; } - public final double asDouble() { return this.value; } - public final void accept(Visitor v) { v.visitDouble(value); } + public Type type() { return Type.DOUBLE; } + public long asLong() { return (long)this.value; } + public double asDouble() { return this.value; } + public void accept(Visitor v) { v.visitDouble(value); } + } diff --git a/vespajlib/src/main/java/com/yahoo/slime/JsonDecoder.java b/vespajlib/src/main/java/com/yahoo/slime/JsonDecoder.java index d6818907bf3..788e872f5ce 100644 --- a/vespajlib/src/main/java/com/yahoo/slime/JsonDecoder.java +++ b/vespajlib/src/main/java/com/yahoo/slime/JsonDecoder.java @@ -77,7 +77,7 @@ public class JsonDecoder { @SuppressWarnings("fallthrough") private void decodeNumber(Inserter inserter) { buf.reset(); - boolean likelyFloatingPoint=false; + boolean likelyFloatingPoint = false; for (;;) { switch (c) { case '.': case 'e': case 'E': diff --git a/vespajlib/src/main/java/com/yahoo/slime/LongValue.java b/vespajlib/src/main/java/com/yahoo/slime/LongValue.java index 62752f2b27c..e728e890274 100644 --- a/vespajlib/src/main/java/com/yahoo/slime/LongValue.java +++ b/vespajlib/src/main/java/com/yahoo/slime/LongValue.java @@ -1,11 +1,16 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.slime; +/** + * @author havardpe + */ final class LongValue extends Value { + private final long value; public LongValue(long value) { this.value = value; } - public final Type type() { return Type.LONG; } - public final long asLong() { return this.value; } - public final double asDouble() { return (double)this.value; } - public final void accept(Visitor v) { v.visitLong(value); } + public Type type() { return Type.LONG; } + public long asLong() { return this.value; } + public double asDouble() { return this.value; } + public void accept(Visitor v) { v.visitLong(value); } + } diff --git a/vespajlib/src/main/java/com/yahoo/slime/NixValue.java b/vespajlib/src/main/java/com/yahoo/slime/NixValue.java index b65cd1dabbf..4ae60f26f07 100644 --- a/vespajlib/src/main/java/com/yahoo/slime/NixValue.java +++ b/vespajlib/src/main/java/com/yahoo/slime/NixValue.java @@ -1,12 +1,16 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.slime; +/** + * @author havardpe + */ final class NixValue extends Value { + private static final NixValue invalidNix = new NixValue(); private static final NixValue validNix = new NixValue(); private NixValue() {} - public final Type type() { return Type.NIX; } - public final void accept(Visitor v) { + public Type type() { return Type.NIX; } + public void accept(Visitor v) { if (valid()) { v.visitNix(); } else { @@ -15,4 +19,5 @@ final class NixValue extends Value { } public static NixValue invalid() { return invalidNix; } public static NixValue instance() { return validNix; } + } diff --git a/vespajlib/src/main/java/com/yahoo/slime/ObjectValue.java b/vespajlib/src/main/java/com/yahoo/slime/ObjectValue.java index 33d2e5be4ed..6ba16f8dd6c 100644 --- a/vespajlib/src/main/java/com/yahoo/slime/ObjectValue.java +++ b/vespajlib/src/main/java/com/yahoo/slime/ObjectValue.java @@ -6,7 +6,9 @@ package com.yahoo.slime; * value fields. 
Fields can be inspected or traversed using the * {@link Inspector} interface, and you can add new fields by using the * various "set" methods in the @ref Cursor interface. - **/ + * + * @author havardpe + */ final class ObjectValue extends Value { private int capacity = 16; @@ -16,7 +18,7 @@ final class ObjectValue extends Value { private int[] hash = new int[capacity + hashSize() + (capacity << 1)]; private final SymbolTable names; - private final void rehash() { + private void rehash() { capacity = (capacity << 1); Value[] v = values; values = new Value[capacity]; @@ -37,7 +39,7 @@ final class ObjectValue extends Value { } } - private final Value put(int sym, Value value) { + private Value put(int sym, Value value) { if (used == capacity) { rehash(); } @@ -59,7 +61,7 @@ final class ObjectValue extends Value { return value; } - private final Value get(int sym) { + private Value get(int sym) { int entry = hash[capacity + (sym % hashSize())]; while (entry != 0) { final int idx = hash[entry]; @@ -77,32 +79,33 @@ final class ObjectValue extends Value { put(sym, value); } - public final Type type() { return Type.OBJECT; } - public final int children() { return used; } - public final int fields() { return used; } + public Type type() { return Type.OBJECT; } + public int children() { return used; } + public int fields() { return used; } - public final Value field(int sym) { return get(sym); } - public final Value field(String name) { return get(names.lookup(name)); } + public Value field(int sym) { return get(sym); } + public Value field(String name) { return get(names.lookup(name)); } - public final void accept(Visitor v) { v.visitObject(this); } + public void accept(Visitor v) { v.visitObject(this); } - public final void traverse(ObjectSymbolTraverser ot) { + public void traverse(ObjectSymbolTraverser ot) { for (int i = 0; i < used; ++i) { ot.field(hash[i], values[i]); } } - public final void traverse(ObjectTraverser ot) { + public void traverse(ObjectTraverser ot) { 
for (int i = 0; i < used; ++i) { ot.field(names.inspect(hash[i]), values[i]); } } - protected final Cursor setLeaf(int sym, Value value) { return put(sym, value); } - public final Cursor setArray(int sym) { return put(sym, new ArrayValue(names)); } - public final Cursor setObject(int sym) { return put(sym, new ObjectValue(names)); } + protected Cursor setLeaf(int sym, Value value) { return put(sym, value); } + public Cursor setArray(int sym) { return put(sym, new ArrayValue(names)); } + public Cursor setObject(int sym) { return put(sym, new ObjectValue(names)); } + + protected Cursor setLeaf(String name, Value value) { return put(names.insert(name), value); } + public Cursor setArray(String name) { return put(names.insert(name), new ArrayValue(names)); } + public Cursor setObject(String name) { return put(names.insert(name), new ObjectValue(names)); } - protected final Cursor setLeaf(String name, Value value) { return put(names.insert(name), value); } - public final Cursor setArray(String name) { return put(names.insert(name), new ArrayValue(names)); } - public final Cursor setObject(String name) { return put(names.insert(name), new ObjectValue(names)); } } diff --git a/vespajlib/src/main/java/com/yahoo/slime/StringValue.java b/vespajlib/src/main/java/com/yahoo/slime/StringValue.java index fbd4e150f7e..d7a7281ca1d 100644 --- a/vespajlib/src/main/java/com/yahoo/slime/StringValue.java +++ b/vespajlib/src/main/java/com/yahoo/slime/StringValue.java @@ -4,8 +4,11 @@ package com.yahoo.slime; /** * A value holding a String in Java native format. * See also @ref Utf8Value (for lazy decoding). 
- **/ + * + * @author havardpe + */ final class StringValue extends Value { + private final String value; private byte[] utf8; private StringValue(String value) { this.value = value; } @@ -16,13 +19,14 @@ final class StringValue extends Value { return new StringValue(value); } } - public final Type type() { return Type.STRING; } - public final String asString() { return this.value; } - public final byte[] asUtf8() { + public Type type() { return Type.STRING; } + public String asString() { return this.value; } + public byte[] asUtf8() { if (utf8 == null) { utf8 = Utf8Codec.encode(value); } return utf8; } - public final void accept(Visitor v) { v.visitString(value); } + public void accept(Visitor v) { v.visitString(value); } + } diff --git a/vespajlib/src/main/java/com/yahoo/slime/Utf8Value.java b/vespajlib/src/main/java/com/yahoo/slime/Utf8Value.java index 3ddcf4e4e24..4ea0dcc6a6e 100644 --- a/vespajlib/src/main/java/com/yahoo/slime/Utf8Value.java +++ b/vespajlib/src/main/java/com/yahoo/slime/Utf8Value.java @@ -5,8 +5,11 @@ package com.yahoo.slime; * A value type encapsulating a String in its UTF-8 representation. * Useful for lazy decoding; if the data is just passed through in * UTF-8 it will never be converted at all. 
- **/ + * + * @author havardpe + */ final class Utf8Value extends Value { + private final byte[] value; private String string; private Utf8Value(byte[] value) { this.value = value; } @@ -17,13 +20,14 @@ final class Utf8Value extends Value { return new Utf8Value(value); } } - public final Type type() { return Type.STRING; } - public final String asString() { + public Type type() { return Type.STRING; } + public String asString() { if (string == null) { string = Utf8Codec.decode(value, 0, value.length); } return string; } - public final byte[] asUtf8() { return value; } - public final void accept(Visitor v) { v.visitString(value); } + public byte[] asUtf8() { return value; } + public void accept(Visitor v) { v.visitString(value); } + } diff --git a/vespajlib/src/main/java/com/yahoo/slime/Value.java b/vespajlib/src/main/java/com/yahoo/slime/Value.java index 181dc033f3f..6fb267ab9bb 100644 --- a/vespajlib/src/main/java/com/yahoo/slime/Value.java +++ b/vespajlib/src/main/java/com/yahoo/slime/Value.java @@ -1,16 +1,16 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.slime; - import java.io.ByteArrayOutputStream; import java.util.Arrays; /** * Common implementation for all value types. * All default behavior is here, so specific types only - * need override their actually useful parts. - **/ - + * need override their actually useful parts + * + * @author havardpe + */ abstract class Value implements Cursor { private static final String emptyString = ""; diff --git a/vespajlib/src/main/java/com/yahoo/tensor/functions/Slice.java b/vespajlib/src/main/java/com/yahoo/tensor/functions/Slice.java index 714e8deb0bb..a6f71dacf30 100644 --- a/vespajlib/src/main/java/com/yahoo/tensor/functions/Slice.java +++ b/vespajlib/src/main/java/com/yahoo/tensor/functions/Slice.java @@ -1,7 +1,7 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.tensor.functions; -import com.google.common.annotations.Beta; +import com.yahoo.api.annotations.Beta; import com.yahoo.tensor.PartialAddress; import com.yahoo.tensor.Tensor; import com.yahoo.tensor.TensorAddress; diff --git a/vespajlib/src/test/java/com/yahoo/slime/SlimeUtilsTest.java b/vespajlib/src/test/java/com/yahoo/slime/SlimeUtilsTest.java index 67311d75029..28930b67264 100644 --- a/vespajlib/src/test/java/com/yahoo/slime/SlimeUtilsTest.java +++ b/vespajlib/src/test/java/com/yahoo/slime/SlimeUtilsTest.java @@ -8,9 +8,7 @@ import java.io.IOException; import java.util.List; import java.util.stream.Collectors; -import static org.hamcrest.core.Is.is; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; @@ -37,7 +35,8 @@ public class SlimeUtilsTest { SlimeUtils.copyObject(slime2.get(), subobj); - assertThat(root.toString(), is("{\"foo\":\"foobie\",\"bar\":{\"a\":\"a\",\"b\":2,\"c\":true,\"d\":3.14,\"e\":\"0x64\",\"f\":null}}")); + assertEquals("{\"foo\":\"foobie\",\"bar\":{\"a\":\"a\",\"b\":2,\"c\":true,\"d\":3.14,\"e\":\"0x64\",\"f\":null}}", + root.toString()); } @Test @@ -61,7 +60,8 @@ public class SlimeUtilsTest { SlimeUtils.copyObject(slime2.get(), subobj); - assertThat(root.toString(), is("{\"foo\":\"foobie\",\"bar\":{\"a\":[\"foo\",4,true,3.14,null,\"0x64\",{\"inner\":\"binner\"}]}}")); + assertEquals("{\"foo\":\"foobie\",\"bar\":{\"a\":[\"foo\",4,true,3.14,null,\"0x64\",{\"inner\":\"binner\"}]}}", + root.toString()); } @Test @@ -71,21 +71,21 @@ public class SlimeUtilsTest { root.setString("foo", "foobie"); root.setObject("bar"); String json = Utf8.toString(SlimeUtils.toJsonBytes(slime)); - assertThat(json, is("{\"foo\":\"foobie\",\"bar\":{}}")); + assertEquals("{\"foo\":\"foobie\",\"bar\":{}}", json); } @Test public void test_json_to_slime() { byte[] json = Utf8.toBytes("{\"foo\":\"foobie\",\"bar\":{}}"); Slime slime = 
SlimeUtils.jsonToSlime(json); - assertThat(slime.get().field("foo").asString(), is("foobie")); + assertEquals("foobie", slime.get().field("foo").asString()); assertTrue(slime.get().field("bar").valid()); } @Test public void test_json_to_slime_or_throw() { Slime slime = SlimeUtils.jsonToSlimeOrThrow("{\"foo\":\"foobie\",\"bar\":{}}"); - assertThat(slime.get().field("foo").asString(), is("foobie")); + assertEquals("foobie", slime.get().field("foo").asString()); assertTrue(slime.get().field("bar").valid()); } @@ -107,7 +107,7 @@ public class SlimeUtilsTest { assertEquals(0, SlimeUtils.entriesStream(inspector.field("object")).count()); assertEquals(List.of(1L, 2L, 4L, 3L, 0L), - SlimeUtils.entriesStream(inspector.field("list")).map(Inspector::asLong).collect(Collectors.toList())); + SlimeUtils.entriesStream(inspector.field("list")).map(Inspector::asLong).collect(Collectors.toList())); } } diff --git a/vespalib/CMakeLists.txt b/vespalib/CMakeLists.txt index 6acac5d3bf5..400c1ec5d1a 100644 --- a/vespalib/CMakeLists.txt +++ b/vespalib/CMakeLists.txt @@ -42,6 +42,7 @@ vespa_define_module( src/tests/datastore/array_store src/tests/datastore/array_store_config src/tests/datastore/buffer_type + src/tests/datastore/compact_buffer_candidates src/tests/datastore/datastore src/tests/datastore/fixed_size_hash_map src/tests/datastore/sharded_hash_map @@ -150,6 +151,8 @@ vespa_define_module( src/tests/util/size_literals src/tests/valgrind src/tests/visit_ranges + src/tests/invokeservice + src/tests/wakeup src/tests/websocket src/tests/zcurve diff --git a/vespalib/src/tests/btree/btree_store/btree_store_test.cpp b/vespalib/src/tests/btree/btree_store/btree_store_test.cpp index e7d923d0e87..974aafb392a 100644 --- a/vespalib/src/tests/btree/btree_store/btree_store_test.cpp +++ b/vespalib/src/tests/btree/btree_store/btree_store_test.cpp @@ -5,9 +5,12 @@ #include <vespa/vespalib/btree/btreeroot.hpp> #include <vespa/vespalib/btree/btreestore.hpp> #include 
<vespa/vespalib/datastore/buffer_type.hpp> +#include <vespa/vespalib/datastore/compaction_strategy.h> #include <vespa/vespalib/gtest/gtest.h> using vespalib::GenerationHandler; +using vespalib::datastore::CompactionSpec; +using vespalib::datastore::CompactionStrategy; using vespalib::datastore::EntryRef; namespace vespalib::btree { @@ -73,61 +76,115 @@ BTreeStoreTest::~BTreeStoreTest() inc_generation(); } +namespace { + +class ChangeWriter { + std::vector<EntryRef*> _old_refs; +public: + ChangeWriter(uint32_t capacity); + ~ChangeWriter(); + void write(const std::vector<EntryRef>& refs); + void emplace_back(EntryRef& ref) { _old_refs.emplace_back(&ref); } +}; + +ChangeWriter::ChangeWriter(uint32_t capacity) + : _old_refs() +{ + _old_refs.reserve(capacity); +} + +ChangeWriter::~ChangeWriter() = default; + +void +ChangeWriter::write(const std::vector<EntryRef> &refs) +{ + assert(refs.size() == _old_refs.size()); + auto old_ref_itr = _old_refs.begin(); + for (auto ref : refs) { + **old_ref_itr = ref; + ++old_ref_itr; + } + assert(old_ref_itr == _old_refs.end()); + _old_refs.clear(); +} + +} + void BTreeStoreTest::test_compact_sequence(uint32_t sequence_length) { auto &store = _store; + uint32_t entry_ref_offset_bits = TreeStore::RefType::offset_bits; EntryRef ref1 = add_sequence(4, 4 + sequence_length); EntryRef ref2 = add_sequence(5, 5 + sequence_length); - EntryRef old_ref1 = ref1; - EntryRef old_ref2 = ref2; std::vector<EntryRef> refs; + refs.reserve(2); + refs.emplace_back(ref1); + refs.emplace_back(ref2); + std::vector<EntryRef> temp_refs; for (int i = 0; i < 1000; ++i) { - refs.emplace_back(add_sequence(i + 6, i + 6 + sequence_length)); + temp_refs.emplace_back(add_sequence(i + 6, i + 6 + sequence_length)); } - for (auto& ref : refs) { + for (auto& ref : temp_refs) { store.clear(ref); } inc_generation(); + ChangeWriter change_writer(refs.size()); + std::vector<EntryRef> move_refs; + move_refs.reserve(refs.size()); auto usage_before = store.getMemoryUsage(); for 
(uint32_t pass = 0; pass < 15; ++pass) { - auto to_hold = store.start_compact_worst_buffers(); - ref1 = store.move(ref1); - ref2 = store.move(ref2); + CompactionSpec compaction_spec(true, false); + CompactionStrategy compaction_strategy; + auto to_hold = store.start_compact_worst_buffers(compaction_spec, compaction_strategy); + std::vector<bool> filter(TreeStore::RefType::numBuffers()); + for (auto buffer_id : to_hold) { + filter[buffer_id] = true; + } + for (auto& ref : refs) { + if (ref.valid() && filter[ref.buffer_id(entry_ref_offset_bits)]) { + move_refs.emplace_back(ref); + change_writer.emplace_back(ref); + } + } + store.move(move_refs); + change_writer.write(move_refs); + move_refs.clear(); store.finishCompact(to_hold); inc_generation(); } - EXPECT_NE(old_ref1, ref1); - EXPECT_NE(old_ref2, ref2); - EXPECT_EQ(make_exp_sequence(4, 4 + sequence_length), get_sequence(ref1)); - EXPECT_EQ(make_exp_sequence(5, 5 + sequence_length), get_sequence(ref2)); + EXPECT_NE(ref1, refs[0]); + EXPECT_NE(ref2, refs[1]); + EXPECT_EQ(make_exp_sequence(4, 4 + sequence_length), get_sequence(refs[0])); + EXPECT_EQ(make_exp_sequence(5, 5 + sequence_length), get_sequence(refs[1])); auto usage_after = store.getMemoryUsage(); EXPECT_GT(usage_before.deadBytes(), usage_after.deadBytes()); - store.clear(ref1); - store.clear(ref2); + store.clear(refs[0]); + store.clear(refs[1]); } TEST_F(BTreeStoreTest, require_that_nodes_for_multiple_btrees_are_compacted) { auto &store = this->_store; - EntryRef ref1 = add_sequence(4, 40); - EntryRef ref2 = add_sequence(100, 130); + std::vector<EntryRef> refs; + refs.emplace_back(add_sequence(4, 40)); + refs.emplace_back(add_sequence(100, 130)); store.clear(add_sequence(1000, 20000)); inc_generation(); auto usage_before = store.getMemoryUsage(); for (uint32_t pass = 0; pass < 15; ++pass) { - auto to_hold = store.start_compact_worst_btree_nodes(); - store.move_btree_nodes(ref1); - store.move_btree_nodes(ref2); + CompactionStrategy compaction_strategy; + 
auto to_hold = store.start_compact_worst_btree_nodes(compaction_strategy); + store.move_btree_nodes(refs); store.finish_compact_worst_btree_nodes(to_hold); inc_generation(); } - EXPECT_EQ(make_exp_sequence(4, 40), get_sequence(ref1)); - EXPECT_EQ(make_exp_sequence(100, 130), get_sequence(ref2)); + EXPECT_EQ(make_exp_sequence(4, 40), get_sequence(refs[0])); + EXPECT_EQ(make_exp_sequence(100, 130), get_sequence(refs[1])); auto usage_after = store.getMemoryUsage(); EXPECT_GT(usage_before.deadBytes(), usage_after.deadBytes()); - store.clear(ref1); - store.clear(ref2); + store.clear(refs[0]); + store.clear(refs[1]); } TEST_F(BTreeStoreTest, require_that_short_arrays_are_compacted) diff --git a/vespalib/src/tests/btree/btree_test.cpp b/vespalib/src/tests/btree/btree_test.cpp index 4af0b9672f2..bd4f4f8ee08 100644 --- a/vespalib/src/tests/btree/btree_test.cpp +++ b/vespalib/src/tests/btree/btree_test.cpp @@ -17,6 +17,7 @@ #include <vespa/vespalib/btree/btree.hpp> #include <vespa/vespalib/btree/btreestore.hpp> #include <vespa/vespalib/datastore/buffer_type.hpp> +#include <vespa/vespalib/datastore/compaction_strategy.h> #include <vespa/vespalib/test/btree/btree_printer.h> #include <vespa/vespalib/gtest/gtest.h> @@ -24,6 +25,7 @@ LOG_SETUP("btree_test"); using vespalib::GenerationHandler; +using vespalib::datastore::CompactionStrategy; using vespalib::datastore::EntryRef; namespace vespalib::btree { @@ -1599,8 +1601,9 @@ TEST_F(BTreeTest, require_that_compaction_works) auto memory_usage_before = t.getAllocator().getMemoryUsage(); t.foreach_key([&before_list](int key) { before_list.emplace_back(key); }); make_iterators(t, before_list, before_iterators); + CompactionStrategy compaction_strategy; for (int i = 0; i < 15; ++i) { - t.compact_worst(); + t.compact_worst(compaction_strategy); } inc_generation(g, t); auto memory_usage_after = t.getAllocator().getMemoryUsage(); diff --git a/vespalib/src/tests/datastore/array_store/array_store_test.cpp 
b/vespalib/src/tests/datastore/array_store/array_store_test.cpp index dbd6d41f5e6..c58e357a9a1 100644 --- a/vespalib/src/tests/datastore/array_store/array_store_test.cpp +++ b/vespalib/src/tests/datastore/array_store/array_store_test.cpp @@ -3,6 +3,8 @@ #include <vespa/vespalib/test/datastore/buffer_stats.h> #include <vespa/vespalib/test/datastore/memstats.h> #include <vespa/vespalib/datastore/array_store.hpp> +#include <vespa/vespalib/datastore/compaction_spec.h> +#include <vespa/vespalib/datastore/compaction_strategy.h> #include <vespa/vespalib/stllike/hash_map.hpp> #include <vespa/vespalib/testkit/testapp.h> #include <vespa/vespalib/test/insertion_operators.h> @@ -124,7 +126,9 @@ struct Fixture } template <typename TestedRefType> void compactWorst(bool compactMemory, bool compactAddressSpace) { - ICompactionContext::UP ctx = store.compactWorst(compactMemory, compactAddressSpace); + CompactionSpec compaction_spec(compactMemory, compactAddressSpace); + CompactionStrategy compaction_strategy; + ICompactionContext::UP ctx = store.compactWorst(compaction_spec, compaction_strategy); std::vector<TestedRefType> refs; for (auto itr = refStore.begin(); itr != refStore.end(); ++itr) { refs.emplace_back(itr->first); diff --git a/vespalib/src/tests/datastore/compact_buffer_candidates/CMakeLists.txt b/vespalib/src/tests/datastore/compact_buffer_candidates/CMakeLists.txt new file mode 100644 index 00000000000..d6731071927 --- /dev/null +++ b/vespalib/src/tests/datastore/compact_buffer_candidates/CMakeLists.txt @@ -0,0 +1,9 @@ +# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+vespa_add_executable(vespalib_compact_buffer_candidates_test_app TEST + SOURCES + compact_buffer_candidates_test.cpp + DEPENDS + vespalib + GTest::GTest +) +vespa_add_test(NAME vespalib_compact_buffer_candidates_test_app COMMAND vespalib_compact_buffer_candidates_test_app) diff --git a/vespalib/src/tests/datastore/compact_buffer_candidates/compact_buffer_candidates_test.cpp b/vespalib/src/tests/datastore/compact_buffer_candidates/compact_buffer_candidates_test.cpp new file mode 100644 index 00000000000..80c0d571894 --- /dev/null +++ b/vespalib/src/tests/datastore/compact_buffer_candidates/compact_buffer_candidates_test.cpp @@ -0,0 +1,91 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. + +#include <vespa/vespalib/datastore/compact_buffer_candidates.h> +#include <vespa/vespalib/gtest/gtest.h> + +using vespalib::datastore::CompactBufferCandidates; + +namespace { + +constexpr uint32_t num_buffers = 1024; +constexpr double default_ratio = 0.2 / 2; +constexpr size_t default_slack = 1000; + +}; + + +class CompactBufferCandidatesTest : public ::testing::Test +{ +public: + CompactBufferCandidates candidates; + CompactBufferCandidatesTest(); + ~CompactBufferCandidatesTest() override; + void reset_candidates(uint32_t max_buffers); + CompactBufferCandidatesTest& add(uint32_t buffer_id, size_t used, size_t dead); + void assert_select(const std::vector<uint32_t>& exp); +}; + +CompactBufferCandidatesTest::CompactBufferCandidatesTest() + : ::testing::Test(), + candidates(num_buffers, 1, default_ratio, default_slack) +{ +} + +CompactBufferCandidatesTest::~CompactBufferCandidatesTest() = default; + +void +CompactBufferCandidatesTest::reset_candidates(uint32_t max_buffers) +{ + candidates = CompactBufferCandidates(num_buffers, max_buffers, default_ratio, default_slack); +} + +CompactBufferCandidatesTest& +CompactBufferCandidatesTest::add(uint32_t buffer_id, size_t used, size_t dead) +{ + candidates.add(buffer_id, used, 
dead); + return *this; +} + +void +CompactBufferCandidatesTest::assert_select(const std::vector<uint32_t>& exp) +{ + std::vector<uint32_t> act; + candidates.select(act); + EXPECT_EQ(exp, act); +} + +TEST_F(CompactBufferCandidatesTest, select_single) +{ + add(0, 10000, 2000).add(1, 10000, 3000); + assert_select({1}); +} + +TEST_F(CompactBufferCandidatesTest, select_two) +{ + reset_candidates(2); + add(0, 10000, 2000).add(3, 10000, 3000).add(7, 10000, 4000); + assert_select({7, 3}); +} + +TEST_F(CompactBufferCandidatesTest, select_all) +{ + reset_candidates(4); + add(1, 10000, 2000).add(3, 10000, 4000).add(8, 10000, 3000); + assert_select({3, 8, 1}); +} + +TEST_F(CompactBufferCandidatesTest, select_cutoff_by_ratio) +{ + reset_candidates(4); + add(1, 100000, 9999).add(3, 100000, 40000).add(8, 100000, 30000); + assert_select({3, 8}); +} + +TEST_F(CompactBufferCandidatesTest, select_cutoff_by_slack) +{ + reset_candidates(4); + add(1, 2000, 999).add(3, 2000, 1200).add(9, 2000, 1300); + assert_select({9, 3}); +} + +GTEST_MAIN_RUN_ALL_TESTS() diff --git a/vespalib/src/tests/datastore/sharded_hash_map/sharded_hash_map_test.cpp b/vespalib/src/tests/datastore/sharded_hash_map/sharded_hash_map_test.cpp index 799f2c79dd4..796e19a97d1 100644 --- a/vespalib/src/tests/datastore/sharded_hash_map/sharded_hash_map_test.cpp +++ b/vespalib/src/tests/datastore/sharded_hash_map/sharded_hash_map_test.cpp @@ -1,6 +1,8 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
#include <vespa/vespalib/datastore/sharded_hash_map.h> +#include <vespa/vespalib/datastore/entry_ref_filter.h> +#include <vespa/vespalib/datastore/i_compactable.h> #include <vespa/vespalib/datastore/unique_store_allocator.h> #include <vespa/vespalib/datastore/unique_store_comparator.h> @@ -11,11 +13,15 @@ #include <vespa/vespalib/gtest/gtest.h> #include <vespa/vespalib/datastore/unique_store_allocator.hpp> +#include <iostream> +#include <thread> #include <vespa/log/log.h> LOG_SETUP("vespalib_datastore_shared_hash_test"); using vespalib::datastore::EntryRef; +using vespalib::datastore::EntryRefFilter; +using vespalib::datastore::ICompactable; using RefT = vespalib::datastore::EntryRefT<22>; using MyAllocator = vespalib::datastore::UniqueStoreAllocator<uint32_t, RefT>; using MyDataStore = vespalib::datastore::DataStoreT<RefT>; @@ -24,6 +30,72 @@ using MyHashMap = vespalib::datastore::ShardedHashMap; using GenerationHandler = vespalib::GenerationHandler; using vespalib::makeLambdaTask; +constexpr uint32_t small_population = 50; +/* + * large_population should trigger multiple callbacks from normalize_values + * and foreach_value + */ +constexpr uint32_t large_population = 1200; + +namespace vespalib::datastore { + +/* + * Print EntryRef as RefT which is used by test_normalize_values and + * test_foreach_value to differentiate between buffers + */ +void PrintTo(const EntryRef &ref, std::ostream* os) { + RefT iref(ref); + *os << "RefT(" << iref.offset() << "," << iref.bufferId() << ")"; +} + +} + +namespace { + +void consider_yield(uint32_t i) +{ + if ((i % 1000) == 0) { + // Need to yield sometimes to avoid livelock when running unit test with valgrind + std::this_thread::yield(); + } +} + +class MyCompactable : public ICompactable +{ + MyAllocator& _allocator; + std::vector<EntryRef>& _new_refs; +public: + MyCompactable(MyAllocator& allocator, std::vector<EntryRef>& new_refs) + : ICompactable(), + _allocator(allocator), + _new_refs(new_refs) + { + } + ~MyCompactable() 
override = default; + + EntryRef move(EntryRef ref) override { + auto new_ref = _allocator.move(ref); + _allocator.hold(ref); + _new_refs.emplace_back(new_ref); + return new_ref; + } +}; + +uint32_t select_buffer(uint32_t i) { + if ((i % 2) == 0) { + return 0; + } + if ((i % 3) == 0) { + return 1; + } + if ((i % 5) == 0) { + return 2; + } + return 3; +} + +} + struct DataStoreShardedHashTest : public ::testing::Test { GenerationHandler _generationHandler; @@ -50,7 +122,11 @@ struct DataStoreShardedHashTest : public ::testing::Test void read_work(uint32_t cnt); void read_work(); void write_work(uint32_t cnt); - void populate_sample_data(); + void populate_sample_data(uint32_t cnt); + void populate_sample_values(uint32_t cnt); + void clear_sample_values(uint32_t cnt); + void test_normalize_values(bool use_filter, bool one_filter); + void test_foreach_value(bool one_filter); }; @@ -142,6 +218,7 @@ DataStoreShardedHashTest::read_work(uint32_t cnt) EXPECT_EQ(key, wrapped_entry.value()); ++found; } + consider_yield(i); } _done_read_work += i; _found_count += found; @@ -168,6 +245,7 @@ DataStoreShardedHashTest::write_work(uint32_t cnt) remove(key); } commit(); + consider_yield(i); } _done_write_work += cnt; _stop_read = 1; @@ -175,13 +253,94 @@ DataStoreShardedHashTest::write_work(uint32_t cnt) } void -DataStoreShardedHashTest::populate_sample_data() +DataStoreShardedHashTest::populate_sample_data(uint32_t cnt) { - for (uint32_t i = 0; i < 50; ++i) { + for (uint32_t i = 0; i < cnt; ++i) { insert(i); } } +void +DataStoreShardedHashTest::populate_sample_values(uint32_t cnt) +{ + for (uint32_t i = 0; i < cnt; ++i) { + MyCompare comp(_store, i); + auto result = _hash_map.find(comp, EntryRef()); + ASSERT_NE(result, nullptr); + EXPECT_EQ(i, _allocator.get_wrapped(result->first.load_relaxed()).value()); + result->second.store_relaxed(RefT(i + 200, select_buffer(i))); + } +} + +void +DataStoreShardedHashTest::clear_sample_values(uint32_t cnt) +{ + for (uint32_t i = 0; i < cnt; 
++i) { + MyCompare comp(_store, i); + auto result = _hash_map.find(comp, EntryRef()); + ASSERT_NE(result, nullptr); + EXPECT_EQ(i, _allocator.get_wrapped(result->first.load_relaxed()).value()); + result->second.store_relaxed(EntryRef()); + } +} + +namespace { + +template <typename RefT> +EntryRefFilter +make_entry_ref_filter(bool one_filter) +{ + if (one_filter) { + EntryRefFilter filter(RefT::numBuffers(), RefT::offset_bits); + filter.add_buffer(3); + return filter; + } + return EntryRefFilter::create_all_filter(RefT::numBuffers(), RefT::offset_bits); +} + +} + +void +DataStoreShardedHashTest::test_normalize_values(bool use_filter, bool one_filter) +{ + populate_sample_data(large_population); + populate_sample_values(large_population); + if (use_filter) { + auto filter = make_entry_ref_filter<RefT>(one_filter); + EXPECT_TRUE(_hash_map.normalize_values([](std::vector<EntryRef> &refs) noexcept { for (auto &ref : refs) { RefT iref(ref); ref = RefT(iref.offset() + 300, iref.bufferId()); } }, filter)); + } else { + EXPECT_TRUE(_hash_map.normalize_values([](EntryRef ref) noexcept { RefT iref(ref); return RefT(iref.offset() + 300, iref.bufferId()); })); + } + for (uint32_t i = 0; i < large_population; ++i) { + MyCompare comp(_store, i); + auto result = _hash_map.find(comp, EntryRef()); + ASSERT_NE(result, nullptr); + EXPECT_EQ(i, _allocator.get_wrapped(result->first.load_relaxed()).value()); + ASSERT_EQ(select_buffer(i), RefT(result->second.load_relaxed()).bufferId()); + if (use_filter && one_filter && select_buffer(i) != 3) { + ASSERT_EQ(i + 200, RefT(result->second.load_relaxed()).offset()); + } else { + ASSERT_EQ(i + 500, RefT(result->second.load_relaxed()).offset()); + } + result->second.store_relaxed(EntryRef()); + } +} + +void +DataStoreShardedHashTest::test_foreach_value(bool one_filter) +{ + populate_sample_data(large_population); + populate_sample_values(large_population); + + auto filter = make_entry_ref_filter<RefT>(one_filter); + std::vector<EntryRef> 
exp_refs; + EXPECT_FALSE(_hash_map.normalize_values([&exp_refs](std::vector<EntryRef>& refs) { exp_refs.insert(exp_refs.end(), refs.begin(), refs.end()); }, filter)); + std::vector<EntryRef> act_refs; + _hash_map.foreach_value([&act_refs](const std::vector<EntryRef> &refs) { act_refs.insert(act_refs.end(), refs.begin(), refs.end()); }, filter); + EXPECT_EQ(exp_refs, act_refs); + clear_sample_values(large_population); +} + TEST_F(DataStoreShardedHashTest, single_threaded_reader_without_updates) { _report_work = true; @@ -216,7 +375,7 @@ TEST_F(DataStoreShardedHashTest, memory_usage_is_reported) EXPECT_EQ(0, initial_usage.deadBytes()); EXPECT_EQ(0, initial_usage.allocatedBytesOnHold()); auto guard = _generationHandler.takeGuard(); - for (uint32_t i = 0; i < 50; ++i) { + for (uint32_t i = 0; i < small_population; ++i) { insert(i); } auto usage = _hash_map.get_memory_usage(); @@ -226,29 +385,31 @@ TEST_F(DataStoreShardedHashTest, memory_usage_is_reported) TEST_F(DataStoreShardedHashTest, foreach_key_works) { - populate_sample_data(); + populate_sample_data(small_population); std::vector<uint32_t> keys; _hash_map.foreach_key([this, &keys](EntryRef ref) { keys.emplace_back(_allocator.get_wrapped(ref).value()); }); std::sort(keys.begin(), keys.end()); - EXPECT_EQ(50, keys.size()); - for (uint32_t i = 0; i < 50; ++i) { + EXPECT_EQ(small_population, keys.size()); + for (uint32_t i = 0; i < small_population; ++i) { EXPECT_EQ(i, keys[i]); } } TEST_F(DataStoreShardedHashTest, move_keys_works) { - populate_sample_data(); + populate_sample_data(small_population); std::vector<EntryRef> refs; _hash_map.foreach_key([&refs](EntryRef ref) { refs.emplace_back(ref); }); std::vector<EntryRef> new_refs; - _hash_map.move_keys([this, &new_refs](EntryRef ref) { auto new_ref = _allocator.move(ref); _allocator.hold(ref); new_refs.emplace_back(new_ref); return new_ref; }); + MyCompactable my_compactable(_allocator, new_refs); + auto filter = make_entry_ref_filter<RefT>(false); + 
_hash_map.move_keys(my_compactable, filter); std::vector<EntryRef> verify_new_refs; _hash_map.foreach_key([&verify_new_refs](EntryRef ref) { verify_new_refs.emplace_back(ref); }); - EXPECT_EQ(50u, refs.size()); + EXPECT_EQ(small_population, refs.size()); EXPECT_NE(refs, new_refs); EXPECT_EQ(new_refs, verify_new_refs); - for (uint32_t i = 0; i < 50; ++i) { + for (uint32_t i = 0; i < small_population; ++i) { EXPECT_NE(refs[i], new_refs[i]); auto value = _allocator.get_wrapped(refs[i]).value(); auto new_value = _allocator.get_wrapped(refs[i]).value(); @@ -258,29 +419,33 @@ TEST_F(DataStoreShardedHashTest, move_keys_works) TEST_F(DataStoreShardedHashTest, normalize_values_works) { - populate_sample_data(); - for (uint32_t i = 0; i < 50; ++i) { - MyCompare comp(_store, i); - auto result = _hash_map.find(comp, EntryRef()); - ASSERT_NE(result, nullptr); - EXPECT_EQ(i, _allocator.get_wrapped(result->first.load_relaxed()).value()); - result->second.store_relaxed(EntryRef(i + 200)); - } - _hash_map.normalize_values([](EntryRef ref) noexcept { return EntryRef(ref.ref() + 300); }); - for (uint32_t i = 0; i < 50; ++i) { - MyCompare comp(_store, i); - auto result = _hash_map.find(comp, EntryRef()); - ASSERT_NE(result, nullptr); - EXPECT_EQ(i, _allocator.get_wrapped(result->first.load_relaxed()).value()); - ASSERT_EQ(i + 500, result->second.load_relaxed().ref()); - result->second.store_relaxed(EntryRef()); - } + test_normalize_values(false, false); +} + +TEST_F(DataStoreShardedHashTest, normalize_values_all_filter_works) +{ + test_normalize_values(true, false); +} + +TEST_F(DataStoreShardedHashTest, normalize_values_one_filter_works) +{ + test_normalize_values(true, true); +} + +TEST_F(DataStoreShardedHashTest, foreach_value_all_filter_works) +{ + test_foreach_value(false); +} + +TEST_F(DataStoreShardedHashTest, foreach_value_one_filter_works) +{ + test_foreach_value(true); } TEST_F(DataStoreShardedHashTest, compact_worst_shard_works) { - populate_sample_data(); - for (uint32_t i 
= 10; i < 50; ++i) { + populate_sample_data(small_population); + for (uint32_t i = 10; i < small_population; ++i) { remove(i); } commit(); diff --git a/vespalib/src/tests/datastore/unique_store/unique_store_test.cpp b/vespalib/src/tests/datastore/unique_store/unique_store_test.cpp index ccb18f13871..917c91f2dff 100644 --- a/vespalib/src/tests/datastore/unique_store/unique_store_test.cpp +++ b/vespalib/src/tests/datastore/unique_store/unique_store_test.cpp @@ -1,4 +1,6 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +#include <vespa/vespalib/datastore/compaction_spec.h> +#include <vespa/vespalib/datastore/compaction_strategy.h> #include <vespa/vespalib/datastore/unique_store.hpp> #include <vespa/vespalib/datastore/unique_store_remapper.h> #include <vespa/vespalib/datastore/unique_store_string_allocator.hpp> @@ -111,7 +113,9 @@ struct TestBase : public ::testing::Test { store.trimHoldLists(generation); } void compactWorst() { - auto remapper = store.compact_worst(true, true); + CompactionSpec compaction_spec(true, true); + CompactionStrategy compaction_strategy; + auto remapper = store.compact_worst(compaction_spec, compaction_strategy); std::vector<EntryRef> refs; for (const auto &elem : refStore) { refs.push_back(elem.first); diff --git a/vespalib/src/tests/datastore/unique_store_dictionary/unique_store_dictionary_test.cpp b/vespalib/src/tests/datastore/unique_store_dictionary/unique_store_dictionary_test.cpp index 8d82c10d340..4a8b7eafe6a 100644 --- a/vespalib/src/tests/datastore/unique_store_dictionary/unique_store_dictionary_test.cpp +++ b/vespalib/src/tests/datastore/unique_store_dictionary/unique_store_dictionary_test.cpp @@ -1,5 +1,6 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+#include <vespa/vespalib/datastore/compaction_strategy.h> #include <vespa/vespalib/datastore/unique_store.hpp> #include <vespa/vespalib/datastore/unique_store_dictionary.hpp> #include <vespa/vespalib/datastore/sharded_hash_map.h> @@ -137,8 +138,9 @@ TYPED_TEST(UniqueStoreDictionaryTest, compaction_works) this->inc_generation(); auto btree_memory_usage_before = this->dict.get_btree_memory_usage(); auto hash_memory_usage_before = this->dict.get_hash_memory_usage(); + CompactionStrategy compaction_strategy; for (uint32_t i = 0; i < 15; ++i) { - this->dict.compact_worst(true, true); + this->dict.compact_worst(true, true, compaction_strategy); } this->inc_generation(); auto btree_memory_usage_after = this->dict.get_btree_memory_usage(); diff --git a/vespalib/src/tests/dotproduct/dotproductbenchmark.cpp b/vespalib/src/tests/dotproduct/dotproductbenchmark.cpp index 77bdbf6f58b..51380028ffa 100644 --- a/vespalib/src/tests/dotproduct/dotproductbenchmark.cpp +++ b/vespalib/src/tests/dotproduct/dotproductbenchmark.cpp @@ -83,7 +83,7 @@ public: ~SparseBenchmark(); protected: struct P { - P(uint32_t key=0, int32_t value=0) : + P(uint32_t key=0, int32_t value=0) noexcept : _key(key), _value(value) { } diff --git a/vespalib/src/tests/hwaccelrated/.gitignore b/vespalib/src/tests/hwaccelrated/.gitignore new file mode 100644 index 00000000000..42f73a39d78 --- /dev/null +++ b/vespalib/src/tests/hwaccelrated/.gitignore @@ -0,0 +1 @@ +vespalib_hwaccelrated_bench_app diff --git a/vespalib/src/tests/hwaccelrated/CMakeLists.txt b/vespalib/src/tests/hwaccelrated/CMakeLists.txt index 960ae840995..9edea9c4472 100644 --- a/vespalib/src/tests/hwaccelrated/CMakeLists.txt +++ b/vespalib/src/tests/hwaccelrated/CMakeLists.txt @@ -6,3 +6,10 @@ vespa_add_executable(vespalib_hwaccelrated_test_app TEST vespalib ) vespa_add_test(NAME vespalib_hwaccelrated_test_app COMMAND vespalib_hwaccelrated_test_app) + +vespa_add_executable(vespalib_hwaccelrated_bench_app + SOURCES + hwaccelrated_bench.cpp + 
DEPENDS + vespalib +) diff --git a/vespalib/src/tests/hwaccelrated/hwaccelrated_bench.cpp b/vespalib/src/tests/hwaccelrated/hwaccelrated_bench.cpp new file mode 100644 index 00000000000..4b0141596e7 --- /dev/null +++ b/vespalib/src/tests/hwaccelrated/hwaccelrated_bench.cpp @@ -0,0 +1,60 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. + +#include <vespa/vespalib/hwaccelrated/iaccelrated.h> +#include <vespa/vespalib/hwaccelrated/generic.h> +#include <vespa/vespalib/util/time.h> +#include <cinttypes> + +using namespace vespalib; + +template<typename T> +std::vector<T> createAndFill(size_t sz) { + std::vector<T> v(sz); + for (size_t i(0); i < sz; i++) { + v[i] = rand()%128; + } + return v; +} + +template<typename T> +void +benchmarkEuclideanDistance(const hwaccelrated::IAccelrated & accel, size_t sz, size_t count) { + srand(1); + std::vector<T> a = createAndFill<T>(sz); + std::vector<T> b = createAndFill<T>(sz); + steady_time start = steady_clock::now(); + double sumOfSums(0); + for (size_t j(0); j < count; j++) { + double sum = accel.squaredEuclideanDistance(&a[0], &b[0], sz); + sumOfSums += sum; + } + duration elapsed = steady_clock::now() - start; + printf("sum=%f of N=%zu and vector length=%zu took %" PRId64 "\n", sumOfSums, count, sz, count_ms(elapsed)); +} + +void +benchMarkEuclidianDistance(const hwaccelrated::IAccelrated & accelrator, size_t sz, size_t count) { + printf("double : "); + benchmarkEuclideanDistance<double>(accelrator, sz, count); + printf("float : "); + benchmarkEuclideanDistance<float>(accelrator, sz, count); + printf("int8_t : "); + benchmarkEuclideanDistance<int8_t>(accelrator, sz, count); +} + +int main(int argc, char *argv[]) { + int length = 1000; + int count = 1000000; + if (argc > 1) { + length = atol(argv[1]); + } + if (argc > 2) { + count = atol(argv[2]); + } + printf("%s %d %d\n", argv[0], length, count); + printf("Squared Euclidian Distance - Generic\n"); + 
benchMarkEuclidianDistance(hwaccelrated::GenericAccelrator(), length, count); + printf("Squared Euclidian Distance - Optimized for this cpu\n"); + benchMarkEuclidianDistance(hwaccelrated::IAccelrated::getAccelerator(), length, count); + return 0; +} diff --git a/vespalib/src/tests/hwaccelrated/hwaccelrated_test.cpp b/vespalib/src/tests/hwaccelrated/hwaccelrated_test.cpp index 3d66769c15a..bbe0ff6663a 100644 --- a/vespalib/src/tests/hwaccelrated/hwaccelrated_test.cpp +++ b/vespalib/src/tests/hwaccelrated/hwaccelrated_test.cpp @@ -3,6 +3,8 @@ #include <vespa/vespalib/testkit/test_kit.h> #include <vespa/vespalib/hwaccelrated/iaccelrated.h> #include <vespa/vespalib/hwaccelrated/generic.h> +#include <vespa/log/log.h> +LOG_SETUP("hwaccelrated_test"); using namespace vespalib; @@ -15,26 +17,34 @@ std::vector<T> createAndFill(size_t sz) { return v; } -template<typename T> -void verifyEuclideanDistance(const hwaccelrated::IAccelrated & accel) { - const size_t testLength(255); +template<typename T, typename P> +void verifyEuclideanDistance(const hwaccelrated::IAccelrated & accel, size_t testLength, double approxFactor) { srand(1); std::vector<T> a = createAndFill<T>(testLength); std::vector<T> b = createAndFill<T>(testLength); for (size_t j(0); j < 0x20; j++) { - T sum(0); + P sum(0); for (size_t i(j); i < testLength; i++) { - sum += (a[i] - b[i]) * (a[i] - b[i]); + P d = P(a[i]) - P(b[i]); + sum += d * d; } - T hwComputedSum(accel.squaredEuclideanDistance(&a[j], &b[j], testLength - j)); - EXPECT_EQUAL(sum, hwComputedSum); + P hwComputedSum(accel.squaredEuclideanDistance(&a[j], &b[j], testLength - j)); + EXPECT_APPROX(sum, hwComputedSum, sum*approxFactor); } } +void +verifyEuclideanDistance(const hwaccelrated::IAccelrated & accelrator, size_t testLength) { + verifyEuclideanDistance<int8_t, double>(accelrator, testLength, 0.0); + verifyEuclideanDistance<float, double>(accelrator, testLength, 0.0001); // Small deviation requiring EXPECT_APPROX + verifyEuclideanDistance<double, 
double>(accelrator, testLength, 0.0); +} + TEST("test euclidean distance") { hwaccelrated::GenericAccelrator genericAccelrator; - verifyEuclideanDistance<float>(genericAccelrator); - verifyEuclideanDistance<double >(genericAccelrator); + constexpr size_t TEST_LENGTH = 140000; // must be longer than 64k + TEST_DO(verifyEuclideanDistance(hwaccelrated::GenericAccelrator(), TEST_LENGTH)); + TEST_DO(verifyEuclideanDistance(hwaccelrated::IAccelrated::getAccelerator(), TEST_LENGTH)); } TEST_MAIN() { TEST_RUN_ALL(); } diff --git a/vespalib/src/tests/invokeservice/CMakeLists.txt b/vespalib/src/tests/invokeservice/CMakeLists.txt new file mode 100644 index 00000000000..a7d7dca806e --- /dev/null +++ b/vespalib/src/tests/invokeservice/CMakeLists.txt @@ -0,0 +1,9 @@ +# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +vespa_add_executable(vespalib_invokeservice_test_app TEST + SOURCES + invokeservice_test.cpp + DEPENDS + vespalib +) +vespa_add_test(NAME vespalib_invokeservice_test_app COMMAND vespalib_invokeservice_test_app) + diff --git a/vespalib/src/tests/invokeservice/invokeservice_test.cpp b/vespalib/src/tests/invokeservice/invokeservice_test.cpp new file mode 100644 index 00000000000..1d4791f4a33 --- /dev/null +++ b/vespalib/src/tests/invokeservice/invokeservice_test.cpp @@ -0,0 +1,63 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+#include <vespa/vespalib/testkit/test_kit.h> +#include <vespa/vespalib/util/invokeserviceimpl.h> +#include <atomic> + +using namespace vespalib; + +struct InvokeCounter { + InvokeCounter() : _count(0) {} + void inc() noexcept { _count++; } + void wait_for_atleast(uint64_t n) { + while (_count <= n) { + std::this_thread::sleep_for(1ms); + } + } + std::atomic<uint64_t> _count; +}; + +TEST("require that wakeup is called") { + InvokeCounter a; + InvokeServiceImpl service(1ms); + EXPECT_EQUAL(0u, a._count); + auto ra = service.registerInvoke([&a]() noexcept { a.inc(); }); + EXPECT_TRUE(ra); + a.wait_for_atleast(1); + ra.reset(); + uint64_t countAtStop = a._count; + std::this_thread::sleep_for(1s); + EXPECT_EQUAL(countAtStop, a._count); +} + +TEST("require that same wakeup can be registered multiple times.") { + InvokeCounter a; + InvokeCounter b; + InvokeCounter c; + InvokeServiceImpl service(1ms); + EXPECT_EQUAL(0u, a._count); + auto ra1 = service.registerInvoke([&a]() noexcept { a.inc(); }); + EXPECT_TRUE(ra1); + auto rb = service.registerInvoke([&b]() noexcept { b.inc(); }); + EXPECT_TRUE(rb); + auto rc = service.registerInvoke([&c]() noexcept { c.inc(); }); + EXPECT_TRUE(rc); + a.wait_for_atleast(1); + b.wait_for_atleast(1); + c.wait_for_atleast(1); + auto ra2 = service.registerInvoke([&a]() noexcept { a.inc(); }); + EXPECT_TRUE(ra2); + + rb.reset(); + uint64_t countAtStop = b._count; + uint64_t a_count = a._count; + uint64_t c_count = c._count; + std::this_thread::sleep_for(1s); + EXPECT_EQUAL(countAtStop, b._count); + + uint64_t diff_c = c._count - c_count; + uint64_t diff_a = a._count - a_count; + EXPECT_LESS((diff_c*3)/2, diff_a); // diff_c*3/2 should still be smaller than diff_a(2x) +} + + +TEST_MAIN() { TEST_RUN_ALL(); } diff --git a/vespalib/src/tests/net/tls/policy_checking_certificate_verifier/policy_checking_certificate_verifier_test.cpp b/vespalib/src/tests/net/tls/policy_checking_certificate_verifier/policy_checking_certificate_verifier_test.cpp index 
e129ef2a389..812d06868fd 100644 --- a/vespalib/src/tests/net/tls/policy_checking_certificate_verifier/policy_checking_certificate_verifier_test.cpp +++ b/vespalib/src/tests/net/tls/policy_checking_certificate_verifier/policy_checking_certificate_verifier_test.cpp @@ -7,57 +7,93 @@ using namespace vespalib; using namespace vespalib::net::tls; -bool glob_matches(vespalib::stringref pattern, vespalib::stringref string_to_check) { - auto glob = CredentialMatchPattern::create_from_glob(pattern); +bool dns_glob_matches(vespalib::stringref pattern, vespalib::stringref string_to_check) { + auto glob = CredentialMatchPattern::create_from_dns_glob(pattern); return glob->matches(string_to_check); } +bool uri_glob_matches(vespalib::stringref pattern, vespalib::stringref string_to_check) { + auto glob = CredentialMatchPattern::create_from_uri_glob(pattern); + return glob->matches(string_to_check); +} + +void verify_all_glob_types_match(vespalib::stringref pattern, vespalib::stringref string_to_check) { + EXPECT_TRUE(dns_glob_matches(pattern, string_to_check)); + EXPECT_TRUE(uri_glob_matches(pattern, string_to_check)); +} + +void verify_all_glob_types_mismatch(vespalib::stringref pattern, vespalib::stringref string_to_check) { + EXPECT_FALSE(dns_glob_matches(pattern, string_to_check)); + EXPECT_FALSE(uri_glob_matches(pattern, string_to_check)); +} + TEST("glob without wildcards matches entire string") { - EXPECT_TRUE(glob_matches("foo", "foo")); - EXPECT_FALSE(glob_matches("foo", "fooo")); - EXPECT_FALSE(glob_matches("foo", "ffoo")); + verify_all_glob_types_match("foo", "foo"); + verify_all_glob_types_mismatch("foo", "fooo"); + verify_all_glob_types_mismatch("foo", "ffoo"); } TEST("wildcard glob can match prefix") { - EXPECT_TRUE(glob_matches("foo*", "foo")); - EXPECT_TRUE(glob_matches("foo*", "foobar")); - EXPECT_FALSE(glob_matches("foo*", "ffoo")); + verify_all_glob_types_match("foo*", "foo"); + verify_all_glob_types_match("foo*", "foobar"); + 
verify_all_glob_types_mismatch("foo*", "ffoo"); } TEST("wildcard glob can match suffix") { - EXPECT_TRUE(glob_matches("*foo", "foo")); - EXPECT_TRUE(glob_matches("*foo", "ffoo")); - EXPECT_FALSE(glob_matches("*foo", "fooo")); + verify_all_glob_types_match("*foo", "foo"); + verify_all_glob_types_match("*foo", "ffoo"); + verify_all_glob_types_mismatch("*foo", "fooo"); } TEST("wildcard glob can match substring") { - EXPECT_TRUE(glob_matches("f*o", "fo")); - EXPECT_TRUE(glob_matches("f*o", "foo")); - EXPECT_TRUE(glob_matches("f*o", "ffoo")); - EXPECT_FALSE(glob_matches("f*o", "boo")); + verify_all_glob_types_match("f*o", "fo"); + verify_all_glob_types_match("f*o", "foo"); + verify_all_glob_types_match("f*o", "ffoo"); + verify_all_glob_types_mismatch("f*o", "boo"); } -TEST("wildcard glob does not cross multiple dot delimiter boundaries") { - EXPECT_TRUE(glob_matches("*.bar.baz", "foo.bar.baz")); - EXPECT_TRUE(glob_matches("*.bar.baz", ".bar.baz")); - EXPECT_FALSE(glob_matches("*.bar.baz", "zoid.foo.bar.baz")); - EXPECT_TRUE(glob_matches("foo.*.baz", "foo.bar.baz")); - EXPECT_FALSE(glob_matches("foo.*.baz", "foo.bar.zoid.baz")); +TEST("single char DNS glob matches single character") { + EXPECT_TRUE(dns_glob_matches("f?o", "foo")); + EXPECT_FALSE(dns_glob_matches("f?o", "fooo")); + EXPECT_FALSE(dns_glob_matches("f?o", "ffoo")); } -TEST("single char glob matches non dot characters") { - EXPECT_TRUE(glob_matches("f?o", "foo")); - EXPECT_FALSE(glob_matches("f?o", "fooo")); - EXPECT_FALSE(glob_matches("f?o", "ffoo")); - EXPECT_FALSE(glob_matches("f?o", "f.o")); +// Due to URIs being able to contain '?' characters as a query separator, don't use it for wildcarding. 
+TEST("URI glob matching treats question mark character as literal match") { + EXPECT_TRUE(uri_glob_matches("f?o", "f?o")); + EXPECT_FALSE(uri_glob_matches("f?o", "foo")); + EXPECT_FALSE(uri_glob_matches("f?o", "f?oo")); +} + +TEST("wildcard DNS glob does not cross multiple dot delimiter boundaries") { + EXPECT_TRUE(dns_glob_matches("*.bar.baz", "foo.bar.baz")); + EXPECT_TRUE(dns_glob_matches("*.bar.baz", ".bar.baz")); + EXPECT_FALSE(dns_glob_matches("*.bar.baz", "zoid.foo.bar.baz")); + EXPECT_TRUE(dns_glob_matches("foo.*.baz", "foo.bar.baz")); + EXPECT_FALSE(dns_glob_matches("foo.*.baz", "foo.bar.zoid.baz")); +} + +TEST("wildcard URI glob does not cross multiple fwd slash delimiter boundaries") { + EXPECT_TRUE(uri_glob_matches("*/bar/baz", "foo/bar/baz")); + EXPECT_TRUE(uri_glob_matches("*/bar/baz", "/bar/baz")); + EXPECT_FALSE(uri_glob_matches("*/bar/baz", "bar/baz")); + EXPECT_FALSE(uri_glob_matches("*/bar/baz", "/bar/baz/")); + EXPECT_FALSE(uri_glob_matches("*/bar/baz", "zoid/foo/bar/baz")); + EXPECT_TRUE(uri_glob_matches("foo/*/baz", "foo/bar/baz")); + EXPECT_FALSE(uri_glob_matches("foo/*/baz", "foo/bar/zoid/baz")); + EXPECT_TRUE(uri_glob_matches("foo/*/baz", "foo/bar.zoid/baz")); // No special handling of dots +} + +TEST("single char DNS glob matches non dot characters only") { + EXPECT_FALSE(dns_glob_matches("f?o", "f.o")); } TEST("special basic regex characters are escaped") { - EXPECT_TRUE(glob_matches("$[.\\^", "$[.\\^")); + verify_all_glob_types_match("$[.\\^", "$[.\\^"); } TEST("special extended regex characters are ignored") { - EXPECT_TRUE(glob_matches("{)(+|]}", "{)(+|]}")); + verify_all_glob_types_match("{)(+|]}", "{)(+|]}"); } // TODO CN + SANs @@ -116,7 +152,7 @@ TEST("DNS SAN requirement without glob pattern is matched as exact string") { EXPECT_FALSE(verify(authorized, creds_with_dns_sans({{"hello.world.bar"}}))); } -TEST("DNS SAN requirement can include glob wildcards") { +TEST("DNS SAN requirement can include glob wildcards, delimited by dot 
character") { auto authorized = authorized_peers({policy_with({required_san_dns("*.w?rld")})}); EXPECT_TRUE(verify(authorized, creds_with_dns_sans({{"hello.world"}}))); EXPECT_TRUE(verify(authorized, creds_with_dns_sans({{"greetings.w0rld"}}))); @@ -124,8 +160,8 @@ TEST("DNS SAN requirement can include glob wildcards") { EXPECT_FALSE(verify(authorized, creds_with_dns_sans({{"world"}}))); } -// FIXME make this RFC 2459-compliant with subdomain matching, case insensitity for host etc -TEST("URI SAN requirement is matched as exact string in cheeky, pragmatic violation of RFC 2459") { +// TODO consider making this RFC 2459-compliant with case insensitivity for scheme and host +TEST("URI SAN requirement without glob pattern is matched as exact string") { auto authorized = authorized_peers({policy_with({required_san_uri("foo://bar.baz/zoid")})}); EXPECT_TRUE(verify(authorized, creds_with_uri_sans({{"foo://bar.baz/zoid"}}))); EXPECT_FALSE(verify(authorized, creds_with_uri_sans({{"foo://bar.baz/zoi"}}))); @@ -136,6 +172,25 @@ TEST("URI SAN requirement is matched as exact string in cheeky, pragmatic violat EXPECT_FALSE(verify(authorized, creds_with_uri_sans({{"foo://BAR.baz/zoid"}}))); } +// TODO consider making this RFC 2459-compliant with case insensitivity for scheme and host +TEST("URI SAN requirement can include glob wildcards, delimited by fwd slash character") { + auto authorized = authorized_peers({policy_with({required_san_uri("myscheme://my/*/uri")})}); + EXPECT_TRUE(verify(authorized, creds_with_uri_sans({{"myscheme://my/cool/uri"}}))); + EXPECT_TRUE(verify(authorized, creds_with_uri_sans({{"myscheme://my/really.cool/uri"}}))); // Not delimited by dots + EXPECT_FALSE(verify(authorized, creds_with_uri_sans({{"theirscheme://my/cool/uri"}}))); + EXPECT_FALSE(verify(authorized, creds_with_uri_sans({{"myscheme://their/cool/uri"}}))); + EXPECT_FALSE(verify(authorized, creds_with_uri_sans({{"myscheme://my/cool/uris"}}))); + EXPECT_FALSE(verify(authorized, 
creds_with_uri_sans({{"myscheme://my/swag/uri/"}}))); + EXPECT_FALSE(verify(authorized, creds_with_uri_sans({{"myscheme://my/uri"}}))); +} + +TEST("URI SAN requirement can include query part even though it's rather silly to do so") { + auto authorized = authorized_peers({policy_with({required_san_uri("myscheme://my/fancy/*?magic")})}); + EXPECT_TRUE(verify(authorized, creds_with_uri_sans({{"myscheme://my/fancy/uri?magic"}}))); + EXPECT_TRUE(verify(authorized, creds_with_uri_sans({{"myscheme://my/fancy/?magic"}}))); + EXPECT_FALSE(verify(authorized, creds_with_uri_sans({{"myscheme://my/fancy/urimagic"}}))); +} + TEST("multi-SAN policy requires all SANs to be present in certificate") { auto authorized = authorized_peers({policy_with({required_san_dns("hello.world"), required_san_dns("foo.bar"), @@ -157,6 +212,13 @@ TEST("wildcard DNS SAN in certificate is not treated as a wildcard match by poli EXPECT_FALSE(verify(authorized, creds_with_dns_sans({{"*.world"}}))); } +TEST("wildcard URI SAN in certificate is not treated as a wildcard match by policy") { + auto authorized = authorized_peers({policy_with({required_san_uri("hello://world")})}); + EXPECT_FALSE(verify(authorized, creds_with_uri_sans({{"hello://*"}}))); +} + +// TODO this is just by coincidence since we match '*' as any other character, not because we interpret +// the wildcard in the SAN as anything special during matching. Consider if we need/want to handle explicitly. 
TEST("wildcard DNS SAN in certificate is still matched by wildcard policy SAN") { auto authorized = authorized_peers({policy_with({required_san_dns("*.world")})}); EXPECT_TRUE(verify(authorized, creds_with_dns_sans({{"*.world"}}))); diff --git a/vespalib/src/tests/simple_thread_bundle/simple_thread_bundle_test.cpp b/vespalib/src/tests/simple_thread_bundle/simple_thread_bundle_test.cpp index b808635a8cc..8d5d393fe22 100644 --- a/vespalib/src/tests/simple_thread_bundle/simple_thread_bundle_test.cpp +++ b/vespalib/src/tests/simple_thread_bundle/simple_thread_bundle_test.cpp @@ -10,7 +10,7 @@ using namespace vespalib::fixed_thread_bundle; struct Cnt : Runnable { size_t x; - Cnt() : x(0) {} + Cnt() noexcept : x(0) {} void run() override { ++x; } }; diff --git a/vespalib/src/tests/simple_thread_bundle/threading_speed_test.cpp b/vespalib/src/tests/simple_thread_bundle/threading_speed_test.cpp index 13b0d003909..eb82425c819 100644 --- a/vespalib/src/tests/simple_thread_bundle/threading_speed_test.cpp +++ b/vespalib/src/tests/simple_thread_bundle/threading_speed_test.cpp @@ -19,7 +19,7 @@ struct Worker : Runnable { size_t iter; uint64_t input; uint64_t output; - Worker() : iter(1), input(0), output(0) {} + Worker() noexcept : iter(1), input(0), output(0) {} void init(size_t n, uint64_t i) { iter = n; input = i; diff --git a/vespalib/src/tests/stllike/string_test.cpp b/vespalib/src/tests/stllike/string_test.cpp index 95846e765f6..de384bce9ad 100644 --- a/vespalib/src/tests/stllike/string_test.cpp +++ b/vespalib/src/tests/stllike/string_test.cpp @@ -484,4 +484,9 @@ TEST("test that operator<() works with stringref versus string") { EXPECT_FALSE(sb < sb); } +TEST("test that empty_string is shared and empty") { + EXPECT_TRUE(&empty_string() == &empty_string()); + EXPECT_EQUAL(empty_string(), ""); +} + TEST_MAIN() { TEST_RUN_ALL(); } diff --git a/vespalib/src/tests/thread/thread_test.cpp b/vespalib/src/tests/thread/thread_test.cpp index 43951b4b734..ee4f97c34cc 100644 --- 
a/vespalib/src/tests/thread/thread_test.cpp +++ b/vespalib/src/tests/thread/thread_test.cpp @@ -6,6 +6,8 @@ using namespace vespalib; +VESPA_THREAD_STACK_TAG(test_agent_thread); + struct Agent : public Runnable { bool started; int loopCnt; @@ -22,7 +24,7 @@ struct Agent : public Runnable { TEST("thread never started") { Agent agent; { - Thread thread(agent); + Thread thread(agent, test_agent_thread); } EXPECT_TRUE(!agent.started); EXPECT_EQUAL(0, agent.loopCnt); @@ -31,7 +33,7 @@ TEST("thread never started") { TEST("normal operation") { Agent agent; { - Thread thread(agent); + Thread thread(agent, test_agent_thread); thread.start(); std::this_thread::sleep_for(20ms); thread.stop().join(); @@ -43,7 +45,7 @@ TEST("normal operation") { TEST("stop before start") { Agent agent; { - Thread thread(agent); + Thread thread(agent, test_agent_thread); thread.stop(); thread.start(); thread.join(); diff --git a/vespalib/src/tests/util/rcuvector/rcuvector_test.cpp b/vespalib/src/tests/util/rcuvector/rcuvector_test.cpp index 9ad0e95667b..cf84ab03a25 100644 --- a/vespalib/src/tests/util/rcuvector/rcuvector_test.cpp +++ b/vespalib/src/tests/util/rcuvector/rcuvector_test.cpp @@ -19,19 +19,14 @@ assertUsage(const MemoryUsage & exp, const MemoryUsage & act) TEST("test generation holder") { - typedef std::unique_ptr<int32_t> IntPtr; GenerationHolder gh; - gh.hold(GenerationHeldBase::UP(new RcuVectorHeld<int32_t>(sizeof(int32_t), - IntPtr(new int32_t(0))))); + gh.hold(std::make_unique<RcuVectorHeld<int32_t>>(sizeof(int32_t), 0)); gh.transferHoldLists(0); - gh.hold(GenerationHeldBase::UP(new RcuVectorHeld<int32_t>(sizeof(int32_t), - IntPtr(new int32_t(1))))); + gh.hold(std::make_unique<RcuVectorHeld<int32_t>>(sizeof(int32_t), 1)); gh.transferHoldLists(1); - gh.hold(GenerationHeldBase::UP(new RcuVectorHeld<int32_t>(sizeof(int32_t), - IntPtr(new int32_t(2))))); + gh.hold(std::make_unique<RcuVectorHeld<int32_t>>(sizeof(int32_t), 2)); gh.transferHoldLists(2); - 
gh.hold(GenerationHeldBase::UP(new RcuVectorHeld<int32_t>(sizeof(int32_t), - IntPtr(new int32_t(4))))); + gh.hold(std::make_unique<RcuVectorHeld<int32_t>>(sizeof(int32_t), 4)); gh.transferHoldLists(4); EXPECT_EQUAL(4u * sizeof(int32_t), gh.getHeldBytes()); gh.trimHoldLists(0); @@ -40,8 +35,7 @@ TEST("test generation holder") EXPECT_EQUAL(3u * sizeof(int32_t), gh.getHeldBytes()); gh.trimHoldLists(2); EXPECT_EQUAL(2u * sizeof(int32_t), gh.getHeldBytes()); - gh.hold(GenerationHeldBase::UP(new RcuVectorHeld<int32_t>(sizeof(int32_t), - IntPtr(new int32_t(6))))); + gh.hold(std::make_unique<RcuVectorHeld<int32_t>>(sizeof(int32_t), 6)); gh.transferHoldLists(6); EXPECT_EQUAL(3u * sizeof(int32_t), gh.getHeldBytes()); gh.trimHoldLists(6); diff --git a/vespalib/src/tests/wakeup/.gitignore b/vespalib/src/tests/wakeup/.gitignore new file mode 100644 index 00000000000..16157fbe3ec --- /dev/null +++ b/vespalib/src/tests/wakeup/.gitignore @@ -0,0 +1 @@ +/vespalib_wakeup_bench_app diff --git a/vespalib/src/tests/wakeup/CMakeLists.txt b/vespalib/src/tests/wakeup/CMakeLists.txt new file mode 100644 index 00000000000..f290362d453 --- /dev/null +++ b/vespalib/src/tests/wakeup/CMakeLists.txt @@ -0,0 +1,8 @@ +# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +vespa_add_executable(vespalib_wakeup_bench_app TEST + SOURCES + wakeup_bench.cpp + DEPENDS + vespalib + GTest::GTest +) diff --git a/vespalib/src/tests/wakeup/wakeup_bench.cpp b/vespalib/src/tests/wakeup/wakeup_bench.cpp new file mode 100644 index 00000000000..1d9817508d3 --- /dev/null +++ b/vespalib/src/tests/wakeup/wakeup_bench.cpp @@ -0,0 +1,259 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+ +#include <atomic> +#include <mutex> +#include <condition_variable> +#include <thread> +#include <vespa/vespalib/util/time.h> + +#ifdef __linux__ +#include <linux/futex.h> +#include <sys/syscall.h> +#endif //linux__ + +#include <vespa/vespalib/gtest/gtest.h> + +using namespace vespalib; + +struct State { + std::atomic<uint32_t> value; // 0: ready, 1: wakeup, 2: stop, 3: initial + static_assert(sizeof(value) == sizeof(uint32_t)); + State() : value(3) {} + void set_ready() { + value.store(0, std::memory_order_relaxed); + } + void set_wakeup() { + value.store(1, std::memory_order_relaxed); + } + void set_stop() { + value.store(2, std::memory_order_relaxed); + } + bool is_ready() const { + return (value.load(std::memory_order_relaxed) == 0); + } + bool should_stop() const { + return (value.load(std::memory_order_relaxed) == 2); + } +}; + +struct UseSpin : State { + void wakeup() { + set_wakeup(); + } + void stop() { + set_stop(); + } + void wait() { + while (is_ready()) { + } + } +}; + +struct UseSpinYield : State { + void wakeup() { + set_wakeup(); + } + void stop() { + set_stop(); + } + void wait() { + while (is_ready()) { + std::this_thread::yield(); + } + } +}; + +struct UseCond : State { + std::mutex mutex; + std::condition_variable cond; + void wakeup() { + std::unique_lock<std::mutex> lock(mutex); + set_wakeup(); + cond.notify_one(); + } + void stop() { + std::unique_lock<std::mutex> lock(mutex); + set_stop(); + cond.notify_one(); + } + void wait() { + std::unique_lock<std::mutex> lock(mutex); + while (is_ready()) { + cond.wait(lock); + } + } +}; + +struct UseCondNolock : State { + std::mutex mutex; + std::condition_variable cond; + void wakeup() { + std::unique_lock<std::mutex> lock(mutex); + set_wakeup(); + lock.unlock(); + cond.notify_one(); + } + void stop() { + std::unique_lock<std::mutex> lock(mutex); + set_stop(); + lock.unlock(); + cond.notify_one(); + } + void wait() { + std::unique_lock<std::mutex> lock(mutex); + while (is_ready()) { + 
cond.wait(lock); + } + } +}; + +struct UsePipe : State { + int pipefd[2]; + UsePipe() { + int res = pipe(pipefd); + assert(res == 0); + } + ~UsePipe() { + close(pipefd[0]); + close(pipefd[1]); + } + void wakeup() { + set_wakeup(); + char token = 'T'; + [[maybe_unused]] ssize_t res = write(pipefd[1], &token, 1); + assert(res == 1); + } + void stop() { + set_stop(); + char token = 'T'; + [[maybe_unused]] ssize_t res = write(pipefd[1], &token, 1); + assert(res == 1); + } + void wait() { + char token_trash[128]; + [[maybe_unused]] ssize_t res = read(pipefd[0], token_trash, sizeof(token_trash)); + assert(res == 1); + } +}; + +#ifdef __linux__ +struct UseFutex : State { + void wakeup() { + set_wakeup(); + syscall(SYS_futex, reinterpret_cast<uint32_t*>(&value), + FUTEX_WAKE_PRIVATE, 1, nullptr, nullptr, 0); + } + void stop() { + set_stop(); + syscall(SYS_futex, reinterpret_cast<uint32_t*>(&value), + FUTEX_WAKE_PRIVATE, 1, nullptr, nullptr, 0); + } + void wait() { + while (is_ready()) { + syscall(SYS_futex, reinterpret_cast<uint32_t*>(&value), + FUTEX_WAIT_PRIVATE, 0, nullptr, nullptr, 0); + } + } +}; +#endif //linux__ + +template <typename T> +struct Wakeup : T { + using T::should_stop; + using T::set_ready; + using T::wait; + std::thread thread; + Wakeup() : thread([this]{ run(); }) {} + void run() { + while (!should_stop()) { + set_ready(); + wait(); + } + } +}; + +constexpr size_t N = 8; +constexpr size_t WAKE_CNT = 1000000; + +template <typename T> auto create_list() __attribute__((noinline)); +template <typename T> auto create_list() { + std::vector<T*> list; + for (size_t i = 0; i < N; ++i) { + list.push_back(new T()); + } + return list; +} + +template <typename T> +void destroy_list(T &list) __attribute__((noinline)); +template <typename T> +void destroy_list(T &list) { + for (auto *item: list) { + item->stop(); + item->thread.join(); + delete item; + } +} + +template <typename T> +void wait_until_ready(const T &list) __attribute__((noinline)); +template <typename 
T> +void wait_until_ready(const T &list) { + size_t num_ready = 0; + do { + num_ready = 0; + for (auto *item: list) { + if (item->is_ready()) { + ++num_ready; + } + } + } while (num_ready < N); +} + +template <typename T> +auto perform_wakeups(T &list, size_t target) __attribute__((noinline)); +template <typename T> +auto perform_wakeups(T &list, size_t target) { + size_t wake_cnt = 0; + size_t skip_cnt = 0; + while (wake_cnt < target) { + for (auto *item: list) { + if (item->is_ready()) { + item->wakeup(); + ++wake_cnt; + } else { + ++skip_cnt; + } + } + } + return std::make_pair(wake_cnt, skip_cnt); +} + +template <typename T> +void benchmark() { + auto list = create_list<T>(); + wait_until_ready(list); + auto t0 = steady_clock::now(); + while ((steady_clock::now() - t0) < 1s) { + // warmup + perform_wakeups(list, WAKE_CNT / 64); + } + auto t1 = steady_clock::now(); + auto res = perform_wakeups(list, WAKE_CNT); + auto t2 = steady_clock::now(); + wait_until_ready(list); + destroy_list(list); + fprintf(stderr, "wakeups per second: %zu (skipped: %zu)\n", size_t(res.first / to_s(t2 - t1)), res.second); +} + +TEST(WakeupBench, using_spin) { benchmark<Wakeup<UseSpin>>(); } +TEST(WakeupBench, using_spin_yield) { benchmark<Wakeup<UseSpinYield>>(); } +TEST(WakeupBench, using_cond) { benchmark<Wakeup<UseCond>>(); } +TEST(WakeupBench, using_cond_nolock) { benchmark<Wakeup<UseCondNolock>>(); } +TEST(WakeupBench, using_pipe) { benchmark<Wakeup<UsePipe>>(); } + +#ifdef __linux__ +TEST(WakeupBench, using_futex) { benchmark<Wakeup<UseFutex>>(); } +#endif //linux__ + +GTEST_MAIN_RUN_ALL_TESTS() diff --git a/vespalib/src/vespa/vespalib/btree/btree.h b/vespalib/src/vespa/vespalib/btree/btree.h index 2b03e70fbdf..f87d5751743 100644 --- a/vespalib/src/vespa/vespalib/btree/btree.h +++ b/vespalib/src/vespa/vespalib/btree/btree.h @@ -6,6 +6,8 @@ #include "noaggrcalc.h" #include <vespa/vespalib/util/generationhandler.h> +namespace vespalib::datastore { class CompactionStrategy; } + 
namespace vespalib::btree { /** @@ -149,7 +151,7 @@ public: _tree.thaw(itr); } - void compact_worst(); + void compact_worst(const datastore::CompactionStrategy& compaction_strategy); template <typename FunctionType> void diff --git a/vespalib/src/vespa/vespalib/btree/btree.hpp b/vespalib/src/vespa/vespalib/btree/btree.hpp index c4a588bc63e..473d1f4735e 100644 --- a/vespalib/src/vespa/vespalib/btree/btree.hpp +++ b/vespalib/src/vespa/vespalib/btree/btree.hpp @@ -26,9 +26,9 @@ BTree<KeyT, DataT, AggrT, CompareT, TraitsT, AggrCalcT>::~BTree() template <typename KeyT, typename DataT, typename AggrT, typename CompareT, typename TraitsT, class AggrCalcT> void -BTree<KeyT, DataT, AggrT, CompareT, TraitsT, AggrCalcT>::compact_worst() +BTree<KeyT, DataT, AggrT, CompareT, TraitsT, AggrCalcT>::compact_worst(const datastore::CompactionStrategy& compaction_strategy) { - auto to_hold = _alloc.start_compact_worst(); + auto to_hold = _alloc.start_compact_worst(compaction_strategy); _tree.move_nodes(_alloc); _alloc.finishCompact(to_hold); } diff --git a/vespalib/src/vespa/vespalib/btree/btreeiterator.h b/vespalib/src/vespa/vespalib/btree/btreeiterator.h index 325ce0e0e47..30123b1946e 100644 --- a/vespalib/src/vespa/vespalib/btree/btreeiterator.h +++ b/vespalib/src/vespa/vespalib/btree/btreeiterator.h @@ -113,6 +113,9 @@ public: return _node->getData(_idx); } + // Only use during compaction when changing reference to moved value + DataType &getWData() { return getWNode()->getWData(_idx); } + bool valid() const { @@ -881,6 +884,9 @@ public: _leaf.getWNode()->writeData(_leaf.getIdx(), data); } + // Only use during compaction when changing reference to moved value + DataType &getWData() { return _leaf.getWData(); } + /** * Set a new key for the current iterator position. * The new key must have the same semantic meaning as the old key. 
diff --git a/vespalib/src/vespa/vespalib/btree/btreeiterator.hpp b/vespalib/src/vespa/vespalib/btree/btreeiterator.hpp index 2348e770e9d..042779f1b1b 100644 --- a/vespalib/src/vespa/vespalib/btree/btreeiterator.hpp +++ b/vespalib/src/vespa/vespalib/btree/btreeiterator.hpp @@ -983,6 +983,7 @@ moveFirstLeafNode(BTreeNode::Ref rootRef) InternalNodeTypeRefPair iPair = allocator.moveInternalNode(node); nodeRef = iPair.ref; node = iPair.data; + std::atomic_thread_fence(std::memory_order_release); pnode->setChild(0, nodeRef); moved = true; } @@ -994,6 +995,7 @@ moveFirstLeafNode(BTreeNode::Ref rootRef) LeafNodeTypeRefPair lPair(allocator.moveLeafNode(_leaf.getNode())); _leaf.setNode(lPair.data); + std::atomic_thread_fence(std::memory_order_release); node->setChild(0, lPair.ref); moved = true; } diff --git a/vespalib/src/vespa/vespalib/btree/btreenode.h b/vespalib/src/vespa/vespalib/btree/btreenode.h index d8752d77f0b..468f17fcd1a 100644 --- a/vespalib/src/vespa/vespalib/btree/btreenode.h +++ b/vespalib/src/vespa/vespalib/btree/btreenode.h @@ -99,6 +99,8 @@ public: } const DataT &getData(uint32_t idx) const { return _data[idx]; } + // Only use during compaction when changing reference to moved value + DataT &getWData(uint32_t idx) { return _data[idx]; } void setData(uint32_t idx, const DataT &data) { _data[idx] = data; } static bool hasData() { return true; } }; @@ -120,6 +122,9 @@ public: return BTreeNoLeafData::_instance; } + // Only use during compaction when changing reference to moved value + BTreeNoLeafData &getWData(uint32_t) const { return BTreeNoLeafData::_instance; } + void setData(uint32_t idx, const BTreeNoLeafData &data) { (void) idx; (void) data; diff --git a/vespalib/src/vespa/vespalib/btree/btreenodeallocator.h b/vespalib/src/vespa/vespalib/btree/btreenodeallocator.h index 93615ddef82..27e73b3a2b6 100644 --- a/vespalib/src/vespa/vespalib/btree/btreenodeallocator.h +++ b/vespalib/src/vespa/vespalib/btree/btreenodeallocator.h @@ -29,6 +29,7 @@ public: using 
BTreeRootBaseType = BTreeRootBase<KeyT, DataT, AggrT, INTERNAL_SLOTS, LEAF_SLOTS>; using generation_t = vespalib::GenerationHandler::generation_t; using NodeStore = BTreeNodeStore<KeyT, DataT, AggrT, INTERNAL_SLOTS, LEAF_SLOTS>; + using CompactionStrategy = datastore::CompactionStrategy; using EntryRef = datastore::EntryRef; using DataStoreBase = datastore::DataStoreBase; @@ -165,7 +166,7 @@ public: bool getCompacting(EntryRef ref) const { return _nodeStore.getCompacting(ref); } std::vector<uint32_t> startCompact() { return _nodeStore.startCompact(); } - std::vector<uint32_t> start_compact_worst() { return _nodeStore.start_compact_worst(); } + std::vector<uint32_t> start_compact_worst(const CompactionStrategy& compaction_strategy) { return _nodeStore.start_compact_worst(compaction_strategy); } void finishCompact(const std::vector<uint32_t> &toHold) { return _nodeStore.finishCompact(toHold); diff --git a/vespalib/src/vespa/vespalib/btree/btreenodestore.h b/vespalib/src/vespa/vespalib/btree/btreenodestore.h index 70a9ba6c73c..444bf641899 100644 --- a/vespalib/src/vespa/vespalib/btree/btreenodestore.h +++ b/vespalib/src/vespa/vespalib/btree/btreenodestore.h @@ -56,6 +56,7 @@ public: typedef typename LeafNodeType::RefPair LeafNodeTypeRefPair; typedef vespalib::GenerationHandler::generation_t generation_t; using EntryRef = datastore::EntryRef; + using CompactionStrategy = datastore::CompactionStrategy; enum NodeTypes { @@ -159,7 +160,7 @@ public: std::vector<uint32_t> startCompact(); - std::vector<uint32_t> start_compact_worst(); + std::vector<uint32_t> start_compact_worst(const CompactionStrategy& compaction_strategy); void finishCompact(const std::vector<uint32_t> &toHold); diff --git a/vespalib/src/vespa/vespalib/btree/btreenodestore.hpp b/vespalib/src/vespa/vespalib/btree/btreenodestore.hpp index ff4fa59cd74..91953507eb0 100644 --- a/vespalib/src/vespa/vespalib/btree/btreenodestore.hpp +++ b/vespalib/src/vespa/vespalib/btree/btreenodestore.hpp @@ -3,6 +3,7 @@ 
#pragma once #include "btreenodestore.h" +#include <vespa/vespalib/datastore/compaction_spec.h> #include <vespa/vespalib/datastore/datastore.hpp> namespace vespalib::btree { @@ -71,9 +72,9 @@ template <typename KeyT, typename DataT, typename AggrT, size_t INTERNAL_SLOTS, size_t LEAF_SLOTS> std::vector<uint32_t> BTreeNodeStore<KeyT, DataT, AggrT, INTERNAL_SLOTS, LEAF_SLOTS>:: -start_compact_worst() +start_compact_worst(const CompactionStrategy &compaction_strategy) { - return _store.startCompactWorstBuffers(true, false); + return _store.startCompactWorstBuffers(datastore::CompactionSpec(true, false), compaction_strategy); } template <typename KeyT, typename DataT, typename AggrT, diff --git a/vespalib/src/vespa/vespalib/btree/btreestore.h b/vespalib/src/vespa/vespalib/btree/btreestore.h index 82913987e44..a79259c6e57 100644 --- a/vespalib/src/vespa/vespalib/btree/btreestore.h +++ b/vespalib/src/vespa/vespalib/btree/btreestore.h @@ -49,6 +49,8 @@ public: TraitsT::INTERNAL_SLOTS, TraitsT::LEAF_SLOTS, AggrCalcT> Builder; + using CompactionSpec = datastore::CompactionSpec; + using CompactionStrategy = datastore::CompactionStrategy; using EntryRef = datastore::EntryRef; template <typename EntryType> using BufferType = datastore::BufferType<EntryType>; @@ -298,6 +300,9 @@ public: bool isSmallArray(const EntryRef ref) const; + static bool isBTree(uint32_t typeId) { return typeId == BUFFERTYPE_BTREE; } + bool isBTree(RefType ref) const { return isBTree(getTypeId(ref)); } + /** * Returns the cluster size for the type id. * Cluster size == 0 means we have a tree for the given reference. 
@@ -389,12 +394,12 @@ public: void foreach_frozen(EntryRef ref, FunctionType func) const; - std::vector<uint32_t> start_compact_worst_btree_nodes(); + std::vector<uint32_t> start_compact_worst_btree_nodes(const CompactionStrategy& compaction_strategy); void finish_compact_worst_btree_nodes(const std::vector<uint32_t>& to_hold); - void move_btree_nodes(EntryRef ref); + void move_btree_nodes(const std::vector<EntryRef>& refs); - std::vector<uint32_t> start_compact_worst_buffers(); - EntryRef move(EntryRef ref); + std::vector<uint32_t> start_compact_worst_buffers(CompactionSpec compaction_spec, const CompactionStrategy& compaction_strategy); + void move(std::vector<EntryRef>& refs); private: static constexpr size_t MIN_BUFFER_ARRAYS = 128u; diff --git a/vespalib/src/vespa/vespalib/btree/btreestore.hpp b/vespalib/src/vespa/vespalib/btree/btreestore.hpp index 15c546a0368..c0985ff8f94 100644 --- a/vespalib/src/vespa/vespalib/btree/btreestore.hpp +++ b/vespalib/src/vespa/vespalib/btree/btreestore.hpp @@ -5,6 +5,7 @@ #include "btreestore.h" #include "btreebuilder.h" #include "btreebuilder.hpp" +#include <vespa/vespalib/datastore/compaction_spec.h> #include <vespa/vespalib/datastore/datastore.hpp> #include <vespa/vespalib/util/optimized.h> @@ -972,10 +973,10 @@ template <typename KeyT, typename DataT, typename AggrT, typename CompareT, typename TraitsT, typename AggrCalcT> std::vector<uint32_t> BTreeStore<KeyT, DataT, AggrT, CompareT, TraitsT, AggrCalcT>:: -start_compact_worst_btree_nodes() +start_compact_worst_btree_nodes(const CompactionStrategy& compaction_strategy) { _builder.clear(); - return _allocator.start_compact_worst(); + return _allocator.start_compact_worst(compaction_strategy); } template <typename KeyT, typename DataT, typename AggrT, typename CompareT, @@ -991,15 +992,15 @@ template <typename KeyT, typename DataT, typename AggrT, typename CompareT, typename TraitsT, typename AggrCalcT> void BTreeStore<KeyT, DataT, AggrT, CompareT, TraitsT, AggrCalcT>:: 
-move_btree_nodes(EntryRef ref) +move_btree_nodes(const std::vector<EntryRef>& refs) { - if (ref.valid()) { + for (auto& ref : refs) { RefType iRef(ref); - uint32_t clusterSize = getClusterSize(iRef); - if (clusterSize == 0) { - BTreeType *tree = getWTreeEntry(iRef); - tree->move_nodes(_allocator); - } + assert(iRef.valid()); + uint32_t typeId = getTypeId(iRef); + assert(isBTree(typeId)); + BTreeType *tree = getWTreeEntry(iRef); + tree->move_nodes(_allocator); } } @@ -1007,31 +1008,33 @@ template <typename KeyT, typename DataT, typename AggrT, typename CompareT, typename TraitsT, typename AggrCalcT> std::vector<uint32_t> BTreeStore<KeyT, DataT, AggrT, CompareT, TraitsT, AggrCalcT>:: -start_compact_worst_buffers() +start_compact_worst_buffers(CompactionSpec compaction_spec, const CompactionStrategy& compaction_strategy) { freeze(); - return _store.startCompactWorstBuffers(true, false); + return _store.startCompactWorstBuffers(compaction_spec, compaction_strategy); } template <typename KeyT, typename DataT, typename AggrT, typename CompareT, typename TraitsT, typename AggrCalcT> -typename BTreeStore<KeyT, DataT, AggrT, CompareT, TraitsT, AggrCalcT>::EntryRef +void BTreeStore<KeyT, DataT, AggrT, CompareT, TraitsT, AggrCalcT>:: -move(EntryRef ref) +move(std::vector<EntryRef> &refs) { - if (!ref.valid() || !_store.getCompacting(ref)) { - return ref; - } - RefType iRef(ref); - uint32_t clusterSize = getClusterSize(iRef); - if (clusterSize == 0) { - BTreeType *tree = getWTreeEntry(iRef); - auto ref_and_ptr = allocBTreeCopy(*tree); - tree->prepare_hold(); - return ref_and_ptr.ref; + for (auto& ref : refs) { + RefType iRef(ref); + assert(iRef.valid()); + assert(_store.getCompacting(iRef)); + uint32_t clusterSize = getClusterSize(iRef); + if (clusterSize == 0) { + BTreeType *tree = getWTreeEntry(iRef); + auto ref_and_ptr = allocBTreeCopy(*tree); + tree->prepare_hold(); + ref = ref_and_ptr.ref; + } else { + const KeyDataType *shortArray = getKeyDataEntry(iRef, clusterSize); + 
ref = allocKeyDataCopy(shortArray, clusterSize).ref; + } } - const KeyDataType *shortArray = getKeyDataEntry(iRef, clusterSize); - return allocKeyDataCopy(shortArray, clusterSize).ref; } } diff --git a/vespalib/src/vespa/vespalib/datastore/CMakeLists.txt b/vespalib/src/vespa/vespalib/datastore/CMakeLists.txt index 6c6f5258555..d628843279d 100644 --- a/vespalib/src/vespa/vespalib/datastore/CMakeLists.txt +++ b/vespalib/src/vespa/vespalib/datastore/CMakeLists.txt @@ -5,9 +5,12 @@ vespa_add_library(vespalib_vespalib_datastore OBJECT array_store_config.cpp buffer_type.cpp bufferstate.cpp + compaction_strategy.cpp + compact_buffer_candidates.cpp datastore.cpp datastorebase.cpp entryref.cpp + entry_ref_filter.cpp fixed_size_hash_map.cpp sharded_hash_map.cpp unique_store.cpp diff --git a/vespalib/src/vespa/vespalib/datastore/array_store.h b/vespalib/src/vespa/vespalib/datastore/array_store.h index 3ba0caae5b9..d9b62c310b5 100644 --- a/vespalib/src/vespa/vespalib/datastore/array_store.h +++ b/vespalib/src/vespa/vespalib/datastore/array_store.h @@ -96,7 +96,7 @@ public: } void remove(EntryRef ref); - ICompactionContext::UP compactWorst(bool compactMemory, bool compactAddressSpace); + ICompactionContext::UP compactWorst(CompactionSpec compaction_spec, const CompactionStrategy& compaction_strategy); vespalib::MemoryUsage getMemoryUsage() const { return _store.getMemoryUsage(); } /** diff --git a/vespalib/src/vespa/vespalib/datastore/array_store.hpp b/vespalib/src/vespa/vespalib/datastore/array_store.hpp index 5600c64eb3d..bbbd52c354d 100644 --- a/vespalib/src/vespa/vespalib/datastore/array_store.hpp +++ b/vespalib/src/vespa/vespalib/datastore/array_store.hpp @@ -3,6 +3,8 @@ #pragma once #include "array_store.h" +#include "compaction_spec.h" +#include "entry_ref_filter.h" #include "datastore.hpp" #include <atomic> #include <algorithm> @@ -127,47 +129,38 @@ private: DataStoreBase &_dataStore; ArrayStoreType &_store; std::vector<uint32_t> _bufferIdsToCompact; + EntryRefFilter 
_filter; - bool compactingBuffer(uint32_t bufferId) { - return std::find(_bufferIdsToCompact.begin(), _bufferIdsToCompact.end(), - bufferId) != _bufferIdsToCompact.end(); - } public: CompactionContext(DataStoreBase &dataStore, ArrayStoreType &store, std::vector<uint32_t> bufferIdsToCompact) : _dataStore(dataStore), _store(store), - _bufferIdsToCompact(std::move(bufferIdsToCompact)) - {} + _bufferIdsToCompact(std::move(bufferIdsToCompact)), + _filter(RefT::numBuffers(), RefT::offset_bits) + { + _filter.add_buffers(_bufferIdsToCompact); + } ~CompactionContext() override { _dataStore.finishCompact(_bufferIdsToCompact); } void compact(vespalib::ArrayRef<EntryRef> refs) override { - if (!_bufferIdsToCompact.empty()) { - for (auto &ref : refs) { - if (ref.valid()) { - RefT internalRef(ref); - if (compactingBuffer(internalRef.bufferId())) { - EntryRef newRef = _store.add(_store.get(ref)); - std::atomic_thread_fence(std::memory_order_release); - ref = newRef; - } - } + for (auto &ref : refs) { + if (ref.valid() && _filter.has(ref)) { + EntryRef newRef = _store.add(_store.get(ref)); + std::atomic_thread_fence(std::memory_order_release); + ref = newRef; } } } void compact(vespalib::ArrayRef<AtomicEntryRef> refs) override { - if (!_bufferIdsToCompact.empty()) { - for (auto &ref : refs) { - if (ref.load_relaxed().valid()) { - RefT internalRef(ref.load_relaxed()); - if (compactingBuffer(internalRef.bufferId())) { - EntryRef newRef = _store.add(_store.get(ref.load_relaxed())); - std::atomic_thread_fence(std::memory_order_release); - ref.store_release(newRef); - } - } + for (auto &atomic_entry_ref : refs) { + auto ref = atomic_entry_ref.load_relaxed(); + if (ref.valid() && _filter.has(ref)) { + EntryRef newRef = _store.add(_store.get(ref)); + std::atomic_thread_fence(std::memory_order_release); + atomic_entry_ref.store_release(newRef); } } } @@ -177,9 +170,9 @@ public: template <typename EntryT, typename RefT> ICompactionContext::UP -ArrayStore<EntryT, RefT>::compactWorst(bool 
compactMemory, bool compactAddressSpace) +ArrayStore<EntryT, RefT>::compactWorst(CompactionSpec compaction_spec, const CompactionStrategy &compaction_strategy) { - std::vector<uint32_t> bufferIdsToCompact = _store.startCompactWorstBuffers(compactMemory, compactAddressSpace); + std::vector<uint32_t> bufferIdsToCompact = _store.startCompactWorstBuffers(compaction_spec, compaction_strategy); return std::make_unique<arraystore::CompactionContext<EntryT, RefT>> (_store, *this, std::move(bufferIdsToCompact)); } diff --git a/vespalib/src/vespa/vespalib/datastore/compact_buffer_candidate.h b/vespalib/src/vespa/vespalib/datastore/compact_buffer_candidate.h new file mode 100644 index 00000000000..85ea1e42eac --- /dev/null +++ b/vespalib/src/vespa/vespalib/datastore/compact_buffer_candidate.h @@ -0,0 +1,36 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. + +#pragma once + +#include <cstddef> +#include <cstdint> + +namespace vespalib::datastore { + +/* + * Class representing candidate buffer for compaction. 
+ */ +class CompactBufferCandidate { + uint32_t _buffer_id; + size_t _used; + size_t _dead; +public: + CompactBufferCandidate(uint32_t buffer_id, size_t used, size_t dead) noexcept + : _buffer_id(buffer_id), + _used(used), + _dead(dead) + { + } + + CompactBufferCandidate() noexcept + : CompactBufferCandidate(0, 0, 0) + { + } + + bool operator<(const CompactBufferCandidate& rhs) const noexcept { return _dead > rhs._dead; } + uint32_t get_buffer_id() const noexcept { return _buffer_id; } + size_t get_used() const noexcept { return _used; } + size_t get_dead() const noexcept { return _dead; } +}; + +} diff --git a/vespalib/src/vespa/vespalib/datastore/compact_buffer_candidates.cpp b/vespalib/src/vespa/vespalib/datastore/compact_buffer_candidates.cpp new file mode 100644 index 00000000000..3003ef315e8 --- /dev/null +++ b/vespalib/src/vespa/vespalib/datastore/compact_buffer_candidates.cpp @@ -0,0 +1,52 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+ +#include "compact_buffer_candidates.h" +#include <algorithm> + +namespace vespalib::datastore { + +CompactBufferCandidates::CompactBufferCandidates(uint32_t num_buffers, uint32_t max_buffers, double ratio, size_t slack) + : _candidates(), + _used(0), + _dead(0), + _max_buffers(std::max(max_buffers, 1u)), + _ratio(ratio), + _slack(slack) +{ + _candidates.reserve(num_buffers); +} + +CompactBufferCandidates::~CompactBufferCandidates() = default; + +void +CompactBufferCandidates::add(uint32_t buffer_id, size_t used, size_t dead) +{ + _candidates.emplace_back(buffer_id, used, dead); + _used += used; + _dead += dead; +} + +void +CompactBufferCandidates::select(std::vector<uint32_t>& buffers) +{ + if (_candidates.empty()) { + return; + } + if (_candidates.size() > _max_buffers) { + std::nth_element(_candidates.begin(), _candidates.begin() + (_max_buffers - 1), _candidates.end()); + _candidates.resize(_max_buffers); + } + std::sort(_candidates.begin(), _candidates.end()); + size_t remaining_used = _used; + size_t remaining_dead = _dead; + for (auto& candidate : _candidates) { + buffers.emplace_back(candidate.get_buffer_id()); + remaining_used -= candidate.get_used(); + remaining_dead -= candidate.get_dead(); + if ((remaining_dead < _slack) || (remaining_dead <= remaining_used * _ratio)) { + break; + } + } +} + +} diff --git a/vespalib/src/vespa/vespalib/datastore/compact_buffer_candidates.h b/vespalib/src/vespa/vespalib/datastore/compact_buffer_candidates.h new file mode 100644 index 00000000000..59d35422328 --- /dev/null +++ b/vespalib/src/vespa/vespalib/datastore/compact_buffer_candidates.h @@ -0,0 +1,27 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. + +#pragma once + +#include "compact_buffer_candidate.h" +#include <vector> + +namespace vespalib::datastore { + +/* + * Class representing candidate buffers for compaction. 
+ */ +class CompactBufferCandidates { + std::vector<CompactBufferCandidate> _candidates; + size_t _used; + size_t _dead; + uint32_t _max_buffers; + double _ratio; + size_t _slack; +public: + CompactBufferCandidates(uint32_t num_buffers, uint32_t max_buffers, double ratio, size_t slack); + ~CompactBufferCandidates(); + void add(uint32_t buffer_id, size_t used, size_t dead); + void select(std::vector<uint32_t>& buffers); +}; + +} diff --git a/vespalib/src/vespa/vespalib/datastore/compaction_spec.h b/vespalib/src/vespa/vespalib/datastore/compaction_spec.h new file mode 100644 index 00000000000..c554f3229dd --- /dev/null +++ b/vespalib/src/vespa/vespalib/datastore/compaction_spec.h @@ -0,0 +1,34 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. + +#pragma once + +namespace vespalib::datastore { + +/* + * Class describing how to compact a compactable data structure. + * + * memory - to reduce amount of "dead" memory + * address_space - to avoid running out of free buffers in data store + * (i.e. 
move data from small buffers to larger buffers) + */ +class CompactionSpec +{ + bool _compact_memory; + bool _compact_address_space; +public: + CompactionSpec() + : _compact_memory(false), + _compact_address_space(false) + { + } + CompactionSpec(bool compact_memory_, bool compact_address_space_) noexcept + : _compact_memory(compact_memory_), + _compact_address_space(compact_address_space_) + { + } + bool compact() const noexcept { return _compact_memory || _compact_address_space; } + bool compact_memory() const noexcept { return _compact_memory; } + bool compact_address_space() const noexcept { return _compact_address_space; } +}; + +} diff --git a/vespalib/src/vespa/vespalib/datastore/compaction_strategy.cpp b/vespalib/src/vespa/vespalib/datastore/compaction_strategy.cpp new file mode 100644 index 00000000000..2dbd501f78e --- /dev/null +++ b/vespalib/src/vespa/vespalib/datastore/compaction_strategy.cpp @@ -0,0 +1,37 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+ +#include "compaction_strategy.h" +#include "compaction_spec.h" +#include <vespa/vespalib/util/memoryusage.h> +#include <vespa/vespalib/util/address_space.h> +#include <iostream> + +namespace vespalib::datastore { + +bool +CompactionStrategy::should_compact_memory(const MemoryUsage& memory_usage) const +{ + return should_compact_memory(memory_usage.usedBytes(), memory_usage.deadBytes()); +} + +bool +CompactionStrategy::should_compact_address_space(const AddressSpace& address_space) const +{ + return should_compact_address_space(address_space.used(), address_space.dead()); +} + +CompactionSpec +CompactionStrategy::should_compact(const MemoryUsage& memory_usage, const AddressSpace& address_space) const +{ + return CompactionSpec(should_compact_memory(memory_usage), should_compact_address_space(address_space)); +} + +std::ostream& operator<<(std::ostream& os, const CompactionStrategy& compaction_strategy) +{ + os << "{maxDeadBytesRatio=" << compaction_strategy.getMaxDeadBytesRatio() << + ", maxDeadAddressSpaceRatio=" << compaction_strategy.getMaxDeadAddressSpaceRatio() << + "}"; + return os; +} + +} diff --git a/searchcommon/src/vespa/searchcommon/common/compaction_strategy.h b/vespalib/src/vespa/vespalib/datastore/compaction_strategy.h index ced28436471..9ca4a64a55b 100644 --- a/searchcommon/src/vespa/searchcommon/common/compaction_strategy.h +++ b/vespalib/src/vespa/vespalib/datastore/compaction_strategy.h @@ -3,51 +3,73 @@ #pragma once #include <iosfwd> +#include <cstdint> -namespace search { +namespace vespalib { + +class AddressSpace; +class MemoryUsage; + +} + +namespace vespalib::datastore { + +class CompactionSpec; /* * Class describing compaction strategy for a compactable data structure. 
*/ class CompactionStrategy { +public: + static constexpr size_t DEAD_BYTES_SLACK = 0x10000u; + static constexpr size_t DEAD_ADDRESS_SPACE_SLACK = 0x10000u; private: double _maxDeadBytesRatio; // Max ratio of dead bytes before compaction double _maxDeadAddressSpaceRatio; // Max ratio of dead address space before compaction + uint32_t _max_buffers; // Max number of buffers to compact for each reason (memory usage, address space usage) + bool should_compact_memory(size_t used_bytes, size_t dead_bytes) const { + return ((dead_bytes >= DEAD_BYTES_SLACK) && + (dead_bytes > used_bytes * getMaxDeadBytesRatio())); + } + bool should_compact_address_space(size_t used_address_space, size_t dead_address_space) const { + return ((dead_address_space >= DEAD_ADDRESS_SPACE_SLACK) && + (dead_address_space > used_address_space * getMaxDeadAddressSpaceRatio())); + } public: CompactionStrategy() noexcept : _maxDeadBytesRatio(0.05), - _maxDeadAddressSpaceRatio(0.2) + _maxDeadAddressSpaceRatio(0.2), + _max_buffers(1) { } CompactionStrategy(double maxDeadBytesRatio, double maxDeadAddressSpaceRatio) noexcept : _maxDeadBytesRatio(maxDeadBytesRatio), - _maxDeadAddressSpaceRatio(maxDeadAddressSpaceRatio) + _maxDeadAddressSpaceRatio(maxDeadAddressSpaceRatio), + _max_buffers(1) + { + } + CompactionStrategy(double maxDeadBytesRatio, double maxDeadAddressSpaceRatio, uint32_t max_buffers) noexcept + : _maxDeadBytesRatio(maxDeadBytesRatio), + _maxDeadAddressSpaceRatio(maxDeadAddressSpaceRatio), + _max_buffers(max_buffers) { } double getMaxDeadBytesRatio() const { return _maxDeadBytesRatio; } double getMaxDeadAddressSpaceRatio() const { return _maxDeadAddressSpaceRatio; } + uint32_t get_max_buffers() const noexcept { return _max_buffers; } bool operator==(const CompactionStrategy & rhs) const { - return _maxDeadBytesRatio == rhs._maxDeadBytesRatio && - _maxDeadAddressSpaceRatio == rhs._maxDeadAddressSpaceRatio; + return (_maxDeadBytesRatio == rhs._maxDeadBytesRatio) && + (_maxDeadAddressSpaceRatio 
== rhs._maxDeadAddressSpaceRatio) && + (_max_buffers == rhs._max_buffers); } bool operator!=(const CompactionStrategy & rhs) const { return !(operator==(rhs)); } - static constexpr size_t DEAD_BYTES_SLACK = 0x10000u; - - bool should_compact_memory(size_t used_bytes, size_t dead_bytes) const { - return ((dead_bytes >= DEAD_BYTES_SLACK) && - (dead_bytes > used_bytes * getMaxDeadBytesRatio())); - } - - static constexpr size_t DEAD_ADDRESS_SPACE_SLACK = 0x10000u; - - bool should_compact_address_space(size_t used_address_space, size_t dead_address_space) const { - return ((dead_address_space >= DEAD_ADDRESS_SPACE_SLACK) && - (dead_address_space > used_address_space * getMaxDeadAddressSpaceRatio())); - } + bool should_compact_memory(const MemoryUsage& memory_usage) const; + bool should_compact_address_space(const AddressSpace& address_space) const; + CompactionSpec should_compact(const MemoryUsage& memory_usage, const AddressSpace& address_space) const; }; std::ostream& operator<<(std::ostream& os, const CompactionStrategy& compaction_strategy); -} // namespace search +} diff --git a/vespalib/src/vespa/vespalib/datastore/datastorebase.cpp b/vespalib/src/vespa/vespalib/datastore/datastorebase.cpp index b5cab50bc33..f137d5379fb 100644 --- a/vespalib/src/vespa/vespalib/datastore/datastorebase.cpp +++ b/vespalib/src/vespa/vespalib/datastore/datastorebase.cpp @@ -1,8 +1,12 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
#include "datastore.h" +#include "compact_buffer_candidates.h" +#include "compaction_spec.h" +#include "compaction_strategy.h" #include <vespa/vespalib/util/array.hpp> #include <vespa/vespalib/util/stringfmt.h> +#include <algorithm> #include <limits> #include <cassert> @@ -526,42 +530,37 @@ DataStoreBase::markCompacting(uint32_t bufferId) } std::vector<uint32_t> -DataStoreBase::startCompactWorstBuffers(bool compactMemory, bool compactAddressSpace) +DataStoreBase::startCompactWorstBuffers(CompactionSpec compaction_spec, const CompactionStrategy& compaction_strategy) { - constexpr uint32_t noBufferId = std::numeric_limits<uint32_t>::max(); - uint32_t worstMemoryBufferId = noBufferId; - uint32_t worstAddressSpaceBufferId = noBufferId; - size_t worstDeadElems = 0; - size_t worstDeadArrays = 0; + // compact memory usage + CompactBufferCandidates elem_buffers(_numBuffers, compaction_strategy.get_max_buffers(), compaction_strategy.getMaxDeadBytesRatio() / 2, CompactionStrategy::DEAD_BYTES_SLACK); + // compact address space + CompactBufferCandidates array_buffers(_numBuffers, compaction_strategy.get_max_buffers(), compaction_strategy.getMaxDeadAddressSpaceRatio() / 2, CompactionStrategy::DEAD_ADDRESS_SPACE_SLACK); for (uint32_t bufferId = 0; bufferId < _numBuffers; ++bufferId) { const auto &state = getBufferState(bufferId); if (state.isActive()) { auto typeHandler = state.getTypeHandler(); uint32_t arraySize = typeHandler->getArraySize(); uint32_t reservedElements = typeHandler->getReservedElements(bufferId); + size_t used_elems = state.size(); size_t deadElems = state.getDeadElems() - reservedElements; - if (compactMemory && deadElems > worstDeadElems) { - worstMemoryBufferId = bufferId; - worstDeadElems = deadElems; + if (compaction_spec.compact_memory()) { + elem_buffers.add(bufferId, used_elems, deadElems); } - if (compactAddressSpace) { - size_t deadArrays = deadElems / arraySize; - if (deadArrays > worstDeadArrays) { - worstAddressSpaceBufferId = bufferId; - 
worstDeadArrays = deadArrays; - } + if (compaction_spec.compact_address_space()) { + array_buffers.add(bufferId, used_elems / arraySize, deadElems / arraySize); } } } std::vector<uint32_t> result; - if (worstMemoryBufferId != noBufferId) { - markCompacting(worstMemoryBufferId); - result.emplace_back(worstMemoryBufferId); - } - if (worstAddressSpaceBufferId != noBufferId && - worstAddressSpaceBufferId != worstMemoryBufferId) { - markCompacting(worstAddressSpaceBufferId); - result.emplace_back(worstAddressSpaceBufferId); + result.reserve(std::min(_numBuffers, 2 * compaction_strategy.get_max_buffers())); + elem_buffers.select(result); + array_buffers.select(result); + std::sort(result.begin(), result.end()); + auto last = std::unique(result.begin(), result.end()); + result.erase(last, result.end()); + for (auto buffer_id : result) { + markCompacting(buffer_id); } return result; } diff --git a/vespalib/src/vespa/vespalib/datastore/datastorebase.h b/vespalib/src/vespa/vespalib/datastore/datastorebase.h index 6903ae12c9c..e98d9531806 100644 --- a/vespalib/src/vespa/vespalib/datastore/datastorebase.h +++ b/vespalib/src/vespa/vespalib/datastore/datastorebase.h @@ -12,6 +12,9 @@ namespace vespalib::datastore { +class CompactionSpec; +class CompactionStrategy; + /** * Abstract class used to store data of potential different types in underlying memory buffers. 
* @@ -368,7 +371,7 @@ public: } uint32_t startCompactWorstBuffer(uint32_t typeId); - std::vector<uint32_t> startCompactWorstBuffers(bool compactMemory, bool compactAddressSpace); + std::vector<uint32_t> startCompactWorstBuffers(CompactionSpec compaction_spec, const CompactionStrategy &compaction_strategy); uint64_t get_compaction_count() const { return _compaction_count.load(std::memory_order_relaxed); } void inc_compaction_count() const { ++_compaction_count; } bool has_held_buffers() const noexcept { return _hold_buffer_count != 0u; } diff --git a/vespalib/src/vespa/vespalib/datastore/entry_ref_filter.cpp b/vespalib/src/vespa/vespalib/datastore/entry_ref_filter.cpp new file mode 100644 index 00000000000..87c3c87636c --- /dev/null +++ b/vespalib/src/vespa/vespalib/datastore/entry_ref_filter.cpp @@ -0,0 +1,28 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. + +#include "entry_ref_filter.h" + +namespace vespalib::datastore { + +EntryRefFilter::EntryRefFilter(std::vector<bool> filter, uint32_t offset_bits) + : _filter(std::move(filter)), + _offset_bits(offset_bits) +{ +} + +EntryRefFilter::EntryRefFilter(uint32_t num_buffers, uint32_t offset_bits) + : _filter(num_buffers), + _offset_bits(offset_bits) +{ +} + +EntryRefFilter::~EntryRefFilter() = default; + +EntryRefFilter +EntryRefFilter::create_all_filter(uint32_t num_buffers, uint32_t offset_bits) +{ + std::vector<bool> filter(num_buffers, true); + return EntryRefFilter(std::move(filter), offset_bits); +} + +} diff --git a/vespalib/src/vespa/vespalib/datastore/entry_ref_filter.h b/vespalib/src/vespa/vespalib/datastore/entry_ref_filter.h new file mode 100644 index 00000000000..c06d843fbd0 --- /dev/null +++ b/vespalib/src/vespa/vespalib/datastore/entry_ref_filter.h @@ -0,0 +1,35 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+ +#pragma once + +#include "entryref.h" +#include <vector> + +namespace vespalib::datastore { + +/* + * Class to filter entry refs based on which buffer the entry is referencing. + * + * Buffers being allowed have corresponding bit in _filter set. + */ +class EntryRefFilter { + std::vector<bool> _filter; + uint32_t _offset_bits; + EntryRefFilter(std::vector<bool> filter, uint32_t offset_bits); +public: + EntryRefFilter(uint32_t num_buffers, uint32_t offset_bits); + ~EntryRefFilter(); + bool has(EntryRef ref) const { + uint32_t buffer_id = ref.buffer_id(_offset_bits); + return _filter[buffer_id]; + } + void add_buffer(uint32_t buffer_id) { _filter[buffer_id] = true; } + void add_buffers(const std::vector<uint32_t>& ids) { + for (auto buffer_id : ids) { + _filter[buffer_id] = true; + } + } + static EntryRefFilter create_all_filter(uint32_t num_buffers, uint32_t offset_bits); +}; + +} diff --git a/vespalib/src/vespa/vespalib/datastore/entryref.h b/vespalib/src/vespa/vespalib/datastore/entryref.h index ddbc677fc18..7667cc3d2c1 100644 --- a/vespalib/src/vespa/vespalib/datastore/entryref.h +++ b/vespalib/src/vespa/vespalib/datastore/entryref.h @@ -18,6 +18,7 @@ public: uint32_t ref() const noexcept { return _ref; } uint32_t hash() const noexcept { return _ref; } bool valid() const noexcept { return _ref != 0u; } + uint32_t buffer_id(uint32_t offset_bits) const noexcept { return _ref >> offset_bits; } bool operator==(const EntryRef &rhs) const noexcept { return _ref == rhs._ref; } bool operator!=(const EntryRef &rhs) const noexcept { return _ref != rhs._ref; } bool operator <(const EntryRef &rhs) const noexcept { return _ref < rhs._ref; } @@ -31,6 +32,7 @@ public: template <uint32_t OffsetBits, uint32_t BufferBits = 32u - OffsetBits> class EntryRefT : public EntryRef { public: + static constexpr uint32_t offset_bits = OffsetBits; EntryRefT() noexcept : EntryRef() {} EntryRefT(size_t offset_, uint32_t bufferId_) noexcept; EntryRefT(const EntryRef & ref_) noexcept : 
EntryRef(ref_.ref()) {} diff --git a/vespalib/src/vespa/vespalib/datastore/fixed_size_hash_map.cpp b/vespalib/src/vespa/vespalib/datastore/fixed_size_hash_map.cpp index 9f56938f49c..6f001ce3c94 100644 --- a/vespalib/src/vespa/vespalib/datastore/fixed_size_hash_map.cpp +++ b/vespalib/src/vespa/vespalib/datastore/fixed_size_hash_map.cpp @@ -2,6 +2,8 @@ #include "fixed_size_hash_map.h" #include "entry_comparator.h" +#include "entry_ref_filter.h" +#include "i_compactable.h" #include <vespa/vespalib/util/array.hpp> #include <vespa/vespalib/util/memoryusage.h> #include <cassert> @@ -181,15 +183,16 @@ FixedSizeHashMap::foreach_key(const std::function<void(EntryRef)>& callback) con } void -FixedSizeHashMap::move_keys(const std::function<EntryRef(EntryRef)>& callback) +FixedSizeHashMap::move_keys(ICompactable& compactable, const EntryRefFilter &compacting_buffers) { for (auto& chain_head : _chain_heads) { uint32_t node_idx = chain_head.load_relaxed(); while (node_idx != no_node_idx) { auto& node = _nodes[node_idx]; EntryRef old_ref = node.get_kv().first.load_relaxed(); - EntryRef new_ref = callback(old_ref); - if (new_ref != old_ref) { + assert(old_ref.valid()); + if (compacting_buffers.has(old_ref)) { + EntryRef new_ref = compactable.move(old_ref); node.get_kv().first.store_release(new_ref); } node_idx = node.get_next_node_idx().load(std::memory_order_relaxed); @@ -217,4 +220,104 @@ FixedSizeHashMap::normalize_values(const std::function<EntryRef(EntryRef)>& norm return changed; } +namespace { + +class ChangeWriter { + std::vector<AtomicEntryRef*> _atomic_refs; +public: + ChangeWriter(uint32_t capacity); + ~ChangeWriter(); + bool write(const std::vector<EntryRef> &refs); + void emplace_back(AtomicEntryRef &atomic_ref) { _atomic_refs.emplace_back(&atomic_ref); } +}; + +ChangeWriter::ChangeWriter(uint32_t capacity) + : _atomic_refs() +{ + _atomic_refs.reserve(capacity); +} + +ChangeWriter::~ChangeWriter() = default; + +bool +ChangeWriter::write(const std::vector<EntryRef> 
&refs) +{ + bool changed = false; + assert(refs.size() == _atomic_refs.size()); + auto atomic_ref = _atomic_refs.begin(); + for (auto ref : refs) { + EntryRef old_ref = (*atomic_ref)->load_relaxed(); + if (ref != old_ref) { + (*atomic_ref)->store_release(ref); + changed = true; + } + ++atomic_ref; + } + assert(atomic_ref == _atomic_refs.end()); + _atomic_refs.clear(); + return changed; +} + +} + +bool +FixedSizeHashMap::normalize_values(const std::function<void(std::vector<EntryRef>&)>& normalize, const EntryRefFilter& filter) +{ + std::vector<EntryRef> refs; + refs.reserve(1024); + bool changed = false; + ChangeWriter change_writer(refs.capacity()); + for (auto& chain_head : _chain_heads) { + uint32_t node_idx = chain_head.load_relaxed(); + while (node_idx != no_node_idx) { + auto& node = _nodes[node_idx]; + EntryRef ref = node.get_kv().second.load_relaxed(); + if (ref.valid()) { + if (filter.has(ref)) { + refs.emplace_back(ref); + change_writer.emplace_back(node.get_kv().second); + if (refs.size() >= refs.capacity()) { + normalize(refs); + changed |= change_writer.write(refs); + refs.clear(); + } + } + } + node_idx = node.get_next_node_idx().load(std::memory_order_relaxed); + } + } + if (!refs.empty()) { + normalize(refs); + changed |= change_writer.write(refs); + } + return changed; +} + +void +FixedSizeHashMap::foreach_value(const std::function<void(const std::vector<EntryRef>&)>& callback, const EntryRefFilter& filter) +{ + std::vector<EntryRef> refs; + refs.reserve(1024); + for (auto& chain_head : _chain_heads) { + uint32_t node_idx = chain_head.load_relaxed(); + while (node_idx != no_node_idx) { + auto& node = _nodes[node_idx]; + EntryRef ref = node.get_kv().second.load_relaxed(); + if (ref.valid()) { + if (filter.has(ref)) { + refs.emplace_back(ref); + if (refs.size() >= refs.capacity()) { + callback(refs); + refs.clear(); + } + } + } + node_idx = node.get_next_node_idx().load(std::memory_order_relaxed); + } + } + if (!refs.empty()) { + callback(refs); + } 
+} + } diff --git a/vespalib/src/vespa/vespalib/datastore/fixed_size_hash_map.h b/vespalib/src/vespa/vespalib/datastore/fixed_size_hash_map.h index f6990646c0c..c522bcc3c33 100644 --- a/vespalib/src/vespa/vespalib/datastore/fixed_size_hash_map.h +++ b/vespalib/src/vespa/vespalib/datastore/fixed_size_hash_map.h @@ -18,6 +18,9 @@ class MemoryUsage; } namespace vespalib::datastore { +class EntryRefFilter; +struct ICompactable; + class ShardedHashComparator { public: ShardedHashComparator(const EntryComparator& comp, const EntryRef key_ref, uint32_t num_shards) @@ -156,8 +159,26 @@ public: size_t size() const noexcept { return _count; } MemoryUsage get_memory_usage() const; void foreach_key(const std::function<void(EntryRef)>& callback) const; - void move_keys(const std::function<EntryRef(EntryRef)>& callback); + void move_keys(ICompactable& compactable, const EntryRefFilter &compacting_buffers); + /* + * Scan dictionary and call normalize function for each value. If + * returned value is different then write back the modified value to + * the dictionary. Used when clearing all posting lists. + */ bool normalize_values(const std::function<EntryRef(EntryRef)>& normalize); + /* + * Scan dictionary and call normalize function for batches of values + * that pass the filter. Write back modified values to the dictionary. + * Used by compaction of posting lists when moving short arrays, + * bitvectors or btree roots. + */ + bool normalize_values(const std::function<void(std::vector<EntryRef>&)>& normalize, const EntryRefFilter& filter); + /* + * Scan dictionary and call callback function for batches of values + * that pass the filter. Used by compaction of posting lists when + * moving btree nodes. 
+ */ + void foreach_value(const std::function<void(const std::vector<EntryRef>&)>& callback, const EntryRefFilter& filter); }; } diff --git a/vespalib/src/vespa/vespalib/datastore/i_unique_store_dictionary.h b/vespalib/src/vespa/vespalib/datastore/i_unique_store_dictionary.h index 35dbe1795f6..4fd3bcad5e5 100644 --- a/vespalib/src/vespa/vespalib/datastore/i_unique_store_dictionary.h +++ b/vespalib/src/vespa/vespalib/datastore/i_unique_store_dictionary.h @@ -10,7 +10,9 @@ namespace vespalib::datastore { +class CompactionStrategy; class EntryComparator; +class EntryRefFilter; struct ICompactable; class IUniqueStoreDictionaryReadSnapshot; class UniqueStoreAddResult; @@ -28,7 +30,7 @@ public: virtual UniqueStoreAddResult add(const EntryComparator& comp, std::function<EntryRef(void)> insertEntry) = 0; virtual EntryRef find(const EntryComparator& comp) = 0; virtual void remove(const EntryComparator& comp, EntryRef ref) = 0; - virtual void move_entries(ICompactable& compactable) = 0; + virtual void move_keys(ICompactable& compactable, const EntryRefFilter& compacting_buffers) = 0; virtual uint32_t get_num_uniques() const = 0; virtual vespalib::MemoryUsage get_memory_usage() const = 0; virtual void build(vespalib::ConstArrayRef<EntryRef>, vespalib::ConstArrayRef<uint32_t> ref_counts, std::function<void(EntryRef)> hold) = 0; @@ -40,7 +42,7 @@ public: virtual vespalib::MemoryUsage get_btree_memory_usage() const = 0; virtual vespalib::MemoryUsage get_hash_memory_usage() const = 0; virtual bool has_held_buffers() const = 0; - virtual void compact_worst(bool compact_btree_dictionary, bool compact_hash_dictionary) = 0; + virtual void compact_worst(bool compact_btree_dictionary, bool compact_hash_dictionary, const CompactionStrategy& compaction_strategy) = 0; }; } diff --git a/vespalib/src/vespa/vespalib/datastore/sharded_hash_map.cpp b/vespalib/src/vespa/vespalib/datastore/sharded_hash_map.cpp index b7766d8a4e3..019b98a53dd 100644 --- 
a/vespalib/src/vespa/vespalib/datastore/sharded_hash_map.cpp +++ b/vespalib/src/vespa/vespalib/datastore/sharded_hash_map.cpp @@ -171,12 +171,12 @@ ShardedHashMap::foreach_key(std::function<void(EntryRef)> callback) const } void -ShardedHashMap::move_keys(std::function<EntryRef(EntryRef)> callback) +ShardedHashMap::move_keys(ICompactable& compactable, const EntryRefFilter& compacting_buffers) { for (size_t i = 0; i < num_shards; ++i) { auto map = _maps[i].load(std::memory_order_relaxed); if (map != nullptr) { - map->move_keys(callback); + map->move_keys(compactable, compacting_buffers); } } } @@ -195,6 +195,31 @@ ShardedHashMap::normalize_values(std::function<EntryRef(EntryRef)> normalize) } bool +ShardedHashMap::normalize_values(std::function<void(std::vector<EntryRef>&)> normalize, const EntryRefFilter& filter) +{ + bool changed = false; + for (size_t i = 0; i < num_shards; ++i) { + auto map = _maps[i].load(std::memory_order_relaxed); + if (map != nullptr) { + changed |= map->normalize_values(normalize, filter); + } + } + return changed; +} + +void +ShardedHashMap::foreach_value(std::function<void(const std::vector<EntryRef>&)> callback, const EntryRefFilter& filter) +{ + for (size_t i = 0; i < num_shards; ++i) { + auto map = _maps[i].load(std::memory_order_relaxed); + if (map != nullptr) { + map->foreach_value(callback, filter); + } + } +} + + +bool ShardedHashMap::has_held_buffers() const { return _gen_holder.getHeldBytes() != 0; diff --git a/vespalib/src/vespa/vespalib/datastore/sharded_hash_map.h b/vespalib/src/vespa/vespalib/datastore/sharded_hash_map.h index 0f20c6a6e30..e0ba9488351 100644 --- a/vespalib/src/vespa/vespalib/datastore/sharded_hash_map.h +++ b/vespalib/src/vespa/vespalib/datastore/sharded_hash_map.h @@ -10,8 +10,10 @@ namespace vespalib { class MemoryUsage; } namespace vespalib::datastore { -class FixedSizeHashMap; class EntryComparator; +class EntryRefFilter; +class FixedSizeHashMap; +struct ICompactable; /* * Hash map over keys in data 
store, meant to support a faster @@ -56,8 +58,10 @@ public: const EntryComparator &get_default_comparator() const noexcept { return *_comp; } MemoryUsage get_memory_usage() const; void foreach_key(std::function<void(EntryRef)> callback) const; - void move_keys(std::function<EntryRef(EntryRef)> callback); + void move_keys(ICompactable& compactable, const EntryRefFilter& compacting_buffers); bool normalize_values(std::function<EntryRef(EntryRef)> normalize); + bool normalize_values(std::function<void(std::vector<EntryRef>&)> normalize, const EntryRefFilter& filter); + void foreach_value(std::function<void(const std::vector<EntryRef>&)> callback, const EntryRefFilter& filter); bool has_held_buffers() const; void compact_worst_shard(); }; diff --git a/vespalib/src/vespa/vespalib/datastore/unique_store.h b/vespalib/src/vespa/vespalib/datastore/unique_store.h index 38643d84be0..aea98f406e8 100644 --- a/vespalib/src/vespa/vespalib/datastore/unique_store.h +++ b/vespalib/src/vespa/vespalib/datastore/unique_store.h @@ -55,11 +55,11 @@ public: EntryRef find(EntryConstRefType value); EntryConstRefType get(EntryRef ref) const { return _allocator.get(ref); } void remove(EntryRef ref); - std::unique_ptr<Remapper> compact_worst(bool compact_memory, bool compact_address_space); + std::unique_ptr<Remapper> compact_worst(CompactionSpec compaction_spec, const CompactionStrategy& compaction_strategy); vespalib::MemoryUsage getMemoryUsage() const; vespalib::MemoryUsage get_values_memory_usage() const { return _store.getMemoryUsage(); } vespalib::MemoryUsage get_dictionary_memory_usage() const { return _dict->get_memory_usage(); } - vespalib::AddressSpace get_address_space_usage() const; + vespalib::AddressSpace get_values_address_space_usage() const; // TODO: Consider exposing only the needed functions from allocator Allocator& get_allocator() { return _allocator; } diff --git a/vespalib/src/vespa/vespalib/datastore/unique_store.hpp 
b/vespalib/src/vespa/vespalib/datastore/unique_store.hpp index 85894cfa7dd..b73b714a6bc 100644 --- a/vespalib/src/vespa/vespalib/datastore/unique_store.hpp +++ b/vespalib/src/vespa/vespalib/datastore/unique_store.hpp @@ -102,34 +102,27 @@ private: std::vector<uint32_t> _bufferIdsToCompact; void allocMapping() { - _compacting_buffer.resize(RefT::numBuffers()); _mapping.resize(RefT::numBuffers()); for (const auto bufferId : _bufferIdsToCompact) { BufferState &state = _dataStore.getBufferState(bufferId); - _compacting_buffer[bufferId] = true; _mapping[bufferId].resize(state.get_used_arrays()); } } EntryRef move(EntryRef oldRef) override { RefT iRef(oldRef); - assert(iRef.valid()); uint32_t buffer_id = iRef.bufferId(); - if (_compacting_buffer[buffer_id]) { - auto &inner_mapping = _mapping[buffer_id]; - assert(iRef.unscaled_offset() < inner_mapping.size()); - EntryRef &mappedRef = inner_mapping[iRef.unscaled_offset()]; - assert(!mappedRef.valid()); - EntryRef newRef = _store.move(oldRef); - mappedRef = newRef; - return newRef; - } else { - return oldRef; - } + auto &inner_mapping = _mapping[buffer_id]; + assert(iRef.unscaled_offset() < inner_mapping.size()); + EntryRef &mappedRef = inner_mapping[iRef.unscaled_offset()]; + assert(!mappedRef.valid()); + EntryRef newRef = _store.move(oldRef); + mappedRef = newRef; + return newRef; } void fillMapping() { - _dict.move_entries(*this); + _dict.move_keys(*this, _compacting_buffer); } public: @@ -145,6 +138,7 @@ public: _bufferIdsToCompact(std::move(bufferIdsToCompact)) { if (!_bufferIdsToCompact.empty()) { + _compacting_buffer.add_buffers(_bufferIdsToCompact); allocMapping(); fillMapping(); } @@ -163,9 +157,9 @@ public: template <typename EntryT, typename RefT, typename Compare, typename Allocator> std::unique_ptr<typename UniqueStore<EntryT, RefT, Compare, Allocator>::Remapper> -UniqueStore<EntryT, RefT, Compare, Allocator>::compact_worst(bool compact_memory, bool compact_address_space) +UniqueStore<EntryT, RefT, Compare, 
Allocator>::compact_worst(CompactionSpec compaction_spec, const CompactionStrategy& compaction_strategy) { - std::vector<uint32_t> bufferIdsToCompact = _store.startCompactWorstBuffers(compact_memory, compact_address_space); + std::vector<uint32_t> bufferIdsToCompact = _store.startCompactWorstBuffers(compaction_spec, compaction_strategy); if (bufferIdsToCompact.empty()) { return std::unique_ptr<Remapper>(); } else { @@ -184,7 +178,7 @@ UniqueStore<EntryT, RefT, Compare, Allocator>::getMemoryUsage() const template <typename EntryT, typename RefT, typename Compare, typename Allocator> vespalib::AddressSpace -UniqueStore<EntryT, RefT, Compare, Allocator>::get_address_space_usage() const +UniqueStore<EntryT, RefT, Compare, Allocator>::get_values_address_space_usage() const { return _allocator.get_data_store().getAddressSpaceUsage(); } diff --git a/vespalib/src/vespa/vespalib/datastore/unique_store_dictionary.h b/vespalib/src/vespa/vespalib/datastore/unique_store_dictionary.h index fca8a98d280..d64588e3242 100644 --- a/vespalib/src/vespa/vespalib/datastore/unique_store_dictionary.h +++ b/vespalib/src/vespa/vespalib/datastore/unique_store_dictionary.h @@ -79,7 +79,7 @@ public: UniqueStoreAddResult add(const EntryComparator& comp, std::function<EntryRef(void)> insertEntry) override; EntryRef find(const EntryComparator& comp) override; void remove(const EntryComparator& comp, EntryRef ref) override; - void move_entries(ICompactable& compactable) override; + void move_keys(ICompactable& compactable, const EntryRefFilter& compacting_buffers) override; uint32_t get_num_uniques() const override; vespalib::MemoryUsage get_memory_usage() const override; void build(vespalib::ConstArrayRef<EntryRef>, vespalib::ConstArrayRef<uint32_t> ref_counts, std::function<void(EntryRef)> hold) override; @@ -91,7 +91,7 @@ public: vespalib::MemoryUsage get_btree_memory_usage() const override; vespalib::MemoryUsage get_hash_memory_usage() const override; bool has_held_buffers() const override; - 
void compact_worst(bool compact_btree_dictionary, bool compact_hash_dictionary) override; + void compact_worst(bool compact_btree_dictionary, bool compact_hash_dictionary, const CompactionStrategy& compaction_strategy) override; }; } diff --git a/vespalib/src/vespa/vespalib/datastore/unique_store_dictionary.hpp b/vespalib/src/vespa/vespalib/datastore/unique_store_dictionary.hpp index 08be861ba03..4375b38cf7c 100644 --- a/vespalib/src/vespa/vespalib/datastore/unique_store_dictionary.hpp +++ b/vespalib/src/vespa/vespalib/datastore/unique_store_dictionary.hpp @@ -4,6 +4,7 @@ #include "datastore.hpp" #include "entry_comparator_wrapper.h" +#include "entry_ref_filter.h" #include "i_compactable.h" #include "unique_store_add_result.h" #include "unique_store_dictionary.h" @@ -139,26 +140,27 @@ UniqueStoreDictionary<BTreeDictionaryT, ParentT, HashDictionaryT>::remove(const template <typename BTreeDictionaryT, typename ParentT, typename HashDictionaryT> void -UniqueStoreDictionary<BTreeDictionaryT, ParentT, HashDictionaryT>::move_entries(ICompactable &compactable) +UniqueStoreDictionary<BTreeDictionaryT, ParentT, HashDictionaryT>::move_keys(ICompactable &compactable, const EntryRefFilter& compacting_buffers) { if constexpr (has_btree_dictionary) { auto itr = this->_btree_dict.begin(); while (itr.valid()) { EntryRef oldRef(itr.getKey()); - EntryRef newRef(compactable.move(oldRef)); - if (newRef != oldRef) { + assert(oldRef.valid()); + if (compacting_buffers.has(oldRef)) { + EntryRef newRef(compactable.move(oldRef)); this->_btree_dict.thaw(itr); itr.writeKey(newRef); if constexpr (has_hash_dictionary) { - auto result = this->_hash_dict.find(this->_hash_dict.get_default_comparator(), oldRef); - assert(result != nullptr && result->first.load_relaxed() == oldRef); - result->first.store_release(newRef); - } + auto result = this->_hash_dict.find(this->_hash_dict.get_default_comparator(), oldRef); + assert(result != nullptr && result->first.load_relaxed() == oldRef); + 
result->first.store_release(newRef); + } } ++itr; } } else { - this->_hash_dict.move_keys([&compactable](EntryRef old_ref) { return compactable.move(old_ref); }); + this->_hash_dict.move_keys(compactable, compacting_buffers); } } @@ -337,11 +339,11 @@ UniqueStoreDictionary<BTreeDictionaryT, ParentT, HashDictionaryT>::has_held_buff template <typename BTreeDictionaryT, typename ParentT, typename HashDictionaryT> void -UniqueStoreDictionary<BTreeDictionaryT, ParentT, HashDictionaryT>::compact_worst(bool compact_btree_dictionary, bool compact_hash_dictionary) +UniqueStoreDictionary<BTreeDictionaryT, ParentT, HashDictionaryT>::compact_worst(bool compact_btree_dictionary, bool compact_hash_dictionary, const CompactionStrategy& compaction_strategy) { if constexpr (has_btree_dictionary) { if (compact_btree_dictionary) { - this->_btree_dict.compact_worst(); + this->_btree_dict.compact_worst(compaction_strategy); } } else { (void) compact_btree_dictionary; diff --git a/vespalib/src/vespa/vespalib/datastore/unique_store_remapper.h b/vespalib/src/vespa/vespalib/datastore/unique_store_remapper.h index 4a8d72c8685..2501c4fafd9 100644 --- a/vespalib/src/vespa/vespalib/datastore/unique_store_remapper.h +++ b/vespalib/src/vespa/vespalib/datastore/unique_store_remapper.h @@ -3,6 +3,7 @@ #pragma once #include "entryref.h" +#include "entry_ref_filter.h" #include <vector> #include <vespa/vespalib/stllike/allocator.h> @@ -18,43 +19,35 @@ public: using RefType = RefT; protected: - std::vector<bool> _compacting_buffer; + EntryRefFilter _compacting_buffer; std::vector<std::vector<EntryRef, allocator_large<EntryRef>>> _mapping; public: UniqueStoreRemapper() - : _compacting_buffer(), + : _compacting_buffer(RefT::numBuffers(), RefT::offset_bits), _mapping() { } virtual ~UniqueStoreRemapper() = default; EntryRef remap(EntryRef ref) const { - if (ref.valid()) { - RefType internal_ref(ref); - if (!_compacting_buffer[internal_ref.bufferId()]) { - // No remapping for references to buffers not 
being compacted - return ref; - } else { - auto &inner_mapping = _mapping[internal_ref.bufferId()]; - assert(internal_ref.unscaled_offset() < inner_mapping.size()); - EntryRef mapped_ref = inner_mapping[internal_ref.unscaled_offset()]; - assert(mapped_ref.valid()); - return mapped_ref; - } - } else { - return EntryRef(); - } + RefType internal_ref(ref); + auto &inner_mapping = _mapping[internal_ref.bufferId()]; + assert(internal_ref.unscaled_offset() < inner_mapping.size()); + EntryRef mapped_ref = inner_mapping[internal_ref.unscaled_offset()]; + assert(mapped_ref.valid()); + return mapped_ref; } void remap(vespalib::ArrayRef<EntryRef> refs) const { for (auto &ref : refs) { - auto mapped_ref = remap(ref); - if (mapped_ref != ref) { - ref = mapped_ref; + if (ref.valid() && _compacting_buffer.has(ref)) { + ref = remap(ref); } } } + const EntryRefFilter& get_entry_ref_filter() const noexcept { return _compacting_buffer; } + virtual void done() = 0; }; diff --git a/vespalib/src/vespa/vespalib/hwaccelrated/avx2.cpp b/vespalib/src/vespa/vespalib/hwaccelrated/avx2.cpp index 6a6421ad016..590223ed13a 100644 --- a/vespalib/src/vespa/vespalib/hwaccelrated/avx2.cpp +++ b/vespalib/src/vespa/vespalib/hwaccelrated/avx2.cpp @@ -11,6 +11,11 @@ Avx2Accelrator::populationCount(const uint64_t *a, size_t sz) const { } double +Avx2Accelrator::squaredEuclideanDistance(const int8_t * a, const int8_t * b, size_t sz) const { + return helper::squaredEuclideanDistance(a, b, sz); +} + +double Avx2Accelrator::squaredEuclideanDistance(const float * a, const float * b, size_t sz) const { return avx::euclideanDistanceSelectAlignment<float, 32>(a, b, sz); } diff --git a/vespalib/src/vespa/vespalib/hwaccelrated/avx2.h b/vespalib/src/vespa/vespalib/hwaccelrated/avx2.h index 44752dd9270..2949e81fd36 100644 --- a/vespalib/src/vespa/vespalib/hwaccelrated/avx2.h +++ b/vespalib/src/vespa/vespalib/hwaccelrated/avx2.h @@ -13,6 +13,7 @@ class Avx2Accelrator : public GenericAccelrator { public: size_t 
populationCount(const uint64_t *a, size_t sz) const override; + double squaredEuclideanDistance(const int8_t * a, const int8_t * b, size_t sz) const override; double squaredEuclideanDistance(const float * a, const float * b, size_t sz) const override; double squaredEuclideanDistance(const double * a, const double * b, size_t sz) const override; void and64(size_t offset, const std::vector<std::pair<const void *, bool>> &src, void *dest) const override; diff --git a/vespalib/src/vespa/vespalib/hwaccelrated/avx512.cpp b/vespalib/src/vespa/vespalib/hwaccelrated/avx512.cpp index 94a6637a072..5878165bb6d 100644 --- a/vespalib/src/vespa/vespalib/hwaccelrated/avx512.cpp +++ b/vespalib/src/vespa/vespalib/hwaccelrated/avx512.cpp @@ -23,6 +23,11 @@ Avx512Accelrator::populationCount(const uint64_t *a, size_t sz) const { } double +Avx512Accelrator::squaredEuclideanDistance(const int8_t * a, const int8_t * b, size_t sz) const { + return helper::squaredEuclideanDistance(a, b, sz); +} + +double Avx512Accelrator::squaredEuclideanDistance(const float * a, const float * b, size_t sz) const { return avx::euclideanDistanceSelectAlignment<float, 64>(a, b, sz); } diff --git a/vespalib/src/vespa/vespalib/hwaccelrated/avx512.h b/vespalib/src/vespa/vespalib/hwaccelrated/avx512.h index 826cf63be70..4989f72e698 100644 --- a/vespalib/src/vespa/vespalib/hwaccelrated/avx512.h +++ b/vespalib/src/vespa/vespalib/hwaccelrated/avx512.h @@ -15,6 +15,7 @@ public: float dotProduct(const float * a, const float * b, size_t sz) const override; double dotProduct(const double * a, const double * b, size_t sz) const override; size_t populationCount(const uint64_t *a, size_t sz) const override; + double squaredEuclideanDistance(const int8_t * a, const int8_t * b, size_t sz) const override; double squaredEuclideanDistance(const float * a, const float * b, size_t sz) const override; double squaredEuclideanDistance(const double * a, const double * b, size_t sz) const override; void and64(size_t offset, const 
std::vector<std::pair<const void *, bool>> &src, void *dest) const override; diff --git a/vespalib/src/vespa/vespalib/hwaccelrated/generic.cpp b/vespalib/src/vespa/vespalib/hwaccelrated/generic.cpp index fb6ec167cf4..13946fa3398 100644 --- a/vespalib/src/vespa/vespalib/hwaccelrated/generic.cpp +++ b/vespalib/src/vespa/vespalib/hwaccelrated/generic.cpp @@ -34,7 +34,7 @@ multiplyAdd(const T * a, const T * b, size_t sz) template <typename T, size_t UNROLL> double -euclideanDistanceT(const T * a, const T * b, size_t sz) +squaredEuclideanDistanceT(const T * a, const T * b, size_t sz) { T partial[UNROLL]; for (size_t i(0); i < UNROLL; i++) { @@ -43,11 +43,13 @@ euclideanDistanceT(const T * a, const T * b, size_t sz) size_t i(0); for (; i + UNROLL <= sz; i += UNROLL) { for (size_t j(0); j < UNROLL; j++) { - partial[j] += (a[i+j] - b[i+j]) * (a[i+j] - b[i+j]); + T d = a[i+j] - b[i+j]; + partial[j] += d * d; } } for (;i < sz; i++) { - partial[i%UNROLL] += (a[i] - b[i]) * (a[i] - b[i]); + T d = a[i] - b[i]; + partial[i%UNROLL] += d * d; } double sum(0); for (size_t j(0); j < UNROLL; j++) { @@ -156,13 +158,18 @@ GenericAccelrator::populationCount(const uint64_t *a, size_t sz) const { } double +GenericAccelrator::squaredEuclideanDistance(const int8_t * a, const int8_t * b, size_t sz) const { + return helper::squaredEuclideanDistance(a, b, sz); +} + +double GenericAccelrator::squaredEuclideanDistance(const float * a, const float * b, size_t sz) const { - return euclideanDistanceT<float, 8>(a, b, sz); + return squaredEuclideanDistanceT<float, 2>(a, b, sz); } double GenericAccelrator::squaredEuclideanDistance(const double * a, const double * b, size_t sz) const { - return euclideanDistanceT<double, 4>(a, b, sz); + return squaredEuclideanDistanceT<double, 2>(a, b, sz); } void diff --git a/vespalib/src/vespa/vespalib/hwaccelrated/generic.h b/vespalib/src/vespa/vespalib/hwaccelrated/generic.h index c6b75bbcaf0..315e807da07 100644 --- 
a/vespalib/src/vespa/vespalib/hwaccelrated/generic.h +++ b/vespalib/src/vespa/vespalib/hwaccelrated/generic.h @@ -23,6 +23,7 @@ public: void andNotBit(void * a, const void * b, size_t bytes) const override; void notBit(void * a, size_t bytes) const override; size_t populationCount(const uint64_t *a, size_t sz) const override; + double squaredEuclideanDistance(const int8_t * a, const int8_t * b, size_t sz) const override; double squaredEuclideanDistance(const float * a, const float * b, size_t sz) const override; double squaredEuclideanDistance(const double * a, const double * b, size_t sz) const override; void and64(size_t offset, const std::vector<std::pair<const void *, bool>> &src, void *dest) const override; diff --git a/vespalib/src/vespa/vespalib/hwaccelrated/iaccelrated.cpp b/vespalib/src/vespa/vespalib/hwaccelrated/iaccelrated.cpp index b4f7eb5cd96..7407ffd6a4e 100644 --- a/vespalib/src/vespa/vespalib/hwaccelrated/iaccelrated.cpp +++ b/vespalib/src/vespa/vespalib/hwaccelrated/iaccelrated.cpp @@ -17,28 +17,18 @@ namespace vespalib::hwaccelrated { namespace { -class Factory { -public: - virtual ~Factory() = default; - virtual IAccelrated::UP create() const = 0; -}; - -class GenericFactory :public Factory{ -public: - IAccelrated::UP create() const override { return std::make_unique<GenericAccelrator>(); } -}; - +IAccelrated::UP create_accelerator() { #ifdef __x86_64__ -class Avx2Factory :public Factory{ -public: - IAccelrated::UP create() const override { return std::make_unique<Avx2Accelrator>(); } -}; - -class Avx512Factory :public Factory{ -public: - IAccelrated::UP create() const override { return std::make_unique<Avx512Accelrator>(); } -}; + __builtin_cpu_init(); + if (__builtin_cpu_supports("avx512f")) { + return std::make_unique<Avx512Accelrator>(); + } + if (__builtin_cpu_supports("avx2")) { + return std::make_unique<Avx2Accelrator>(); + } #endif + return std::make_unique<GenericAccelrator>(); +} template<typename T> std::vector<T> createAndFill(size_t 
sz) { @@ -247,42 +237,14 @@ RuntimeVerificator::RuntimeVerificator() verify(thisCpu); } -class Selector -{ -public: - Selector() __attribute__((noinline)); - IAccelrated::UP create() { return _factory->create(); } -private: - std::unique_ptr<Factory> _factory; -}; - -Selector::Selector() : - _factory() -{ -#ifdef __x86_64__ - __builtin_cpu_init (); - if (__builtin_cpu_supports("avx512f")) { - _factory = std::make_unique<Avx512Factory>(); - } else if (__builtin_cpu_supports("avx2")) { - _factory = std::make_unique<Avx2Factory>(); - } else { - _factory = std::make_unique<GenericFactory>(); - } -#else - _factory = std::make_unique<GenericFactory>(); -#endif -} - } -static Selector _G_selector; - RuntimeVerificator _G_verifyAccelrator; const IAccelrated & IAccelrated::getAccelerator() { - static IAccelrated::UP accelrator = _G_selector.create(); + static IAccelrated::UP accelrator = create_accelerator(); return *accelrator; } diff --git a/vespalib/src/vespa/vespalib/hwaccelrated/iaccelrated.h b/vespalib/src/vespa/vespalib/hwaccelrated/iaccelrated.h index afb2024b322..6eae41ead4b 100644 --- a/vespalib/src/vespa/vespalib/hwaccelrated/iaccelrated.h +++ b/vespalib/src/vespa/vespalib/hwaccelrated/iaccelrated.h @@ -28,6 +28,7 @@ public: virtual void andNotBit(void * a, const void * b, size_t bytes) const = 0; virtual void notBit(void * a, size_t bytes) const = 0; virtual size_t populationCount(const uint64_t *a, size_t sz) const = 0; + virtual double squaredEuclideanDistance(const int8_t * a, const int8_t * b, size_t sz) const = 0; virtual double squaredEuclideanDistance(const float * a, const float * b, size_t sz) const = 0; virtual double squaredEuclideanDistance(const double * a, const double * b, size_t sz) const = 0; // AND 64 bytes from multiple, optionally inverted sources diff --git a/vespalib/src/vespa/vespalib/hwaccelrated/private_helpers.hpp b/vespalib/src/vespa/vespalib/hwaccelrated/private_helpers.hpp index 824e0e1ebd9..3b063ce6805 100644 --- 
a/vespalib/src/vespa/vespalib/hwaccelrated/private_helpers.hpp +++ b/vespalib/src/vespa/vespalib/hwaccelrated/private_helpers.hpp @@ -74,5 +74,31 @@ orChunks(size_t offset, const std::vector<std::pair<const void *, bool>> & src, } } +template<typename TemporaryT=int32_t> +double squaredEuclideanDistanceT(const int8_t * a, const int8_t * b, size_t sz) __attribute__((noinline)); +template<typename TemporaryT> +double squaredEuclideanDistanceT(const int8_t * a, const int8_t * b, size_t sz) +{ + //Note that this is 3 times faster with int32_t than with int64_t and 16x faster than float + TemporaryT sum = 0; + for (size_t i(0); i < sz; i++) { + int16_t d = int16_t(a[i]) - int16_t(b[i]); + sum += d * d; + } + return sum; +} + +inline double +squaredEuclideanDistance(const int8_t * a, const int8_t * b, size_t sz) { + constexpr size_t LOOP_COUNT = 0x10000; + double sum(0); + size_t i=0; + for (; i + LOOP_COUNT <= sz; i += LOOP_COUNT) { + sum += squaredEuclideanDistanceT<int32_t>(a + i, b + i, LOOP_COUNT); + } + sum += squaredEuclideanDistanceT<int32_t>(a + i, b + i, sz - i); + return sum; +} + } } diff --git a/vespalib/src/vespa/vespalib/net/tls/peer_policies.cpp b/vespalib/src/vespa/vespalib/net/tls/peer_policies.cpp index 149ad01b947..a476e23e6cb 100644 --- a/vespalib/src/vespa/vespalib/net/tls/peer_policies.cpp +++ b/vespalib/src/vespa/vespalib/net/tls/peer_policies.cpp @@ -22,23 +22,29 @@ bool is_regex_special_char(char c) noexcept { case '\\': case '+': case '.': + case '?': + case '*': return true; default: return false; } } -std::string dot_separated_glob_to_regex(vespalib::stringref glob) { +// Important: `delimiter` MUST NOT be a character that needs escaping within a regex [charset] +template <bool SupportSingleCharMatch> +std::string char_delimited_glob_to_regex(vespalib::stringref glob, char delimiter) { std::string ret = "^"; ret.reserve(glob.size() + 2); + // Note: we explicitly stop matching at a delimiter boundary. 
+ // This is to make path fragment matching less vulnerable to dirty tricks. + const std::string wildcard_pattern = std::string("[^") + delimiter + "]*"; + // Same applies for single chars; they should only match _within_ a delimited boundary. + const std::string single_char_pattern = std::string("[^") + delimiter + "]"; for (auto c : glob) { if (c == '*') { - // Note: we explicitly stop matching at a dot separator boundary. - // This is to make host name matching less vulnerable to dirty tricks. - ret += "[^.]*"; - } else if (c == '?') { - // Same applies for single chars; they should only match _within_ a dot boundary. - ret += "[^.]"; + ret += wildcard_pattern; + } else if (c == '?' && SupportSingleCharMatch) { + ret += single_char_pattern; } else { if (is_regex_special_char(c)) { ret += '\\'; @@ -52,14 +58,25 @@ std::string dot_separated_glob_to_regex(vespalib::stringref glob) { class RegexHostMatchPattern : public CredentialMatchPattern { Regex _pattern_as_regex; -public: - explicit RegexHostMatchPattern(vespalib::stringref glob_pattern) - : _pattern_as_regex(Regex::from_pattern(dot_separated_glob_to_regex(glob_pattern))) + explicit RegexHostMatchPattern(std::string_view glob_pattern) + : _pattern_as_regex(Regex::from_pattern(glob_pattern)) { } +public: + RegexHostMatchPattern(RegexHostMatchPattern&&) noexcept = default; ~RegexHostMatchPattern() override = default; - [[nodiscard]] bool matches(vespalib::stringref str) const override { + RegexHostMatchPattern& operator=(RegexHostMatchPattern&&) noexcept = default; + + [[nodiscard]] static RegexHostMatchPattern from_dns_glob_pattern(vespalib::stringref glob_pattern) { + return RegexHostMatchPattern(char_delimited_glob_to_regex<true>(glob_pattern, '.')); + } + + [[nodiscard]] static RegexHostMatchPattern from_uri_glob_pattern(vespalib::stringref glob_pattern) { + return RegexHostMatchPattern(char_delimited_glob_to_regex<false>(glob_pattern, '/')); + } + + [[nodiscard]] bool matches(vespalib::stringref str) const 
noexcept override { return _pattern_as_regex.full_match(std::string_view(str.data(), str.size())); } }; @@ -73,15 +90,19 @@ public: } ~ExactMatchPattern() override = default; - [[nodiscard]] bool matches(vespalib::stringref str) const override { + [[nodiscard]] bool matches(vespalib::stringref str) const noexcept override { return (str == _must_match_exactly); } }; } // anon ns -std::shared_ptr<const CredentialMatchPattern> CredentialMatchPattern::create_from_glob(vespalib::stringref glob_pattern) { - return std::make_shared<const RegexHostMatchPattern>(glob_pattern); +std::shared_ptr<const CredentialMatchPattern> CredentialMatchPattern::create_from_dns_glob(vespalib::stringref glob_pattern) { + return std::make_shared<const RegexHostMatchPattern>(RegexHostMatchPattern::from_dns_glob_pattern(glob_pattern)); +} + +std::shared_ptr<const CredentialMatchPattern> CredentialMatchPattern::create_from_uri_glob(vespalib::stringref glob_pattern) { + return std::make_shared<const RegexHostMatchPattern>(RegexHostMatchPattern::from_uri_glob_pattern(glob_pattern)); } std::shared_ptr<const CredentialMatchPattern> CredentialMatchPattern::create_exact_match(vespalib::stringref str) { @@ -91,9 +112,8 @@ std::shared_ptr<const CredentialMatchPattern> CredentialMatchPattern::create_exa RequiredPeerCredential::RequiredPeerCredential(Field field, vespalib::string must_match_pattern) : _field(field), _original_pattern(std::move(must_match_pattern)), - // FIXME it's not RFC 2459-compliant to use exact-matching for URIs, but that's all we currently need. - _match_pattern(field == Field::SAN_URI ? CredentialMatchPattern::create_exact_match(_original_pattern) - : CredentialMatchPattern::create_from_glob(_original_pattern)) + _match_pattern(field == Field::SAN_URI ? 
CredentialMatchPattern::create_from_uri_glob(_original_pattern) + : CredentialMatchPattern::create_from_dns_glob(_original_pattern)) { } @@ -111,11 +131,21 @@ void print_joined(std::ostream& os, const Collection& coll, const char* sep) { os << e; } } + +constexpr const char* to_string(RequiredPeerCredential::Field field) noexcept { + switch (field) { + case RequiredPeerCredential::Field::CN: return "CN"; + case RequiredPeerCredential::Field::SAN_DNS: return "SAN_DNS"; + case RequiredPeerCredential::Field::SAN_URI: return "SAN_URI"; + default: abort(); + } +} + } std::ostream& operator<<(std::ostream& os, const RequiredPeerCredential& cred) { os << "RequiredPeerCredential(" - << (cred.field() == RequiredPeerCredential::Field::CN ? "CN" : "SAN_DNS") + << to_string(cred.field()) << " matches '" << cred.original_pattern() << "')"; diff --git a/vespalib/src/vespa/vespalib/net/tls/peer_policies.h b/vespalib/src/vespa/vespalib/net/tls/peer_policies.h index c5721858518..4166efc4312 100644 --- a/vespalib/src/vespa/vespalib/net/tls/peer_policies.h +++ b/vespalib/src/vespa/vespalib/net/tls/peer_policies.h @@ -10,9 +10,10 @@ namespace vespalib::net::tls { struct CredentialMatchPattern { virtual ~CredentialMatchPattern() = default; - [[nodiscard]] virtual bool matches(vespalib::stringref str) const = 0; + [[nodiscard]] virtual bool matches(vespalib::stringref str) const noexcept = 0; - static std::shared_ptr<const CredentialMatchPattern> create_from_glob(vespalib::stringref pattern); + static std::shared_ptr<const CredentialMatchPattern> create_from_dns_glob(vespalib::stringref glob_pattern); + static std::shared_ptr<const CredentialMatchPattern> create_from_uri_glob(vespalib::stringref glob_pattern); static std::shared_ptr<const CredentialMatchPattern> create_exact_match(vespalib::stringref pattern); }; @@ -37,7 +38,7 @@ public: && (_original_pattern == rhs._original_pattern)); } - [[nodiscard]] bool matches(vespalib::stringref str) const { + [[nodiscard]] bool 
matches(vespalib::stringref str) const noexcept { return (_match_pattern && _match_pattern->matches(str)); } diff --git a/vespalib/src/vespa/vespalib/stllike/string.cpp b/vespalib/src/vespa/vespalib/stllike/string.cpp index 67e89d013cc..460bb0d39c6 100644 --- a/vespalib/src/vespa/vespalib/stllike/string.cpp +++ b/vespalib/src/vespa/vespalib/stllike/string.cpp @@ -100,4 +100,9 @@ template string operator + (stringref a, const string & b); template string operator + (const string & a, const char * b); template string operator + (const char * a, const string & b); +const string &empty_string() { + static string empty; + return empty; +} + } diff --git a/vespalib/src/vespa/vespalib/stllike/string.h b/vespalib/src/vespa/vespalib/stllike/string.h index 79c9d50b164..2c0bb9a85d3 100644 --- a/vespalib/src/vespa/vespalib/stllike/string.h +++ b/vespalib/src/vespa/vespalib/stllike/string.h @@ -675,6 +675,9 @@ inline bool ends_with(stringref text, stringref key) { return false; } +// returns a reference to a shared empty string +const string &empty_string(); + /** * Utility function to format an unsigned integer into a new * string instance. diff --git a/vespalib/src/vespa/vespalib/util/CMakeLists.txt b/vespalib/src/vespa/vespalib/util/CMakeLists.txt index 75ea02d448e..58f6a93babc 100644 --- a/vespalib/src/vespa/vespalib/util/CMakeLists.txt +++ b/vespalib/src/vespa/vespalib/util/CMakeLists.txt @@ -30,6 +30,7 @@ vespa_add_library(vespalib_vespalib_util OBJECT generationholder.cpp hdr_abort.cpp host_name.cpp + invokeserviceimpl.cpp issue.cpp joinable.cpp latch.cpp diff --git a/vespalib/src/vespa/vespalib/util/executor.h b/vespalib/src/vespa/vespalib/util/executor.h index 6ef8f182ec4..6346b51c2ab 100644 --- a/vespalib/src/vespa/vespalib/util/executor.h +++ b/vespalib/src/vespa/vespalib/util/executor.h @@ -7,11 +7,23 @@ namespace vespalib { /** + * Interface for componets that can benefit from regular wakeup calls. 
+ */ +class IWakeup { +public: + virtual ~IWakeup() = default; + /** + * In case you have a lazy executor that naps inbetween. + **/ + virtual void wakeup() = 0; +}; + +/** * An executor decouples the execution of a task from the request of * executing that task. Also, tasks are typically executed * concurrently in multiple threads. **/ -class Executor +class Executor : public IWakeup { public: /** @@ -37,10 +49,6 @@ public: **/ virtual Task::UP execute(Task::UP task) = 0; - /** - * In case you have a lazy executor that naps inbetween. - **/ - virtual void wakeup() = 0; virtual ~Executor() = default; }; diff --git a/vespalib/src/vespa/vespalib/util/invokeservice.h b/vespalib/src/vespa/vespalib/util/invokeservice.h new file mode 100644 index 00000000000..3e3973234d1 --- /dev/null +++ b/vespalib/src/vespa/vespalib/util/invokeservice.h @@ -0,0 +1,20 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. + +#pragma once + +#include "idestructorcallback.h" +#include <functional> + +namespace vespalib { + +/** + * Interface to register for receiving regular invoke calls. + * The registration will last as long as the returned object is kept alive. + **/ +class InvokeService { +public: + virtual ~InvokeService() = default; + virtual std::unique_ptr<IDestructorCallback> registerInvoke(std::function<void()> func) = 0; +}; + +} diff --git a/vespalib/src/vespa/vespalib/util/invokeserviceimpl.cpp b/vespalib/src/vespa/vespalib/util/invokeserviceimpl.cpp new file mode 100644 index 00000000000..ffa0825c950 --- /dev/null +++ b/vespalib/src/vespa/vespalib/util/invokeserviceimpl.cpp @@ -0,0 +1,86 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+ +#include "invokeserviceimpl.h" +#include <cassert> + +namespace vespalib { + +InvokeServiceImpl::InvokeServiceImpl(duration napTime) + : _naptime(napTime), + _lock(), + _currId(0), + _closed(false), + _toInvoke(), + _thread() +{ +} + +InvokeServiceImpl::~InvokeServiceImpl() +{ + { + std::lock_guard guard(_lock); + assert(_toInvoke.empty()); + _closed = true; + } + if (_thread) { + _thread->join(); + } +} + +class InvokeServiceImpl::Registration : public IDestructorCallback { +public: + Registration(InvokeServiceImpl * service, uint64_t id) noexcept + : _service(service), + _id(id) + { } + Registration(const Registration &) = delete; + Registration & operator=(const Registration &) = delete; + ~Registration() override{ + _service->unregister(_id); + } +private: + InvokeServiceImpl * _service; + uint64_t _id; +}; + +std::unique_ptr<IDestructorCallback> +InvokeServiceImpl::registerInvoke(VoidFunc func) { + std::lock_guard guard(_lock); + uint64_t id = _currId++; + _toInvoke.emplace_back(id, std::move(func)); + if ( ! _thread) { + _thread = std::make_unique<std::thread>([this]() { runLoop(); }); + } + return std::make_unique<Registration>(this, id); +} + +void +InvokeServiceImpl::unregister(uint64_t id) { + std::lock_guard guard(_lock); + auto found = std::find_if(_toInvoke.begin(), _toInvoke.end(), [id](const std::pair<uint64_t, VoidFunc> & a) { + return id == a.first; + }); + assert (found != _toInvoke.end()); + _toInvoke.erase(found); +} + +void +InvokeServiceImpl::runLoop() { + bool done = false; + while ( ! done ) { + { + std::lock_guard guard(_lock); + for (auto & func: _toInvoke) { + func.second(); + } + done = _closed; + } + if ( ! 
done) { + std::this_thread::sleep_for(_naptime); + } + } + +} + +} + diff --git a/vespalib/src/vespa/vespalib/util/invokeserviceimpl.h b/vespalib/src/vespa/vespalib/util/invokeserviceimpl.h new file mode 100644 index 00000000000..3b0c7690731 --- /dev/null +++ b/vespalib/src/vespa/vespalib/util/invokeserviceimpl.h @@ -0,0 +1,36 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. + +#pragma once + +#include "invokeservice.h" +#include "time.h" +#include <mutex> +#include <vector> +#include <thread> + +namespace vespalib { + +/** + * An invoke service what will invoke the given function with at specified frequency. + */ +class InvokeServiceImpl : public InvokeService { + using VoidFunc = std::function<void()>; +public: + InvokeServiceImpl(duration napTime); + InvokeServiceImpl(const InvokeServiceImpl &) = delete; + InvokeServiceImpl & operator=(const InvokeServiceImpl &) = delete; + ~InvokeServiceImpl() override; + std::unique_ptr<IDestructorCallback> registerInvoke(VoidFunc func) override; +private: + class Registration; + void unregister(uint64_t id); + void runLoop(); + duration _naptime; + std::mutex _lock; + uint64_t _currId; + bool _closed; + std::vector<std::pair<uint64_t, VoidFunc>> _toInvoke; + std::unique_ptr<std::thread> _thread; +}; + +} diff --git a/vespalib/src/vespa/vespalib/util/rcuvector.h b/vespalib/src/vespa/vespalib/util/rcuvector.h index 0396ee0d459..dd4fa660279 100644 --- a/vespalib/src/vespa/vespalib/util/rcuvector.h +++ b/vespalib/src/vespa/vespalib/util/rcuvector.h @@ -13,10 +13,10 @@ namespace vespalib { template <typename T> class RcuVectorHeld : public GenerationHeldBase { - std::unique_ptr<T> _data; + T _data; public: - RcuVectorHeld(size_t size, std::unique_ptr<T> data); + RcuVectorHeld(size_t size, T&& data); ~RcuVectorHeld(); }; @@ -121,7 +121,7 @@ public: void reset(); void shrink(size_t newSize) __attribute__((noinline)); - void replaceVector(std::unique_ptr<ArrayType> 
replacement); + void replaceVector(ArrayType replacement); }; template <typename T> diff --git a/vespalib/src/vespa/vespalib/util/rcuvector.hpp b/vespalib/src/vespa/vespalib/util/rcuvector.hpp index 9d7c8ea57d6..3c455149dfd 100644 --- a/vespalib/src/vespa/vespalib/util/rcuvector.hpp +++ b/vespalib/src/vespa/vespalib/util/rcuvector.hpp @@ -9,7 +9,7 @@ namespace vespalib { template <typename T> -RcuVectorHeld<T>::RcuVectorHeld(size_t size, std::unique_ptr<T> data) +RcuVectorHeld<T>::RcuVectorHeld(size_t size, T&& data) : GenerationHeldBase(size), _data(std::move(data)) { } @@ -52,20 +52,21 @@ RcuVectorBase<T>::~RcuVectorBase() = default; template <typename T> void RcuVectorBase<T>::expand(size_t newCapacity) { - std::unique_ptr<ArrayType> tmpData(new ArrayType()); - tmpData->reserve(newCapacity); + ArrayType tmpData; + tmpData.reserve(newCapacity); for (const T & v : _data) { - tmpData->push_back_fast(v); + tmpData.push_back_fast(v); } replaceVector(std::move(tmpData)); } template <typename T> void -RcuVectorBase<T>::replaceVector(std::unique_ptr<ArrayType> replacement) { - replacement->swap(_data); // atomic switch of underlying data - size_t holdSize = replacement->capacity() * sizeof(T); - GenerationHeldBase::UP hold(new RcuVectorHeld<ArrayType>(holdSize, std::move(replacement))); +RcuVectorBase<T>::replaceVector(ArrayType replacement) { + std::atomic_thread_fence(std::memory_order_release); + replacement.swap(_data); // atomic switch of underlying data + size_t holdSize = replacement.capacity() * sizeof(T); + auto hold = std::make_unique<RcuVectorHeld<ArrayType>>(holdSize, std::move(replacement)); _genHolder.hold(std::move(hold)); onReallocation(); } @@ -90,17 +91,18 @@ RcuVectorBase<T>::shrink(size_t newSize) return; } if (!_data.try_unreserve(wantedCapacity)) { - std::unique_ptr<ArrayType> tmpData(new ArrayType()); - tmpData->reserve(wantedCapacity); - tmpData->resize(newSize); + ArrayType tmpData; + tmpData.reserve(wantedCapacity); + tmpData.resize(newSize); 
for (uint32_t i = 0; i < newSize; ++i) { - (*tmpData)[i] = _data[i]; + tmpData[i] = _data[i]; } + std::atomic_thread_fence(std::memory_order_release); // Users of RCU vector must ensure that no readers use old size // after swap. Attribute vectors uses _committedDocIdLimit for this. - tmpData->swap(_data); // atomic switch of underlying data - size_t holdSize = tmpData->capacity() * sizeof(T); - GenerationHeldBase::UP hold(new RcuVectorHeld<ArrayType>(holdSize, std::move(tmpData))); + tmpData.swap(_data); // atomic switch of underlying data + size_t holdSize = tmpData.capacity() * sizeof(T); + auto hold = std::make_unique<RcuVectorHeld<ArrayType>>(holdSize, std::move(tmpData)); _genHolder.hold(std::move(hold)); onReallocation(); } diff --git a/vespalib/src/vespa/vespalib/util/simple_thread_bundle.cpp b/vespalib/src/vespa/vespalib/util/simple_thread_bundle.cpp index 80bbb3a7ad2..ab83d4e05fd 100644 --- a/vespalib/src/vespa/vespalib/util/simple_thread_bundle.cpp +++ b/vespalib/src/vespa/vespalib/util/simple_thread_bundle.cpp @@ -8,6 +8,8 @@ using namespace vespalib::fixed_thread_bundle; namespace vespalib { +VESPA_THREAD_STACK_TAG(simple_thread_bundle_executor); + namespace { struct SignalHook : Runnable { @@ -43,7 +45,7 @@ Runnable::UP wrap(Runnable *runnable) { } Runnable::UP chain(Runnable::UP first, Runnable::UP second) { - return Runnable::UP(new HookPair(std::move(first), std::move(second))); + return std::make_unique<HookPair>(std::move(first), std::move(second)); } } // namespace vespalib::<unnamed> @@ -173,4 +175,19 @@ SimpleThreadBundle::run(const std::vector<Runnable*> &targets) latch.await(); } +SimpleThreadBundle::Worker::Worker(Signal &s, Runnable::UP h) + : thread(*this, simple_thread_bundle_executor), + signal(s), + hook(std::move(h)) +{ + thread.start(); +} +void +SimpleThreadBundle::Worker::run() { + for (size_t gen = 0; signal.wait(gen) > 0; ) { + hook->run(); +} + +} + } // namespace vespalib diff --git 
a/vespalib/src/vespa/vespalib/util/simple_thread_bundle.h b/vespalib/src/vespa/vespalib/util/simple_thread_bundle.h index f0aaccc2525..d9a29ee7bef 100644 --- a/vespalib/src/vespa/vespalib/util/simple_thread_bundle.h +++ b/vespalib/src/vespa/vespalib/util/simple_thread_bundle.h @@ -112,14 +112,8 @@ private: Thread thread; Signal &signal; Runnable::UP hook; - Worker(Signal &s, Runnable::UP h) : thread(*this), signal(s), hook(std::move(h)) { - thread.start(); - } - void run() override { - for (size_t gen = 0; signal.wait(gen) > 0; ) { - hook->run(); - } - } + Worker(Signal &s, Runnable::UP h); + void run() override; }; Work _work; diff --git a/vespalib/src/vespa/vespalib/util/thread.cpp b/vespalib/src/vespa/vespalib/util/thread.cpp index c02a7a3b063..c3230bf313d 100644 --- a/vespalib/src/vespa/vespalib/util/thread.cpp +++ b/vespalib/src/vespa/vespalib/util/thread.cpp @@ -9,9 +9,9 @@ namespace vespalib { __thread Thread *Thread::_currentThread = nullptr; -Thread::Proxy::Proxy(Thread &parent, Runnable &target) - : thread(parent), runnable(target), - start(), started(), cancel(false) +Thread::Proxy::Proxy(Thread &parent, Runnable &target, init_fun_t init_fun_in) + : thread(parent), runnable(target), init_fun(std::move(init_fun_in)), + start(), started(), cancel(false) { } void @@ -22,7 +22,7 @@ Thread::Proxy::Run(FastOS_ThreadInterface *, void *) start.await(); if (!cancel) { started.countDown(); - runnable.run(); + init_fun(runnable); } assert(_currentThread == &thread); _currentThread = nullptr; @@ -30,8 +30,8 @@ Thread::Proxy::Run(FastOS_ThreadInterface *, void *) Thread::Proxy::~Proxy() = default; -Thread::Thread(Runnable &runnable) - : _proxy(*this, runnable), +Thread::Thread(Runnable &runnable, init_fun_t init_fun_in) + : _proxy(*this, runnable, std::move(init_fun_in)), _pool(STACK_SIZE, 1), _lock(), _cond(), diff --git a/vespalib/src/vespa/vespalib/util/thread.h b/vespalib/src/vespa/vespalib/util/thread.h index 8873f23ee98..e08f3ca1100 100644 --- 
a/vespalib/src/vespa/vespalib/util/thread.h +++ b/vespalib/src/vespa/vespalib/util/thread.h @@ -15,17 +15,19 @@ namespace vespalib { class Thread : public Active { private: + using init_fun_t = Runnable::init_fun_t; enum { STACK_SIZE = 256*1024 }; static __thread Thread *_currentThread; struct Proxy : FastOS_Runnable { Thread &thread; Runnable &runnable; + init_fun_t init_fun; vespalib::Gate start; vespalib::Gate started; bool cancel; - Proxy(Thread &parent, Runnable &target); + Proxy(Thread &parent, Runnable &target, init_fun_t init_fun_in); ~Proxy() override; void Run(FastOS_ThreadInterface *thisThread, void *arguments) override; @@ -39,7 +41,7 @@ private: bool _woken; public: - Thread(Runnable &runnable); + Thread(Runnable &runnable, init_fun_t init_fun_in); ~Thread() override; void start() override; Thread &stop() override; diff --git a/vespalog/src/logger/runserver.cpp b/vespalog/src/logger/runserver.cpp index c74806a8b5b..f9dde87ca14 100644 --- a/vespalog/src/logger/runserver.cpp +++ b/vespalog/src/logger/runserver.cpp @@ -54,13 +54,13 @@ bool whole_seconds(int cnt, int secs) { class PidFile { private: - char *_pidfile; + std::string _pidfile; int _fd; PidFile(const PidFile&); PidFile& operator= (const PidFile&); public: - PidFile(const char *pidfile) : _pidfile(strdup(pidfile)), _fd(-1) {} - ~PidFile() { free(_pidfile); if (_fd >= 0) close(_fd); } + PidFile(const char *pidfile) : _pidfile(pidfile), _fd(-1) {} + ~PidFile() { if (_fd >= 0) close(_fd); } int readPid(); void writePid(); bool writeOpen(); @@ -72,7 +72,7 @@ public: void PidFile::cleanUp() { - if (!anotherRunning()) remove(_pidfile); + if (!anotherRunning()) remove(_pidfile.c_str()); if (_fd >= 0) close(_fd); _fd = -1; } @@ -82,14 +82,14 @@ PidFile::writeOpen() { if (_fd >= 0) close(_fd); int flags = O_CREAT | O_WRONLY | O_NONBLOCK; - _fd = open(_pidfile, flags, 0644); + _fd = open(_pidfile.c_str(), flags, 0644); if (_fd < 0) { - fprintf(stderr, "could not create pidfile %s: %s\n", _pidfile, + 
fprintf(stderr, "could not create pidfile %s: %s\n", _pidfile.c_str(), strerror(errno)); return false; } if (flock(_fd, LOCK_EX | LOCK_NB) != 0) { - fprintf(stderr, "could not lock pidfile %s: %s\n", _pidfile, + fprintf(stderr, "could not lock pidfile %s: %s\n", _pidfile.c_str(), strerror(errno)); close(_fd); _fd = -1; @@ -106,7 +106,7 @@ PidFile::writePid() int didtruncate = ftruncate(_fd, (off_t)0); if (didtruncate != 0) { fprintf(stderr, "could not truncate pid file %s: %s\n", - _pidfile, strerror(errno)); + _pidfile.c_str(), strerror(errno)); std::_Exit(1); } char buf[100]; @@ -115,16 +115,16 @@ PidFile::writePid() ssize_t didw = write(_fd, buf, l); if (didw != l) { fprintf(stderr, "could not write pid to %s: %s\n", - _pidfile, strerror(errno)); + _pidfile.c_str(), strerror(errno)); std::_Exit(1); } - LOG(debug, "wrote '%s' to %s (fd %d)", buf, _pidfile, _fd); + LOG(debug, "wrote '%s' to %s (fd %d)", buf, _pidfile.c_str(), _fd); } int PidFile::readPid() { - FILE *pf = fopen(_pidfile, "r"); + FILE *pf = fopen(_pidfile.c_str(), "r"); if (pf == NULL) return 0; char buf[100]; strcpy(buf, "0"); @@ -151,7 +151,7 @@ bool PidFile::canStealLock() { int flags = O_WRONLY | O_NONBLOCK; - int desc = open(_pidfile, flags, 0644); + int desc = open(_pidfile.c_str(), flags, 0644); if (desc < 0) { return false; } diff --git a/vespalog/src/vespa/log/control-file.cpp b/vespalog/src/vespa/log/control-file.cpp index 77ad1d0ec73..2096dd1531c 100644 --- a/vespalog/src/vespa/log/control-file.cpp +++ b/vespalog/src/vespa/log/control-file.cpp @@ -5,7 +5,6 @@ #include <ctype.h> #include <cstdio> #include <sys/mman.h> -#include <sys/stat.h> #include <errno.h> #include <unistd.h> #include <memory> @@ -28,7 +27,7 @@ ControlFile::ControlFile(const char *file, Mode mode) : (O_RDWR | O_CREAT))), _fileSize(0), _mode(mode), - _fileName(strdup(file)), + _fileName(file), _prefix(0), _mapBase(0), _mappedSize(0), @@ -43,7 +42,6 @@ ControlFile::ControlFile(const char *file, Mode mode) 
ControlFile::~ControlFile() { freeMapping(); - free(_fileName); } void @@ -168,7 +166,7 @@ ControlFile::extendMapping() if (fileLen == -1) { _fileBacking.unlock(); - LOG(error, "Cannot get file size of '%s': %s", _fileName, + LOG(error, "Cannot get file size of '%s': %s", _fileName.c_str(), strerror(errno)); return false; } @@ -273,14 +271,14 @@ ControlFile::getLevels(const char *name) strcat(appendedString, "\n"); int len = strlen(appendedString); - int fd = open(_fileName, O_WRONLY | O_APPEND); + int fd = open(_fileName.c_str(), O_WRONLY | O_APPEND); int wlen = write(fd, appendedString, len); oldFileLength = lseek(fd, (off_t)0, SEEK_CUR) - wlen; close(fd); if (wlen != len) { _fileBacking.unlock(); LOG(error, "Writing to control file '%s' fails (%d/%d bytes): %s", - _fileName, wlen, len, strerror(errno)); + _fileName.c_str(), wlen, len, strerror(errno)); return reinterpret_cast<unsigned int *>(inheritLevels); } else { _fileSize = _fileBacking.size(); @@ -290,7 +288,7 @@ ControlFile::getLevels(const char *name) if (!extendMapping()) { _fileBacking.unlock(); // just for sure LOG(error, "Failed to extend mapping of '%s', losing runtime " - "configurability of component '%s'", _fileName, name); + "configurability of component '%s'", _fileName.c_str(), name); return defaultLevels(); } } diff --git a/vespalog/src/vespa/log/control-file.h b/vespalog/src/vespa/log/control-file.h index 6f302c7a97c..31c15077c8c 100644 --- a/vespalog/src/vespa/log/control-file.h +++ b/vespalog/src/vespa/log/control-file.h @@ -2,8 +2,8 @@ #pragma once #include "log.h" -#include "control-file.h" #include "lock.h" +#include <string> namespace ns_log { @@ -19,7 +19,7 @@ private: Lock _fileBacking; int _fileSize; enum Mode _mode; - char *_fileName; + std::string _fileName; void ensureHeader(); bool hasPrefix() { return (_prefix != NULL && _prefix[0] != '\0' && diff --git a/vespalog/src/vespa/log/internal.h b/vespalog/src/vespa/log/internal.h index c9081b72ce9..4411d9fa6e6 100644 --- 
a/vespalog/src/vespa/log/internal.h +++ b/vespalog/src/vespa/log/internal.h @@ -1,7 +1,7 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. #pragma once -#include <cstring> +#include <string> #include <cstdlib> #if !__GNUC__ && !defined(__attribute__) @@ -15,15 +15,14 @@ void throwInvalid(const char *fmt, ...) class InvalidLogException { private: - char *_what; - InvalidLogException& operator = (const InvalidLogException&); + std::string _what; public: - InvalidLogException(const InvalidLogException &x) : - _what(strdup(x._what)) {} - InvalidLogException(const char *s) : _what(strdup(s)) {} - ~InvalidLogException() { free(_what); } - const char *what() const { return _what; } + InvalidLogException& operator = (const InvalidLogException&) = delete; + InvalidLogException(const InvalidLogException &x) = default; + InvalidLogException(const char *s) : _what(s) {} + ~InvalidLogException() = default; + const char *what() const { return _what.c_str(); } }; } // end namespace ns_log |