diff options
author | Harald Musum <musum@yahooinc.com> | 2021-12-11 14:30:12 +0100 |
---|---|---|
committer | Harald Musum <musum@yahooinc.com> | 2021-12-11 14:30:12 +0100 |
commit | bc117aa5fd1aaa54ae8c86c103899bc81b5d481f (patch) | |
tree | a5dfea5fba4650f3f812bdc9cafae6ace4cae17d | |
parent | c85d5d3ab13b01fd9f66629b5439bb488afd168c (diff) | |
parent | 60b142c007083c773e910b44cc57d65e7f2c9274 (diff) |
Merge branch 'master' into revert-20366-revert-20350-hmusum/config-subscription-refactoring-part-5
458 files changed, 6209 insertions, 4054 deletions
diff --git a/application/abi-spec.json b/application/abi-spec.json index 5c298471b9c..21eacb152c0 100644 --- a/application/abi-spec.json +++ b/application/abi-spec.json @@ -93,7 +93,9 @@ "public void <init>()", "public void <init>(com.yahoo.application.MockApplicationConfig)", "public com.yahoo.application.MockApplicationConfig$Builder mystruct(com.yahoo.application.MockApplicationConfig$Mystruct$Builder)", + "public com.yahoo.application.MockApplicationConfig$Builder mystruct(java.util.function.Consumer)", "public com.yahoo.application.MockApplicationConfig$Builder mystructlist(com.yahoo.application.MockApplicationConfig$Mystructlist$Builder)", + "public com.yahoo.application.MockApplicationConfig$Builder mystructlist(java.util.function.Consumer)", "public com.yahoo.application.MockApplicationConfig$Builder mystructlist(java.util.List)", "public com.yahoo.application.MockApplicationConfig$Builder mylist(java.lang.String)", "public com.yahoo.application.MockApplicationConfig$Builder mylist(java.util.Collection)", @@ -101,6 +103,7 @@ "public com.yahoo.application.MockApplicationConfig$Builder mymap(java.util.Map)", "public com.yahoo.application.MockApplicationConfig$Builder mymapstruct(java.lang.String, com.yahoo.application.MockApplicationConfig$Mymapstruct$Builder)", "public com.yahoo.application.MockApplicationConfig$Builder mymapstruct(java.util.Map)", + "public com.yahoo.application.MockApplicationConfig$Builder mymapstruct(java.lang.String, java.util.function.Consumer)", "public final boolean dispatchGetConfig(com.yahoo.config.ConfigInstance$Producer)", "public final java.lang.String getDefMd5()", "public final java.lang.String getDefName()", @@ -347,7 +350,7 @@ "public final com.yahoo.processing.Response process(com.yahoo.component.ComponentSpecification, com.yahoo.processing.Request)", "protected abstract com.yahoo.processing.Response doProcess(com.yahoo.component.chain.Chain, com.yahoo.processing.Request)", "public final byte[] 
processAndRender(com.yahoo.component.ComponentSpecification, com.yahoo.component.ComponentSpecification, com.yahoo.processing.Request)", - "protected abstract com.google.common.util.concurrent.ListenableFuture doProcessAndRender(com.yahoo.component.ComponentSpecification, com.yahoo.processing.Request, com.yahoo.processing.rendering.Renderer, java.io.ByteArrayOutputStream)", + "protected abstract java.util.concurrent.CompletableFuture doProcessAndRender(com.yahoo.component.ComponentSpecification, com.yahoo.processing.Request, com.yahoo.processing.rendering.Renderer, java.io.ByteArrayOutputStream)", "protected com.yahoo.component.chain.Chain getChain(com.yahoo.component.ComponentSpecification)", "protected final com.yahoo.processing.rendering.Renderer getRenderer(com.yahoo.component.ComponentSpecification)", "protected abstract com.yahoo.processing.rendering.Renderer doGetRenderer(com.yahoo.component.ComponentSpecification)" diff --git a/application/src/main/java/com/yahoo/application/container/Processing.java b/application/src/main/java/com/yahoo/application/container/Processing.java index 1f96fe2294b..4ca367ea720 100644 --- a/application/src/main/java/com/yahoo/application/container/Processing.java +++ b/application/src/main/java/com/yahoo/application/container/Processing.java @@ -2,7 +2,6 @@ package com.yahoo.application.container; import com.yahoo.api.annotations.Beta; -import com.google.common.util.concurrent.ListenableFuture; import com.yahoo.component.ComponentSpecification; import com.yahoo.component.chain.Chain; import com.yahoo.processing.Processor; @@ -15,6 +14,7 @@ import com.yahoo.processing.rendering.Renderer; import java.io.ByteArrayOutputStream; import java.io.IOException; +import java.util.concurrent.CompletableFuture; /** * @author Einar M R Rosenvinge @@ -41,14 +41,14 @@ public final class Processing extends ProcessingBase<Request, Response, Processo } @Override - protected ListenableFuture<Boolean> doProcessAndRender(ComponentSpecification 
chainSpec, - Request request, - Renderer<Response> renderer, - ByteArrayOutputStream stream) throws IOException { + protected CompletableFuture<Boolean> doProcessAndRender(ComponentSpecification chainSpec, + Request request, + Renderer<Response> renderer, + ByteArrayOutputStream stream) throws IOException { Execution execution = handler.createExecution(getChain(chainSpec), request); Response response = execution.process(request); - return renderer.render(stream, response, execution, request); + return renderer.renderResponse(stream, response, execution, request); } @Override diff --git a/application/src/main/java/com/yahoo/application/container/ProcessingBase.java b/application/src/main/java/com/yahoo/application/container/ProcessingBase.java index 2b4ea822d03..96866b94e29 100644 --- a/application/src/main/java/com/yahoo/application/container/ProcessingBase.java +++ b/application/src/main/java/com/yahoo/application/container/ProcessingBase.java @@ -2,20 +2,18 @@ package com.yahoo.application.container; import com.yahoo.api.annotations.Beta; -import com.google.common.util.concurrent.ListenableFuture; import com.yahoo.component.ComponentSpecification; import com.yahoo.component.chain.Chain; import com.yahoo.processing.Processor; import com.yahoo.processing.Request; import com.yahoo.processing.Response; import com.yahoo.processing.execution.chain.ChainRegistry; -import com.yahoo.processing.rendering.AsynchronousRenderer; import com.yahoo.processing.rendering.Renderer; import java.io.ByteArrayOutputStream; import java.io.IOException; +import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; -import java.util.concurrent.Executors; /** * @author gjoranv @@ -45,13 +43,13 @@ public abstract class ProcessingBase<REQUEST extends Request, RESPONSE extends R REQUEST request) throws IOException { ByteArrayOutputStream stream = new ByteArrayOutputStream(); Renderer<RESPONSE> renderer = getRenderer(rendererSpec); - ListenableFuture<Boolean> 
renderTask = doProcessAndRender(chainSpec, request, renderer, stream); + CompletableFuture<Boolean> renderTask = doProcessAndRender(chainSpec, request, renderer, stream); awaitFuture(renderTask); return stream.toByteArray(); } - private void awaitFuture(ListenableFuture<Boolean> renderTask) { + private void awaitFuture(CompletableFuture<Boolean> renderTask) { try { renderTask.get(); } catch (InterruptedException | ExecutionException e) { @@ -59,10 +57,10 @@ public abstract class ProcessingBase<REQUEST extends Request, RESPONSE extends R } } - protected abstract ListenableFuture<Boolean> doProcessAndRender(ComponentSpecification chainSpec, - REQUEST request, - Renderer<RESPONSE> renderer, - ByteArrayOutputStream stream) throws IOException ; + protected abstract CompletableFuture<Boolean> doProcessAndRender(ComponentSpecification chainSpec, + REQUEST request, + Renderer<RESPONSE> renderer, + ByteArrayOutputStream stream) throws IOException ; protected Chain<PROCESSOR> getChain(ComponentSpecification chainSpec) { Chain<PROCESSOR> chain = getChains().getComponent(chainSpec); diff --git a/application/src/main/java/com/yahoo/application/container/Search.java b/application/src/main/java/com/yahoo/application/container/Search.java index 3535b660b78..6a2f728fbcc 100644 --- a/application/src/main/java/com/yahoo/application/container/Search.java +++ b/application/src/main/java/com/yahoo/application/container/Search.java @@ -2,7 +2,6 @@ package com.yahoo.application.container; import com.yahoo.api.annotations.Beta; -import com.google.common.util.concurrent.ListenableFuture; import com.yahoo.component.ComponentSpecification; import com.yahoo.component.chain.Chain; import com.yahoo.processing.execution.chain.ChainRegistry; @@ -12,10 +11,10 @@ import com.yahoo.search.Result; import com.yahoo.search.Searcher; import com.yahoo.search.handler.HttpSearchResponse; import com.yahoo.search.handler.SearchHandler; -import com.yahoo.search.searchchain.SearchChainRegistry; import 
java.io.ByteArrayOutputStream; import java.io.IOException; +import java.util.concurrent.CompletableFuture; /** * @author Einar M R Rosenvinge @@ -41,12 +40,12 @@ public final class Search extends ProcessingBase<Query, Result, Searcher> { } @Override - protected ListenableFuture<Boolean> doProcessAndRender(ComponentSpecification chainSpec, - Query request, - Renderer<Result> renderer, - ByteArrayOutputStream stream) throws IOException { + protected CompletableFuture<Boolean> doProcessAndRender(ComponentSpecification chainSpec, + Query request, + Renderer<Result> renderer, + ByteArrayOutputStream stream) throws IOException { Result result = process(chainSpec, request); - return HttpSearchResponse.waitableRender(result, result.getQuery(), renderer, stream); + return HttpSearchResponse.asyncRender(result, result.getQuery(), renderer, stream); } @Override diff --git a/application/src/test/java/com/yahoo/application/container/docprocs/MockDispatchDocproc.java b/application/src/test/java/com/yahoo/application/container/docprocs/MockDispatchDocproc.java index d8c86728160..d069b345b93 100644 --- a/application/src/test/java/com/yahoo/application/container/docprocs/MockDispatchDocproc.java +++ b/application/src/test/java/com/yahoo/application/container/docprocs/MockDispatchDocproc.java @@ -1,10 +1,8 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.application.container.docprocs; -import com.google.common.util.concurrent.ListenableFuture; import com.yahoo.docproc.DocumentProcessor; import com.yahoo.docproc.Processing; -import com.yahoo.document.Document; import com.yahoo.document.DocumentOperation; import com.yahoo.document.DocumentPut; import com.yahoo.documentapi.messagebus.protocol.DocumentMessage; @@ -40,7 +38,7 @@ public class MockDispatchDocproc extends DocumentProcessor { public Progress process(Processing processing) { for (DocumentOperation op : processing.getDocumentOperations()) { PutDocumentMessage message = new PutDocumentMessage((DocumentPut)op); - ListenableFuture<Response> future = createRequest(message).dispatch(); + var future = createRequest(message).dispatch(); try { responses.add(future.get()); } catch (ExecutionException | InterruptedException e) { diff --git a/client/go/cmd/prod.go b/client/go/cmd/prod.go index 89dc4cb6094..c686f1d29ad 100644 --- a/client/go/cmd/prod.go +++ b/client/go/cmd/prod.go @@ -142,11 +142,10 @@ $ vespa prod submit`, if pkg.TestPath == "" { fatalErrHint(fmt.Errorf("No tests found"), "The application must be a Java maven project, or include basic HTTP tests under src/test/application/", - "See https://cloud.vespa.ai/en/reference/getting-to-production") + "See https://cloud.vespa.ai/en/getting-to-production") return - } else { - verifyTests(pkg.TestPath, target) } + verifyTests(pkg.TestPath, target) isCI := os.Getenv("CI") != "" if !isCI { fmt.Fprintln(stderr, color.Yellow("Warning:"), "We recommend doing this only from a CD job") @@ -352,10 +351,26 @@ func prompt(r *bufio.Reader, question, defaultAnswer string, validator func(inpu } func verifyTests(testsParent string, target vespa.Target) { - runTests(filepath.Join(testsParent, "tests", "system-test"), target, true) - runTests(filepath.Join(testsParent, "tests", "staging-setup"), target, true) - runTests(filepath.Join(testsParent, "tests", "staging-test"), target, true) - if 
util.PathExists(filepath.Join(testsParent, "tests", "production-test")) { - runTests(filepath.Join(testsParent, "tests", "production-test"), target, true) + verifyTest(testsParent, "system-test", target, true) + verifyTest(testsParent, "staging-setup", target, true) + verifyTest(testsParent, "staging-test", target, true) + verifyTest(testsParent, "production-test", target, false) +} + +func verifyTest(testsParent string, suite string, target vespa.Target, required bool) { + testDirectory := filepath.Join(testsParent, "tests", suite) + _, err := os.Stat(testDirectory) + if err != nil { + if required { + if errors.Is(err, os.ErrNotExist) { + fatalErrHint(fmt.Errorf("No %s tests found", suite), + fmt.Sprintf("No such directory: %s", testDirectory), + "See https://cloud.vespa.ai/en/reference/testing") + } + fatalErrHint(err, "See https://cloud.vespa.ai/en/reference/testing") + } + return } + + runTests(testDirectory, true) } diff --git a/client/go/cmd/test.go b/client/go/cmd/test.go index 7ba7a19b235..262b57eff33 100644 --- a/client/go/cmd/test.go +++ b/client/go/cmd/test.go @@ -17,7 +17,6 @@ import ( "net/http" "net/url" "os" - "path" "path/filepath" "strings" "time" @@ -29,25 +28,19 @@ func init() { } var testCmd = &cobra.Command{ - Use: "test [tests directory or test file]", + Use: "test <tests directory or test file>", Short: "Run a test suite, or a single test", Long: `Run a test suite, or a single test -Runs all JSON test files in the specified directory (the working -directory by default), or the single JSON test file specified. +Runs all JSON test files in the specified directory, or the single JSON test file specified. 
See https://cloud.vespa.ai/en/reference/testing.html for details.`, Example: `$ vespa test src/test/application/tests/system-test $ vespa test src/test/application/tests/system-test/feed-and-query.json`, - Args: cobra.MaximumNArgs(1), + Args: cobra.ExactArgs(1), DisableAutoGenTag: true, Run: func(cmd *cobra.Command, args []string) { - target := getTarget() - testPath := "." - if len(args) > 0 { - testPath = args[0] - } - if count, failed := runTests(testPath, target, false); len(failed) != 0 { + if count, failed := runTests(args[0], false); len(failed) != 0 { plural := "s" if count == 1 { plural = "" @@ -67,7 +60,7 @@ $ vespa test src/test/application/tests/system-test/feed-and-query.json`, }, } -func runTests(rootPath string, target vespa.Target, dryRun bool) (int, []string) { +func runTests(rootPath string, dryRun bool) (int, []string) { count := 0 failed := make([]string, 0) if stat, err := os.Stat(rootPath); err != nil { @@ -77,15 +70,16 @@ func runTests(rootPath string, target vespa.Target, dryRun bool) (int, []string) if err != nil { fatalErrHint(err, "See https://cloud.vespa.ai/en/reference/testing") } + context := testContext{testsPath: rootPath, dryRun: dryRun} previousFailed := false for _, test := range tests { if !test.IsDir() && filepath.Ext(test.Name()) == ".json" { - testPath := path.Join(rootPath, test.Name()) + testPath := filepath.Join(rootPath, test.Name()) if previousFailed { fmt.Fprintln(stdout, "") previousFailed = false } - failure := runTest(testPath, target, dryRun) + failure := runTest(testPath, context) if failure != "" { failed = append(failed, failure) previousFailed = true @@ -94,7 +88,7 @@ func runTests(rootPath string, target vespa.Target, dryRun bool) (int, []string) } } } else if strings.HasSuffix(stat.Name(), ".json") { - failure := runTest(rootPath, target, dryRun) + failure := runTest(rootPath, testContext{testsPath: filepath.Dir(rootPath), dryRun: dryRun}) if failure != "" { failed = append(failed, failure) } @@ -107,7 +101,7 
@@ func runTests(rootPath string, target vespa.Target, dryRun bool) (int, []string) } // Runs the test at the given path, and returns the specified test name if the test fails -func runTest(testPath string, target vespa.Target, dryRun bool) string { +func runTest(testPath string, context testContext) string { var test test testBytes, err := ioutil.ReadFile(testPath) if err != nil { @@ -121,11 +115,11 @@ func runTest(testPath string, target vespa.Target, dryRun bool) string { if test.Name == "" { testName = filepath.Base(testPath) } - if !dryRun { - fmt.Fprintf(stdout, "Running %s:", color.Cyan(testName)) + if !context.dryRun { + fmt.Fprintf(stdout, "%s:", testName) } - defaultParameters, err := getParameters(test.Defaults.ParametersRaw, path.Dir(testPath)) + defaultParameters, err := getParameters(test.Defaults.ParametersRaw, filepath.Dir(testPath)) if err != nil { fmt.Fprintln(stderr) fatalErrHint(err, fmt.Sprintf("Invalid default parameters for %s", testName), "See https://cloud.vespa.ai/en/reference/testing") @@ -136,40 +130,40 @@ func runTest(testPath string, target vespa.Target, dryRun bool) string { fatalErrHint(fmt.Errorf("a test must have at least one step, but none were found in %s", testPath), "See https://cloud.vespa.ai/en/reference/testing") } for i, step := range test.Steps { - stepName := step.Name - if stepName == "" { - stepName = fmt.Sprintf("step %d", i+1) + stepName := fmt.Sprintf("Step %d", i+1) + if step.Name != "" { + stepName += ": " + step.Name } - failure, longFailure, err := verify(step, path.Dir(testPath), test.Defaults.Cluster, defaultParameters, target, dryRun) + failure, longFailure, err := verify(step, test.Defaults.Cluster, defaultParameters, context) if err != nil { fmt.Fprintln(stderr) fatalErrHint(err, fmt.Sprintf("Error in %s", stepName), "See https://cloud.vespa.ai/en/reference/testing") } - if !dryRun { + if !context.dryRun { if failure != "" { - fmt.Fprintf(stdout, " %s %s:\n%s\n", color.Red("Failed"), color.Cyan(stepName), 
longFailure) + fmt.Fprintf(stdout, " %s\n%s:\n%s\n", color.Red("failed"), stepName, longFailure) return fmt.Sprintf("%s: %s: %s", testName, stepName, failure) } if i == 0 { fmt.Fprintf(stdout, " ") } - fmt.Fprint(stdout, color.Green(".")) + fmt.Fprint(stdout, ".") } } - if !dryRun { + if !context.dryRun { fmt.Fprintln(stdout, color.Green(" OK")) } return "" } // Asserts specified response is obtained for request, or returns a failure message, or an error if this fails -func verify(step step, testsPath string, defaultCluster string, defaultParameters map[string]string, target vespa.Target, dryRun bool) (string, string, error) { - requestBody, err := getBody(step.Request.BodyRaw, testsPath) +func verify(step step, defaultCluster string, defaultParameters map[string]string, context testContext) (string, string, error) { + requestBody, err := getBody(step.Request.BodyRaw, context.testsPath) if err != nil { return "", "", err } - parameters, err := getParameters(step.Request.ParametersRaw, testsPath) + parameters, err := getParameters(step.Request.ParametersRaw, context.testsPath) if err != nil { return "", "", err } @@ -184,19 +178,12 @@ func verify(step step, testsPath string, defaultCluster string, defaultParameter cluster = defaultCluster } - var service *vespa.Service - if !dryRun { - service, err = target.Service("query", 0, 0, cluster) - if err != nil { - return "", "", err - } - } - method := step.Request.Method if method == "" { method = "GET" } + var service *vespa.Service requestUri := step.Request.URI if requestUri == "" { requestUri = "/search/" @@ -206,12 +193,12 @@ func verify(step step, testsPath string, defaultCluster string, defaultParameter return "", "", err } externalEndpoint := requestUrl.IsAbs() - if !externalEndpoint { - baseURL := "http://dummy/" - if service != nil { - baseURL = service.BaseURL + if !externalEndpoint && !context.dryRun { + service, err = context.target().Service("query", 0, 0, cluster) + if err != nil { + return "", "", err } - 
requestUrl, err = url.ParseRequestURI(baseURL + requestUri) + requestUrl, err = url.ParseRequestURI(service.BaseURL + requestUri) if err != nil { return "", "", err } @@ -238,7 +225,7 @@ func verify(step step, testsPath string, defaultCluster string, defaultParameter statusCode = 200 } - responseBodySpecBytes, err := getBody(step.Response.BodyRaw, testsPath) + responseBodySpecBytes, err := getBody(step.Response.BodyRaw, context.testsPath) if err != nil { return "", "", err } @@ -250,7 +237,7 @@ func verify(step step, testsPath string, defaultCluster string, defaultParameter } } - if dryRun { + if context.dryRun { return "", "", nil } @@ -267,8 +254,13 @@ func verify(step step, testsPath string, defaultCluster string, defaultParameter defer response.Body.Close() if statusCode != response.StatusCode { - failure := fmt.Sprintf("Unexpected %s: %s", "status code", color.Red(response.StatusCode)) - return failure, fmt.Sprintf("%s\nExpected: %s\nActual response:\n%s", failure, color.Cyan(statusCode), util.ReaderToJSON(response.Body)), nil + return fmt.Sprintf("Unexpected status code: %d", color.Red(response.StatusCode)), + fmt.Sprintf("Unexpected status code\nExpected: %d\nActual: %d\nRequested: %s at %s\nResponse:\n%s", + color.Cyan(statusCode), + color.Red(response.StatusCode), + color.Cyan(method), + color.Cyan(requestUrl), + util.ReaderToJSON(response.Body)), nil } if responseBodySpec == nil { @@ -285,20 +277,24 @@ func verify(step step, testsPath string, defaultCluster string, defaultParameter return "", "", fmt.Errorf("got non-JSON response; %w:\n%s", err, string(responseBodyBytes)) } - failure, expected, err := compare(responseBodySpec, responseBody, "") + failure, expected, actual, err := compare(responseBodySpec, responseBody, "") if failure != "" { responsePretty, _ := json.MarshalIndent(responseBody, "", " ") longFailure := failure if expected != "" { - longFailure += "\n" + expected + longFailure += "\nExpected: " + expected + } + if actual != "" { + failure 
+= ": " + actual + longFailure += "\nActual: " + actual } - longFailure += "\nActual response:\n" + string(responsePretty) + longFailure += fmt.Sprintf("\nRequested: %s at %s\nResponse:\n%s", color.Cyan(method), color.Cyan(requestUrl), string(responsePretty)) return failure, longFailure, err } return "", "", err } -func compare(expected interface{}, actual interface{}, path string) (string, string, error) { +func compare(expected interface{}, actual interface{}, path string) (string, string, string, error) { typeMatch := false valueMatch := false switch u := expected.(type) { @@ -323,18 +319,15 @@ func compare(expected interface{}, actual interface{}, path string) (string, str if ok { if len(u) == len(v) { for i, e := range u { - failure, expected, err := compare(e, v[i], fmt.Sprintf("%s/%d", path, i)) - if failure != "" || err != nil { - return failure, expected, err + if failure, expected, actual, err := compare(e, v[i], fmt.Sprintf("%s/%d", path, i)); failure != "" || err != nil { + return failure, expected, actual, err } } valueMatch = true } else { - return fmt.Sprintf("Unexpected %s at %s: %d", - "number of elements", - color.Cyan(path), - color.Red(len(v))), - fmt.Sprintf("Expected: %d", color.Cyan(len(u))), + return fmt.Sprintf("Unexpected number of elements at %s", color.Cyan(path)), + fmt.Sprintf("%d", color.Cyan(len(u))), + fmt.Sprintf("%d", color.Red(len(v))), nil } } @@ -346,17 +339,16 @@ func compare(expected interface{}, actual interface{}, path string) (string, str childPath := fmt.Sprintf("%s/%s", path, strings.ReplaceAll(strings.ReplaceAll(n, "~", "~0"), "/", "~1")) f, ok := v[n] if !ok { - return fmt.Sprintf("Missing expected field at %s", color.Red(childPath)), "", nil + return fmt.Sprintf("Missing expected field at %s", color.Red(childPath)), "", "", nil } - failure, expected, err := compare(e, f, childPath) - if failure != "" || err != nil { - return failure, expected, err + if failure, expected, actual, err := compare(e, f, childPath); 
failure != "" || err != nil { + return failure, expected, actual, err } } valueMatch = true } default: - return "", "", fmt.Errorf("unexpected JSON type for value '%v'", expected) + return "", "", "", fmt.Errorf("unexpected JSON type for value '%v'", expected) } if !valueMatch { @@ -369,21 +361,22 @@ func compare(expected interface{}, actual interface{}, path string) (string, str } expectedJson, _ := json.Marshal(expected) actualJson, _ := json.Marshal(actual) - return fmt.Sprintf("Unexpected %s at %s: %s", - mismatched, - color.Cyan(path), - color.Red(actualJson)), - fmt.Sprintf("Expected: %s", color.Cyan(expectedJson)), + return fmt.Sprintf("Unexpected %s at %s", mismatched, color.Cyan(path)), + fmt.Sprintf("%s", color.Cyan(expectedJson)), + fmt.Sprintf("%s", color.Red(actualJson)), nil } - return "", "", nil + return "", "", "", nil } func getParameters(parametersRaw []byte, testsPath string) (map[string]string, error) { if parametersRaw != nil { var parametersPath string if err := json.Unmarshal(parametersRaw, ¶metersPath); err == nil { - resolvedParametersPath := path.Join(testsPath, parametersPath) + if err = validateRelativePath(parametersPath); err != nil { + return nil, err + } + resolvedParametersPath := filepath.Join(testsPath, parametersPath) parametersRaw, err = ioutil.ReadFile(resolvedParametersPath) if err != nil { return nil, fmt.Errorf("failed to read request parameters at %s: %w", resolvedParametersPath, err) @@ -401,7 +394,10 @@ func getParameters(parametersRaw []byte, testsPath string) (map[string]string, e func getBody(bodyRaw []byte, testsPath string) ([]byte, error) { var bodyPath string if err := json.Unmarshal(bodyRaw, &bodyPath); err == nil { - resolvedBodyPath := path.Join(testsPath, bodyPath) + if err = validateRelativePath(bodyPath); err != nil { + return nil, err + } + resolvedBodyPath := filepath.Join(testsPath, bodyPath) bodyRaw, err = ioutil.ReadFile(resolvedBodyPath) if err != nil { return nil, fmt.Errorf("failed to read body file 
at %s: %w", resolvedBodyPath, err) @@ -410,6 +406,17 @@ func getBody(bodyRaw []byte, testsPath string) ([]byte, error) { return bodyRaw, nil } +func validateRelativePath(relPath string) error { + if filepath.IsAbs(relPath) { + return fmt.Errorf("path must be relative, but was '%s'", relPath) + } + cleanPath := filepath.Clean(relPath) + if strings.HasPrefix(cleanPath, "../../../") { + return fmt.Errorf("path may not point outside src/test/application, but '%s' does", relPath) + } + return nil +} + type test struct { Name string `json:"name"` Defaults defaults `json:"defaults"` @@ -439,3 +446,16 @@ type response struct { Code int `json:"code"` BodyRaw json.RawMessage `json:"body"` } + +type testContext struct { + lazyTarget vespa.Target + testsPath string + dryRun bool +} + +func (t *testContext) target() vespa.Target { + if t.lazyTarget == nil { + t.lazyTarget = getTarget() + } + return t.lazyTarget +} diff --git a/client/go/cmd/test_test.go b/client/go/cmd/test_test.go index 4c5e4c3f1e5..6649353df77 100644 --- a/client/go/cmd/test_test.go +++ b/client/go/cmd/test_test.go @@ -5,6 +5,7 @@ package cmd import ( + "fmt" "github.com/vespa-engine/vespa/client/go/util" "github.com/vespa-engine/vespa/client/go/vespa" "io/ioutil" @@ -23,29 +24,41 @@ func TestSuite(t *testing.T) { searchResponse, _ := ioutil.ReadFile("testdata/tests/response.json") client.NextStatus(200) client.NextStatus(200) - for i := 0; i < 10; i++ { + for i := 0; i < 11; i++ { client.NextResponse(200, string(searchResponse)) } expectedBytes, _ := ioutil.ReadFile("testdata/tests/expected-suite.out") outBytes, errBytes := execute(command{args: []string{"test", "testdata/tests/system-test"}}, t, client) - assert.Equal(t, string(expectedBytes), outBytes) - assert.Equal(t, "", errBytes) baseUrl := "http://127.0.0.1:8080" urlWithQuery := baseUrl + "/search/?presentation.timing=true&query=artist%3A+foo&timeout=3.4s" requests := []*http.Request{createFeedRequest(baseUrl), createFeedRequest(baseUrl), 
createSearchRequest(urlWithQuery), createSearchRequest(urlWithQuery)} - for i := 0; i < 8; i++ { + requests = append(requests, createSearchRequest(baseUrl+"/search/")) + requests = append(requests, createSearchRequest(baseUrl+"/search/?foo=%2F")) + for i := 0; i < 7; i++ { requests = append(requests, createSearchRequest(baseUrl+"/search/")) } assertRequests(requests, client, t) + fmt.Println(outBytes) + assert.Equal(t, string(expectedBytes), outBytes) + assert.Equal(t, "", errBytes) +} + +func TestIllegalFileReference(t *testing.T) { + client := &mockHttpClient{} + client.NextStatus(200) + client.NextStatus(200) + _, errBytes := execute(command{args: []string{"test", "testdata/tests/production-test/illegal-reference.json"}}, t, client) + assertRequests([]*http.Request{createRequest("GET", "http://127.0.0.1:8080/search/", "{}")}, client, t) + assert.Equal(t, "\nError: path may not point outside src/test/application, but 'foo/../../../../this-is-not-ok.json' does\nHint: Error in Step 2\nHint: See https://cloud.vespa.ai/en/reference/testing\n", errBytes) } func TestProductionTest(t *testing.T) { client := &mockHttpClient{} client.NextStatus(200) outBytes, errBytes := execute(command{args: []string{"test", "testdata/tests/production-test/external.json"}}, t, client) - assert.Equal(t, "Running external.json: . OK\n\nSuccess: 1 test OK\n", outBytes) + assert.Equal(t, "external.json: . OK\n\nSuccess: 1 test OK\n", outBytes) assert.Equal(t, "", errBytes) assertRequests([]*http.Request{createRequest("GET", "https://my.service:123/path?query=wohoo", "")}, client, t) } diff --git a/client/go/cmd/testdata/empty.json b/client/go/cmd/testdata/empty.json new file mode 100644 index 00000000000..9e26dfeeb6e --- /dev/null +++ b/client/go/cmd/testdata/empty.json @@ -0,0 +1 @@ +{}
\ No newline at end of file diff --git a/client/go/cmd/testdata/tests/expected-suite.out b/client/go/cmd/testdata/tests/expected-suite.out index 963889b8019..df916f50a95 100644 --- a/client/go/cmd/testdata/tests/expected-suite.out +++ b/client/go/cmd/testdata/tests/expected-suite.out @@ -1,8 +1,11 @@ -Running my test: .... OK -Running wrong-bool-value.json: Failed step 1: -Unexpected value at /root/coverage/full: true +My test: .... OK +wrong-bool-value.json: failed +Step 1: +Unexpected value at /root/coverage/full Expected: false -Actual response: +Actual: true +Requested: GET at http://127.0.0.1:8080/search/ +Response: { "root": { "children": [ @@ -38,10 +41,55 @@ Actual response: } } -Running wrong-element-count.json: Failed step 1: -Unexpected number of elements at /root/children: 1 +wrong-code.json: failed +Step 1: +Unexpected status code +Expected: 123 +Actual: 200 +Requested: GET at http://127.0.0.1:8080/search/?foo=%2F +Response: +{ + "root": { + "children": [ + { + "fields": { + "artist": "Foo Fighters", + "documentid": "id:test:music::doc", + "sddocname": "music" + }, + "id": "id:test:music::doc", + "relevance": 0.38186238359951247, + "source": "music" + } + ], + "coverage": { + "coverage": 100, + "documents": 1, + "full": true, + "nodes": 1, + "results": 1, + "resultsFull": 1 + }, + "fields": { + "totalCount": 1 + }, + "id": "toplevel", + "relevance": 1 + }, + "timing": { + "querytime": 0.003, + "searchtime": 0.004, + "summaryfetchtime": 0 + } +} + +wrong-element-count.json: failed +Step 1: +Unexpected number of elements at /root/children Expected: 0 -Actual response: +Actual: 1 +Requested: GET at http://127.0.0.1:8080/search/ +Response: { "root": { "children": [ @@ -77,9 +125,11 @@ Actual response: } } -Running wrong-field-name.json: Failed step 1: +wrong-field-name.json: failed +Step 1: Missing expected field at /root/fields/totalCountDracula -Actual response: +Requested: GET at http://127.0.0.1:8080/search/ +Response: { "root": { "children": [ @@ 
-115,10 +165,13 @@ Actual response: } } -Running wrong-float-value.json: Failed step 1: -Unexpected value at /root/children/0/relevance: 0.38186238359951247 +wrong-float-value.json: failed +Step 1: +Unexpected value at /root/children/0/relevance Expected: 0.381862373599 -Actual response: +Actual: 0.38186238359951247 +Requested: GET at http://127.0.0.1:8080/search/ +Response: { "root": { "children": [ @@ -154,10 +207,13 @@ Actual response: } } -Running wrong-int-value.json: Failed step 1: -Unexpected value at /root/fields/totalCount: 1 +wrong-int-value.json: failed +Step 1: +Unexpected value at /root/fields/totalCount Expected: 2 -Actual response: +Actual: 1 +Requested: GET at http://127.0.0.1:8080/search/ +Response: { "root": { "children": [ @@ -193,9 +249,11 @@ Actual response: } } -Running wrong-null-value.json: Failed step 1: +wrong-null-value.json: failed +Step 1: Missing expected field at /boot -Actual response: +Requested: GET at http://127.0.0.1:8080/search/ +Response: { "root": { "children": [ @@ -231,10 +289,13 @@ Actual response: } } -Running wrong-string-value.json: Failed step 1: -Unexpected value at /root/children/0/fields/artist: "Foo Fighters" +wrong-string-value.json: failed +Step 1: +Unexpected value at /root/children/0/fields/artist Expected: "Boo Fighters" -Actual response: +Actual: "Foo Fighters" +Requested: GET at http://127.0.0.1:8080/search/ +Response: { "root": { "children": [ @@ -270,10 +331,13 @@ Actual response: } } -Running wrong-type.json: Failed step 1: -Unexpected type at /root/fields/totalCount: 1 +wrong-type.json: failed +Step 1: +Unexpected type at /root/fields/totalCount Expected: "1" -Actual response: +Actual: 1 +Requested: GET at http://127.0.0.1:8080/search/ +Response: { "root": { "children": [ @@ -309,12 +373,13 @@ Actual response: } } -Failure: 8 of 9 tests failed: -wrong-bool-value.json: step 1: Unexpected value at /root/coverage/full: true -wrong-element-count.json: step 1: Unexpected number of elements at /root/children: 1 
-wrong-field-name.json: step 1: Missing expected field at /root/fields/totalCountDracula -wrong-float-value.json: step 1: Unexpected value at /root/children/0/relevance: 0.38186238359951247 -wrong-int-value.json: step 1: Unexpected value at /root/fields/totalCount: 1 -wrong-null-value.json: step 1: Missing expected field at /boot -wrong-string-value.json: step 1: Unexpected value at /root/children/0/fields/artist: "Foo Fighters" -wrong-type.json: step 1: Unexpected type at /root/fields/totalCount: 1 +Failure: 9 of 10 tests failed: +wrong-bool-value.json: Step 1: Unexpected value at /root/coverage/full: true +wrong-code.json: Step 1: Unexpected status code: 200 +wrong-element-count.json: Step 1: Unexpected number of elements at /root/children: 1 +wrong-field-name.json: Step 1: Missing expected field at /root/fields/totalCountDracula +wrong-float-value.json: Step 1: Unexpected value at /root/children/0/relevance: 0.38186238359951247 +wrong-int-value.json: Step 1: Unexpected value at /root/fields/totalCount: 1 +wrong-null-value.json: Step 1: Missing expected field at /boot +wrong-string-value.json: Step 1: Unexpected value at /root/children/0/fields/artist: "Foo Fighters" +wrong-type.json: Step 1: Unexpected type at /root/fields/totalCount: 1 diff --git a/client/go/cmd/testdata/tests/expected.out b/client/go/cmd/testdata/tests/expected.out index 084fb10f72a..2ca35fe6a37 100644 --- a/client/go/cmd/testdata/tests/expected.out +++ b/client/go/cmd/testdata/tests/expected.out @@ -1,3 +1,3 @@ -Running my test: .... OK +My test: .... 
OK Success: 1 test OK diff --git a/client/go/cmd/testdata/tests/production-test/illegal-reference.json b/client/go/cmd/testdata/tests/production-test/illegal-reference.json new file mode 100644 index 00000000000..edd8a2fafeb --- /dev/null +++ b/client/go/cmd/testdata/tests/production-test/illegal-reference.json @@ -0,0 +1,14 @@ +{ + "steps": [ + { + "request": { + "body": "foo/../../../empty.json" + } + }, + { + "request": { + "body": "foo/../../../../this-is-not-ok.json" + } + } + ] +}
\ No newline at end of file diff --git a/client/go/cmd/testdata/tests/system-test/test.json b/client/go/cmd/testdata/tests/system-test/test.json index f53df929dbd..2e327b5e5df 100644 --- a/client/go/cmd/testdata/tests/system-test/test.json +++ b/client/go/cmd/testdata/tests/system-test/test.json @@ -1,5 +1,5 @@ { - "name": "my test", + "name": "My test", "defaults": { "cluster": "container", "parameters": { diff --git a/client/go/cmd/testdata/tests/system-test/wrong-code.json b/client/go/cmd/testdata/tests/system-test/wrong-code.json new file mode 100644 index 00000000000..c325054faa1 --- /dev/null +++ b/client/go/cmd/testdata/tests/system-test/wrong-code.json @@ -0,0 +1,14 @@ +{ + "steps": [ + { + "request": { + "parameters": { + "foo": "/" + } + }, + "response": { + "code": 123 + } + } + ] +} diff --git a/config-model-api/abi-spec.json b/config-model-api/abi-spec.json index 28b07fcd94f..8ad9f66ee6a 100644 --- a/config-model-api/abi-spec.json +++ b/config-model-api/abi-spec.json @@ -611,7 +611,8 @@ "public static final enum com.yahoo.config.application.api.ValidationId accessControl", "public static final enum com.yahoo.config.application.api.ValidationId globalEndpointChange", "public static final enum com.yahoo.config.application.api.ValidationId redundancyIncrease", - "public static final enum com.yahoo.config.application.api.ValidationId redundancyOne" + "public static final enum com.yahoo.config.application.api.ValidationId redundancyOne", + "public static final enum com.yahoo.config.application.api.ValidationId pagedSettingRemoval" ] }, "com.yahoo.config.application.api.ValidationOverrides$Allow": { diff --git a/config-model-api/src/main/java/com/yahoo/config/application/api/ValidationId.java b/config-model-api/src/main/java/com/yahoo/config/application/api/ValidationId.java index b8f6caa70d3..71e0b0926b9 100644 --- a/config-model-api/src/main/java/com/yahoo/config/application/api/ValidationId.java +++ 
b/config-model-api/src/main/java/com/yahoo/config/application/api/ValidationId.java @@ -25,7 +25,8 @@ public enum ValidationId { accessControl("access-control"), // Internal use, used in zones where there should be no access-control globalEndpointChange("global-endpoint-change"), // Changing global endpoints redundancyIncrease("redundancy-increase"), // Increasing redundancy - may easily cause feed blocked - redundancyOne("redundancy-one"); // redundancy=1 requires a validation override on first deployment + redundancyOne("redundancy-one"), // redundancy=1 requires a validation override on first deployment + pagedSettingRemoval("paged-setting-removal"); // May cause content nodes to run out of memory private final String id; diff --git a/config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java b/config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java index 6ed2f3daa30..3df93f7d08d 100644 --- a/config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java +++ b/config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java @@ -109,6 +109,7 @@ public interface ModelContext { @ModelFeatureFlag(owners = {"arnej"}) default boolean ignoreThreadStackSizes() { return false; } @ModelFeatureFlag(owners = {"vekterli", "geirst"}) default boolean unorderedMergeChaining() { return false; } @ModelFeatureFlag(owners = {"arnej"}) default boolean useV8GeoPositions() { return false; } + @ModelFeatureFlag(owners = {"arnej", "baldersheim"}) default boolean useV8DocManagerCfg() { return false; } } /** Warning: As elsewhere in this package, do not make backwards incompatible changes that will break old config models! 
*/ diff --git a/config-model/.gitignore b/config-model/.gitignore index b0f358e8113..4cf50da0853 100644 --- a/config-model/.gitignore +++ b/config-model/.gitignore @@ -4,3 +4,4 @@ /target /src/test/integration/*/copy/ /src/test/integration/*/models.generated/ +*.cfg.actual diff --git a/config-model/src/main/java/com/yahoo/documentmodel/DataTypeRepo.java b/config-model/src/main/java/com/yahoo/documentmodel/DataTypeRepo.java index 118714ca2b1..8848759b415 100644 --- a/config-model/src/main/java/com/yahoo/documentmodel/DataTypeRepo.java +++ b/config-model/src/main/java/com/yahoo/documentmodel/DataTypeRepo.java @@ -48,9 +48,14 @@ public class DataTypeRepo implements DataTypeCollection { { throw new IllegalStateException("Data type '" + type.getName() + "' is not registered."); } - typeByName.remove(type.getName()); + var oldByName = typeByName.remove(type.getName()); + var oldById = typeById.remove(type.getId()); + if (oldByName != oldById) { + throw new IllegalStateException("Data type '" + type.getName() + + "' inconsistent replace, by name: " + oldByName + + " but by id: " + oldById); + } typeByName.put(type.getName(), type); - typeById.remove(type.getId()); typeById.put(type.getId(), type); return this; } diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/DocumentModelBuilder.java b/config-model/src/main/java/com/yahoo/searchdefinition/DocumentModelBuilder.java index 8809cdeacc8..170753a6ff1 100644 --- a/config-model/src/main/java/com/yahoo/searchdefinition/DocumentModelBuilder.java +++ b/config-model/src/main/java/com/yahoo/searchdefinition/DocumentModelBuilder.java @@ -26,6 +26,7 @@ import com.yahoo.vespa.documentmodel.FieldView; import com.yahoo.vespa.documentmodel.SearchDef; import com.yahoo.vespa.documentmodel.SearchField; +import java.util.AbstractMap; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; @@ -184,57 +185,94 @@ public class DocumentModelBuilder { } } } + + // This is how you make a "Pair" class 
in java.... + private static class TypeReplacement extends AbstractMap.SimpleEntry<DataType,DataType> { + DataType oldType() { return getKey(); } + DataType newType() { return getValue(); } + public TypeReplacement(DataType oldType, DataType newType) { + super(oldType, newType); + } + } + private void addDocumentTypes(List<SDDocumentType> docList) { LinkedList<NewDocumentType> lst = new LinkedList<>(); for (SDDocumentType doc : docList) { lst.add(convert(doc)); model.getDocumentManager().add(lst.getLast()); } + Set<TypeReplacement> replacements = new HashSet<>(); + for(NewDocumentType doc : lst) { + resolveTemporaries(doc.getAllTypes(), lst, replacements); + } for(NewDocumentType doc : lst) { - resolveTemporaries(doc.getAllTypes(), lst); + for (var entry : replacements) { + var old = entry.oldType(); + if (doc.getDataType(old.getId()) == old) { + doc.replace(entry.newType()); + } + } } } - private static void resolveTemporaries(DataTypeCollection dtc, Collection<NewDocumentType> docs) { + + private static void resolveTemporaries(DataTypeCollection dtc, + Collection<NewDocumentType> docs, + Set<TypeReplacement> replacements) + { for (DataType type : dtc.getTypes()) { - resolveTemporariesRecurse(type, dtc, docs); + resolveTemporariesRecurse(type, dtc, docs, replacements); } } @SuppressWarnings("deprecation") private static DataType resolveTemporariesRecurse(DataType type, DataTypeCollection repo, - Collection<NewDocumentType> docs) { + Collection<NewDocumentType> docs, + Set<TypeReplacement> replacements) + { + DataType original = type; if (type instanceof TemporaryStructuredDataType) { - DataType struct = repo.getDataType(type.getId()); - if (struct != null) - type = struct; - else - type = getDocumentType(docs, type.getId()); - } - else if (type instanceof StructDataType) { + DataType other = repo.getDataType(type.getId()); + if (other == null || other == type) { + other = getDocumentType(docs, type.getId()); + } + // maybe warning if null here? 
+ if (other != null) { + type = other; + } + } else if (type instanceof DocumentType || type instanceof NewDocumentType) { + DataType other = getDocumentType(docs, type.getId()); + // maybe warning if null here? + if (other != null) { + type = other; + } + } else if (type instanceof StructDataType) { StructDataType dt = (StructDataType) type; for (com.yahoo.document.Field field : dt.getFields()) { if (field.getDataType() != type) { // XXX deprecated: - field.setDataType(resolveTemporariesRecurse(field.getDataType(), repo, docs)); + field.setDataType(resolveTemporariesRecurse(field.getDataType(), repo, docs, replacements)); } } } else if (type instanceof MapDataType) { MapDataType t = (MapDataType) type; - t.setKeyType(resolveTemporariesRecurse(t.getKeyType(), repo, docs)); - t.setValueType(resolveTemporariesRecurse(t.getValueType(), repo, docs)); + t.setKeyType(resolveTemporariesRecurse(t.getKeyType(), repo, docs, replacements)); + t.setValueType(resolveTemporariesRecurse(t.getValueType(), repo, docs, replacements)); } else if (type instanceof CollectionDataType) { CollectionDataType t = (CollectionDataType) type; - t.setNestedType(resolveTemporariesRecurse(t.getNestedType(), repo, docs)); + t.setNestedType(resolveTemporariesRecurse(t.getNestedType(), repo, docs, replacements)); } else if (type instanceof ReferenceDataType) { ReferenceDataType t = (ReferenceDataType) type; if (t.getTargetType() instanceof TemporaryStructuredDataType) { - DataType targetType = resolveTemporariesRecurse(t.getTargetType(), repo, docs); + DataType targetType = resolveTemporariesRecurse(t.getTargetType(), repo, docs, replacements); t.setTargetType((StructuredDataType) targetType); } } + if (type != original) { + replacements.add(new TypeReplacement(original, type)); + } return type; } diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/MapEvaluationTypeContext.java b/config-model/src/main/java/com/yahoo/searchdefinition/MapEvaluationTypeContext.java index 
e2af71ebbf3..fef7ff56763 100644 --- a/config-model/src/main/java/com/yahoo/searchdefinition/MapEvaluationTypeContext.java +++ b/config-model/src/main/java/com/yahoo/searchdefinition/MapEvaluationTypeContext.java @@ -17,7 +17,6 @@ import com.yahoo.tensor.TensorType; import com.yahoo.tensor.evaluation.TypeContext; import java.util.ArrayDeque; -import java.util.Collection; import java.util.Collections; import java.util.Deque; import java.util.HashMap; @@ -65,7 +64,7 @@ public class MapEvaluationTypeContext extends FunctionReferenceContext implement globallyResolvedTypes = new HashMap<>(); } - private MapEvaluationTypeContext(ImmutableMap<String, ExpressionFunction> functions, + private MapEvaluationTypeContext(Map<String, ExpressionFunction> functions, Map<String, String> bindings, Optional<MapEvaluationTypeContext> parent, Map<Reference, TensorType> featureTypes, @@ -250,7 +249,7 @@ public class MapEvaluationTypeContext extends FunctionReferenceContext implement private Optional<ExpressionFunction> functionInvocation(Reference reference) { if (reference.output() != null) return Optional.empty(); - ExpressionFunction function = functions().get(reference.name()); + ExpressionFunction function = getFunctions().get(reference.name()); if (function == null) return Optional.empty(); if (function.arguments().size() != reference.arguments().size()) return Optional.empty(); return Optional.of(function); @@ -348,7 +347,7 @@ public class MapEvaluationTypeContext extends FunctionReferenceContext implement @Override public MapEvaluationTypeContext withBindings(Map<String, String> bindings) { - return new MapEvaluationTypeContext(functions(), + return new MapEvaluationTypeContext(getFunctions(), bindings, Optional.of(this), featureTypes, diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/document/Attribute.java b/config-model/src/main/java/com/yahoo/searchdefinition/document/Attribute.java index 0a51ce3dda5..5ac6dd46102 100644 --- 
a/config-model/src/main/java/com/yahoo/searchdefinition/document/Attribute.java +++ b/config-model/src/main/java/com/yahoo/searchdefinition/document/Attribute.java @@ -114,6 +114,7 @@ public final class Attribute implements Cloneable, Serializable { public String getName() { return myName; } public String getExportAttributeTypeName() { return exportAttributeTypeName; } + @Override public String toString() { return "type: " + myName; } @@ -134,9 +135,11 @@ public final class Attribute implements Cloneable, Serializable { public String getName() { return name; } + @Override public String toString() { return "collectiontype: " + name; } + } /** Creates an attribute with default settings */ @@ -406,6 +409,7 @@ public final class Attribute implements Cloneable, Serializable { } } + @Override public String toString() { return "attribute '" + name + "' (" + type + ")"; } diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/processing/PagedAttributeValidator.java b/config-model/src/main/java/com/yahoo/searchdefinition/processing/PagedAttributeValidator.java index d108a620fd9..2a4f4f18759 100644 --- a/config-model/src/main/java/com/yahoo/searchdefinition/processing/PagedAttributeValidator.java +++ b/config-model/src/main/java/com/yahoo/searchdefinition/processing/PagedAttributeValidator.java @@ -16,7 +16,6 @@ import com.yahoo.vespa.model.container.search.QueryProfiles; */ public class PagedAttributeValidator extends Processor { - public PagedAttributeValidator(Schema schema, DeployLogger deployLogger, RankProfileRegistry rankProfileRegistry, @@ -40,8 +39,8 @@ public class PagedAttributeValidator extends Processor { private void validatePagedSetting(Field field, Attribute attribute) { var tensorType = attribute.tensorType(); - if (!tensorType.isPresent() || - !isDenseTensorType(tensorType.get())) { + if (tensorType.isEmpty() + || !isDenseTensorType(tensorType.get())) { fail(schema, field, "The 'paged' attribute setting is only supported for dense tensor 
types"); } } @@ -49,4 +48,5 @@ public class PagedAttributeValidator extends Processor { private boolean isDenseTensorType(TensorType type) { return type.dimensions().stream().allMatch(d -> d.isIndexed()); } + } diff --git a/config-model/src/main/java/com/yahoo/vespa/configmodel/producers/DocumentManager.java b/config-model/src/main/java/com/yahoo/vespa/configmodel/producers/DocumentManager.java index fdbb1d8c8e0..4cfd5c84550 100644 --- a/config-model/src/main/java/com/yahoo/vespa/configmodel/producers/DocumentManager.java +++ b/config-model/src/main/java/com/yahoo/vespa/configmodel/producers/DocumentManager.java @@ -11,8 +11,11 @@ import com.yahoo.documentmodel.NewDocumentType; import com.yahoo.documentmodel.VespaDocumentType; import com.yahoo.searchdefinition.document.FieldSet; import com.yahoo.vespa.documentmodel.DocumentModel; +import java.util.ArrayList; import java.util.Collection; +import java.util.Collections; import java.util.HashSet; +import java.util.List; import java.util.Set; /** @@ -35,7 +38,8 @@ public class DocumentManager { for(NewDocumentType documentType : model.getDocumentManager().getTypes()) { buildConfig(documentType, documentConfigBuilder, handled); buildConfig(documentType.getAnnotations(), documentConfigBuilder); - if ( documentType != VespaDocumentType.INSTANCE) { + if (documentType != VespaDocumentType.INSTANCE && ! 
handled.contains(documentType)) { + handled.add(documentType); DocumentmanagerConfig.Datatype.Builder dataTypeBuilder = new DocumentmanagerConfig.Datatype.Builder(); documentConfigBuilder.datatype(dataTypeBuilder); buildConfig(documentType, dataTypeBuilder); @@ -46,10 +50,16 @@ public class DocumentManager { @SuppressWarnings("deprecation") private void buildConfig(DataTypeCollection type, DocumentmanagerConfig.Builder documentConfigBuilder, Set<DataType> built) { - for (DataType dataType : type.getTypes()) { + List<DataType> todo = new ArrayList<>(type.getTypes()); + Collections.sort(todo, (a, b) -> (a.getName().equals(b.getName()) + ? a.getId() - b.getId() + : a.getName().compareTo(b.getName()))); + for (DataType dataType : todo) { if (built.contains(dataType)) continue; built.add(dataType); - if (dataType instanceof TemporaryStructuredDataType) continue; + if (dataType instanceof TemporaryStructuredDataType) { + throw new IllegalArgumentException("Can not create config for temporary data type: " + dataType.getName()); + } if ((dataType.getId() < 0) || (dataType.getId()> DataType.lastPredefinedDataTypeId())) { Datatype.Builder dataTypeBuilder = new Datatype.Builder(); documentConfigBuilder.datatype(dataTypeBuilder); @@ -98,15 +108,7 @@ public class DocumentManager { keytype(mtype.getKeyType().getId()). valtype(mtype.getValueType().getId())); } else if (type instanceof DocumentType) { - DocumentType dt = (DocumentType) type; - Datatype.Documenttype.Builder doc = new Datatype.Documenttype.Builder(); - builder.documenttype(doc); - doc. - name(dt.getName()). 
- headerstruct(dt.contentStruct().getId()); - for (DocumentType inherited : dt.getInheritedTypes()) { - doc.inherits(new Datatype.Documenttype.Inherits.Builder().name(inherited.getName())); - } + throw new IllegalArgumentException("Can not create config for unadorned document type: " + type.getName()); } else if (type instanceof NewDocumentType) { NewDocumentType dt = (NewDocumentType) type; Datatype.Documenttype.Builder doc = new Datatype.Documenttype.Builder(); @@ -120,7 +122,7 @@ public class DocumentManager { buildConfig(dt.getFieldSets(), doc); buildImportedFieldsConfig(dt.getImportedFieldNames(), doc); } else if (type instanceof TemporaryStructuredDataType) { - //Ignored + throw new IllegalArgumentException("Can not create config for temporary data type: " + type.getName()); } else if (type instanceof StructDataType) { StructDataType structType = (StructDataType) type; Datatype.Structtype.Builder structBuilder = new Datatype.Structtype.Builder(); diff --git a/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/VespaMetricSet.java b/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/VespaMetricSet.java index 973f7f5cc40..12f0f717a19 100644 --- a/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/VespaMetricSet.java +++ b/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/VespaMetricSet.java @@ -429,6 +429,7 @@ public class VespaMetricSet { addSearchNodeExecutorMetrics(metrics, "content.proton.executor.docsum"); addSearchNodeExecutorMetrics(metrics, "content.proton.executor.shared"); addSearchNodeExecutorMetrics(metrics, "content.proton.executor.warmup"); + addSearchNodeExecutorMetrics(metrics, "content.proton.executor.field_writer"); // jobs metrics.add(new Metric("content.proton.documentdb.job.total.average")); @@ -583,6 +584,15 @@ public class VespaMetricSet { metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.docs_matched.count")); metrics.add(new 
Metric("content.proton.documentdb.matching.rank_profile.limited_queries.rate")); + // feeding + metrics.add(new Metric("content.proton.documentdb.feeding.commit.operations.max")); + metrics.add(new Metric("content.proton.documentdb.feeding.commit.operations.sum")); + metrics.add(new Metric("content.proton.documentdb.feeding.commit.operations.count")); + metrics.add(new Metric("content.proton.documentdb.feeding.commit.operations.rate")); + metrics.add(new Metric("content.proton.documentdb.feeding.commit.latency.max")); + metrics.add(new Metric("content.proton.documentdb.feeding.commit.latency.sum")); + metrics.add(new Metric("content.proton.documentdb.feeding.commit.latency.count")); + return metrics; } @@ -618,6 +628,12 @@ public class VespaMetricSet { metrics.add(new Metric("vds.filestor.alldisks.averagequeuewait.sum.sum")); metrics.add(new Metric("vds.filestor.alldisks.averagequeuewait.sum.count")); metrics.add(new Metric("vds.filestor.alldisks.averagequeuewait.sum.average")); // TODO: Remove in Vespa 8 + metrics.add(new Metric("vds.filestor.alldisks.active_operations.size.max")); + metrics.add(new Metric("vds.filestor.alldisks.active_operations.size.sum")); + metrics.add(new Metric("vds.filestor.alldisks.active_operations.size.count")); + metrics.add(new Metric("vds.filestor.alldisks.active_operations.latency.max")); + metrics.add(new Metric("vds.filestor.alldisks.active_operations.latency.sum")); + metrics.add(new Metric("vds.filestor.alldisks.active_operations.latency.count")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.mergemetadatareadlatency.max")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.mergemetadatareadlatency.sum")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.mergemetadatareadlatency.count")); diff --git a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/QuotaValidator.java b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/QuotaValidator.java index 
10d97cbb58c..d1dc2b84c8a 100644 --- a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/QuotaValidator.java +++ b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/QuotaValidator.java @@ -39,7 +39,7 @@ public class QuotaValidator extends Validator { var maxSpend = model.allClusters().stream() .filter(id -> !adminClusterIds(model).contains(id)) .map(id -> model.provisioned().all().getOrDefault(id, zeroCapacity)) - .mapToDouble(c -> c.maxResources().cost()) + .mapToDouble(c -> c.maxResources().cost()) // TODO: This may be unspecified -> 0 .sum(); var actualSpend = model.allocatedHosts().getHosts().stream() diff --git a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/ClusterSizeReductionValidator.java b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/ClusterSizeReductionValidator.java index 25a570e44a2..14fb903a547 100644 --- a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/ClusterSizeReductionValidator.java +++ b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/ClusterSizeReductionValidator.java @@ -41,7 +41,7 @@ public class ClusterSizeReductionValidator implements ChangeValidator { int currentSize = current.minResources().nodes(); int nextSize = next.minResources().nodes(); // don't allow more than 50% reduction, but always allow to reduce size with 1 - if ( nextSize < ((double)currentSize) * 0.5 && nextSize != currentSize - 1) + if ( nextSize < currentSize * 0.5 && nextSize != currentSize - 1) overrides.invalid(ValidationId.clusterSizeReduction, "Size reduction in '" + clusterId.value() + "' is too large: " + "New min size must be at least 50% of the current min size. 
" + diff --git a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/IndexedSearchClusterChangeValidator.java b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/IndexedSearchClusterChangeValidator.java index a43c5b71903..8c333a099d0 100644 --- a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/IndexedSearchClusterChangeValidator.java +++ b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/IndexedSearchClusterChangeValidator.java @@ -27,24 +27,29 @@ import java.util.stream.Collectors; public class IndexedSearchClusterChangeValidator implements ChangeValidator { @Override - public List<ConfigChangeAction> validate(VespaModel current, VespaModel next, ValidationOverrides overrides, Instant now) { + public List<ConfigChangeAction> validate(VespaModel current, VespaModel next, + ValidationOverrides overrides, Instant now) { List<ConfigChangeAction> result = new ArrayList<>(); for (Map.Entry<String, ContentCluster> currentEntry : current.getContentClusters().entrySet()) { ContentCluster nextCluster = next.getContentClusters().get(currentEntry.getKey()); if (nextCluster != null && nextCluster.getSearch().hasIndexedCluster()) { - result.addAll(validateContentCluster(currentEntry.getValue(), nextCluster)); + result.addAll(validateContentCluster(currentEntry.getValue(), nextCluster, overrides, now)); } } return result; } private static List<ConfigChangeAction> validateContentCluster(ContentCluster currentCluster, - ContentCluster nextCluster) { - return validateDocumentDatabases(currentCluster, nextCluster); + ContentCluster nextCluster, + ValidationOverrides overrides, + Instant now) { + return validateDocumentDatabases(currentCluster, nextCluster, overrides, now); } private static List<ConfigChangeAction> validateDocumentDatabases(ContentCluster currentCluster, - ContentCluster nextCluster) { + ContentCluster nextCluster, + ValidationOverrides overrides, + 
Instant now) { List<ConfigChangeAction> result = new ArrayList<>(); for (DocumentDatabase currentDb : getDocumentDbs(currentCluster.getSearch())) { String docTypeName = currentDb.getName(); @@ -52,7 +57,7 @@ public class IndexedSearchClusterChangeValidator implements ChangeValidator { filter(db -> db.getName().equals(docTypeName)).findFirst(); if (nextDb.isPresent()) { result.addAll(validateDocumentDatabase(currentCluster, nextCluster, docTypeName, - currentDb, nextDb.get())); + currentDb, nextDb.get(), overrides, now)); } } return result; @@ -62,11 +67,19 @@ public class IndexedSearchClusterChangeValidator implements ChangeValidator { ContentCluster nextCluster, String docTypeName, DocumentDatabase currentDb, - DocumentDatabase nextDb) { + DocumentDatabase nextDb, + ValidationOverrides overrides, + Instant now) { NewDocumentType currentDocType = currentCluster.getDocumentDefinitions().get(docTypeName); NewDocumentType nextDocType = nextCluster.getDocumentDefinitions().get(docTypeName); List<VespaConfigChangeAction> result = - new DocumentDatabaseChangeValidator(currentCluster.id(), currentDb, currentDocType, nextDb, nextDocType).validate(); + new DocumentDatabaseChangeValidator(currentCluster.id(), + currentDb, + currentDocType, + nextDb, + nextDocType, + overrides, + now).validate(); return modifyActions(result, getSearchNodeServices(nextCluster.getSearch().getIndexed()), docTypeName); } diff --git a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/search/AttributeChangeValidator.java b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/search/AttributeChangeValidator.java index 431bca3fb5a..1957c52e841 100644 --- a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/search/AttributeChangeValidator.java +++ b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/search/AttributeChangeValidator.java @@ -1,6 +1,8 @@ // Copyright Yahoo. 
Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.vespa.model.application.validation.change.search; +import com.yahoo.config.application.api.ValidationId; +import com.yahoo.config.application.api.ValidationOverrides; import com.yahoo.config.provision.ClusterSpec; import com.yahoo.documentmodel.NewDocumentType; import com.yahoo.searchdefinition.derived.AttributeFields; @@ -12,6 +14,7 @@ import com.yahoo.searchdefinition.document.HnswIndexParams; import com.yahoo.vespa.model.application.validation.change.VespaConfigChangeAction; import com.yahoo.vespa.model.application.validation.change.VespaRestartAction; +import java.time.Instant; import java.util.ArrayList; import java.util.List; import java.util.Objects; @@ -33,6 +36,8 @@ public class AttributeChangeValidator { private final AttributeFields nextFields; private final IndexSchema nextIndexSchema; private final NewDocumentType nextDocType; + private final ValidationOverrides overrides; + private final Instant now; public AttributeChangeValidator(ClusterSpec.Id id, AttributeFields currentFields, @@ -40,7 +45,9 @@ public class AttributeChangeValidator { NewDocumentType currentDocType, AttributeFields nextFields, IndexSchema nextIndexSchema, - NewDocumentType nextDocType) { + NewDocumentType nextDocType, + ValidationOverrides overrides, + Instant now) { this.id = id; this.currentFields = currentFields; this.currentIndexSchema = currentIndexSchema; @@ -48,6 +55,8 @@ public class AttributeChangeValidator { this.nextFields = nextFields; this.nextIndexSchema = nextIndexSchema; this.nextDocType = nextDocType; + this.overrides = overrides; + this.now = now; } public List<VespaConfigChangeAction> validate() { @@ -97,23 +106,23 @@ public class AttributeChangeValidator { private List<VespaConfigChangeAction> validateAttributeSettings() { List<VespaConfigChangeAction> result = new ArrayList<>(); - for (Attribute nextAttr : nextFields.attributes()) { - Attribute currAttr = 
currentFields.getAttribute(nextAttr.getName()); - if (currAttr != null) { - validateAttributeSetting(id, currAttr, nextAttr, Attribute::isFastSearch, "fast-search", result); - validateAttributeSetting(id, currAttr, nextAttr, Attribute::isFastAccess, "fast-access", result); - validateAttributeSetting(id, currAttr, nextAttr, AttributeChangeValidator::extractDictionaryType, "dictionary: btree/hash", result); - validateAttributeSetting(id, currAttr, nextAttr, AttributeChangeValidator::extractDictionaryCase, "dictionary: cased/uncased", result); - validateAttributeSetting(id, currAttr, nextAttr, Attribute::isHuge, "huge", result); - validateAttributeSetting(id, currAttr, nextAttr, Attribute::isPaged, "paged", result); - validateAttributeSetting(id, currAttr, nextAttr, Attribute::densePostingListThreshold, "dense-posting-list-threshold", result); - validateAttributeSetting(id, currAttr, nextAttr, Attribute::isEnabledOnlyBitVector, "rank: filter", result); - validateAttributeSetting(id, currAttr, nextAttr, Attribute::distanceMetric, "distance-metric", result); - - validateAttributeSetting(id, currAttr, nextAttr, AttributeChangeValidator::hasHnswIndex, "indexing: index", result); - if (hasHnswIndex(currAttr) && hasHnswIndex(nextAttr)) { - validateAttributeHnswIndexSetting(id, currAttr, nextAttr, HnswIndexParams::maxLinksPerNode, "max-links-per-node", result); - validateAttributeHnswIndexSetting(id, currAttr, nextAttr, HnswIndexParams::neighborsToExploreAtInsert, "neighbors-to-explore-at-insert", result); + for (Attribute next : nextFields.attributes()) { + Attribute current = currentFields.getAttribute(next.getName()); + if (current != null) { + validateAttributeSetting(id, current, next, Attribute::isFastSearch, "fast-search", result); + validateAttributeSetting(id, current, next, Attribute::isFastAccess, "fast-access", result); + validateAttributeSetting(id, current, next, AttributeChangeValidator::extractDictionaryType, "dictionary: btree/hash", result); + 
validateAttributeSetting(id, current, next, AttributeChangeValidator::extractDictionaryCase, "dictionary: cased/uncased", result); + validateAttributeSetting(id, current, next, Attribute::isHuge, "huge", result); + validateAttributeSetting(id, current, next, Attribute::isPaged, "paged", result); + validatePagedAttributeRemoval(current, next); + validateAttributeSetting(id, current, next, Attribute::densePostingListThreshold, "dense-posting-list-threshold", result); + validateAttributeSetting(id, current, next, Attribute::isEnabledOnlyBitVector, "rank: filter", result); + validateAttributeSetting(id, current, next, Attribute::distanceMetric, "distance-metric", result); + validateAttributeSetting(id, current, next, AttributeChangeValidator::hasHnswIndex, "indexing: index", result); + if (hasHnswIndex(current) && hasHnswIndex(next)) { + validateAttributeHnswIndexSetting(id, current, next, HnswIndexParams::maxLinksPerNode, "max-links-per-node", result); + validateAttributeHnswIndexSetting(id, current, next, HnswIndexParams::neighborsToExploreAtInsert, "neighbors-to-explore-at-insert", result); } } } @@ -132,14 +141,14 @@ public class AttributeChangeValidator { } private static <T> void validateAttributeSetting(ClusterSpec.Id id, - Attribute currentAttr, Attribute nextAttr, + Attribute current, Attribute next, Function<Attribute, T> settingValueProvider, String setting, List<VespaConfigChangeAction> result) { - T currentValue = settingValueProvider.apply(currentAttr); - T nextValue = settingValueProvider.apply(nextAttr); + T currentValue = settingValueProvider.apply(current); + T nextValue = settingValueProvider.apply(next); if ( ! 
Objects.equals(currentValue, nextValue)) { String message = String.format("change property '%s' from '%s' to '%s'", setting, currentValue, nextValue); - result.add(new VespaRestartAction(id, new ChangeMessageBuilder(nextAttr.getName()).addChange(message).build())); + result.add(new VespaRestartAction(id, new ChangeMessageBuilder(next.getName()).addChange(message).build())); } } @@ -156,4 +165,13 @@ public class AttributeChangeValidator { } } + private void validatePagedAttributeRemoval(Attribute current, Attribute next) { + if (current.isPaged() && !next.isPaged()) { + overrides.invalid(ValidationId.pagedSettingRemoval, + current + "' has setting 'paged' removed. " + + "This may cause content nodes to run out of memory as the entire attribute is loaded into memory", + now); + } + } + } diff --git a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/search/ChangeMessageBuilder.java b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/search/ChangeMessageBuilder.java index ba0e0717c07..3481d2ce219 100644 --- a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/search/ChangeMessageBuilder.java +++ b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/search/ChangeMessageBuilder.java @@ -5,10 +5,9 @@ import java.util.ArrayList; import java.util.List; /** - * Class used to build a message describing the changes in a given field. + * Builder of a message describing the changes in a given field. 
* * @author geirst - * @since 2014-12-09 */ public class ChangeMessageBuilder { @@ -20,10 +19,7 @@ public class ChangeMessageBuilder { } public String build() { - StringBuilder retval = new StringBuilder(); - retval.append("Field '" + fieldName + "' changed: "); - retval.append(String.join(", ", changes)); - return retval.toString(); + return "Field '" + fieldName + "' changed: " + String.join(", ", changes); } public ChangeMessageBuilder addChange(String component, String from, String to) { diff --git a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/search/DocumentDatabaseChangeValidator.java b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/search/DocumentDatabaseChangeValidator.java index be2f49085b2..bff337adfb6 100644 --- a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/search/DocumentDatabaseChangeValidator.java +++ b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/search/DocumentDatabaseChangeValidator.java @@ -23,17 +23,23 @@ public class DocumentDatabaseChangeValidator { private final NewDocumentType currentDocType; private final DocumentDatabase nextDatabase; private final NewDocumentType nextDocType; + private final ValidationOverrides overrides; + private final Instant now; public DocumentDatabaseChangeValidator(ClusterSpec.Id id, DocumentDatabase currentDatabase, NewDocumentType currentDocType, DocumentDatabase nextDatabase, - NewDocumentType nextDocType) { + NewDocumentType nextDocType, + ValidationOverrides overrides, + Instant now) { this.id = id; this.currentDatabase = currentDatabase; this.currentDocType = currentDocType; this.nextDatabase = nextDatabase; this.nextDocType = nextDocType; + this.overrides = overrides; + this.now = now; } public List<VespaConfigChangeAction> validate() { @@ -50,7 +56,8 @@ public class DocumentDatabaseChangeValidator { currentDatabase.getDerivedConfiguration().getAttributeFields(), 
currentDatabase.getDerivedConfiguration().getIndexSchema(), currentDocType, nextDatabase.getDerivedConfiguration().getAttributeFields(), - nextDatabase.getDerivedConfiguration().getIndexSchema(), nextDocType) + nextDatabase.getDerivedConfiguration().getIndexSchema(), nextDocType, + overrides, now) .validate(); } diff --git a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/search/IndexingScriptChangeMessageBuilder.java b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/search/IndexingScriptChangeMessageBuilder.java index ce8347b66c1..5e258fde821 100644 --- a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/search/IndexingScriptChangeMessageBuilder.java +++ b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/search/IndexingScriptChangeMessageBuilder.java @@ -14,7 +14,6 @@ import com.yahoo.vespa.documentmodel.SummaryTransform; * This message should be more descriptive for the end-user than just seeing the changed indexing script. 
* * @author geirst - * @since 2014-12-09 */ public class IndexingScriptChangeMessageBuilder { @@ -57,7 +56,7 @@ public class IndexingScriptChangeMessageBuilder { private void checkStemming(ChangeMessageBuilder builder) { Stemming currentStemming = currentField.getStemming(currentSchema); Stemming nextStemming = nextField.getStemming(nextSchema); - if (!currentStemming.equals(nextStemming)) { + if (currentStemming != nextStemming) { builder.addChange("stemming", currentStemming.getName(), nextStemming.getName()); } } @@ -65,7 +64,7 @@ public class IndexingScriptChangeMessageBuilder { private void checkNormalizing(ChangeMessageBuilder builder) { NormalizeLevel.Level currentLevel = currentField.getNormalizing().getLevel(); NormalizeLevel.Level nextLevel = nextField.getNormalizing().getLevel(); - if (!currentLevel.equals(nextLevel)) { + if (currentLevel != nextLevel) { builder.addChange("normalizing", currentLevel.toString(), nextLevel.toString()); } } @@ -77,7 +76,7 @@ public class IndexingScriptChangeMessageBuilder { if (currentSummaryField != null) { SummaryTransform currentTransform = currentSummaryField.getTransform(); SummaryTransform nextTransform = nextSummaryField.getTransform(); - if (!currentSummaryField.getTransform().equals(nextSummaryField.getTransform())) { + if (currentSummaryField.getTransform() != nextSummaryField.getTransform()) { builder.addChange("summary field '" + fieldName + "' transform", currentTransform.getName(), nextTransform.getName()); } @@ -88,7 +87,7 @@ public class IndexingScriptChangeMessageBuilder { private static String toString(Matching matching) { Matching.Type type = matching.getType(); String retval = type.getName(); - if (type.equals(Matching.Type.GRAM)) { + if (type == Matching.Type.GRAM) { retval += " (size " + matching.getGramSize() + ")"; } return retval; diff --git a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/search/IndexingScriptChangeValidator.java 
b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/search/IndexingScriptChangeValidator.java index e64a3d44bba..f6f6b6abdee 100644 --- a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/search/IndexingScriptChangeValidator.java +++ b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/search/IndexingScriptChangeValidator.java @@ -64,8 +64,7 @@ public class IndexingScriptChangeValidator { return Optional.empty(); } - static boolean equalScripts(ScriptExpression currentScript, - ScriptExpression nextScript) { + static boolean equalScripts(ScriptExpression currentScript, ScriptExpression nextScript) { // Output expressions are specifying in which context a field value is used (attribute, index, summary), // and do not affect how the field value is generated in the indexing doc proc. // The output expressions are therefore removed before doing the comparison. diff --git a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/search/StructFieldAttributeChangeValidator.java b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/search/StructFieldAttributeChangeValidator.java index acb404a051b..a10d2c36de1 100644 --- a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/search/StructFieldAttributeChangeValidator.java +++ b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/search/StructFieldAttributeChangeValidator.java @@ -1,7 +1,6 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.vespa.model.application.validation.change.search; -import com.yahoo.config.application.api.ValidationOverrides; import com.yahoo.config.provision.ClusterSpec; import com.yahoo.document.ArrayDataType; import com.yahoo.document.DataType; @@ -15,7 +14,6 @@ import com.yahoo.searchdefinition.document.ComplexAttributeFieldUtils; import com.yahoo.vespa.model.application.validation.change.VespaConfigChangeAction; import com.yahoo.vespa.model.application.validation.change.VespaRestartAction; -import java.time.Instant; import java.util.ArrayList; import java.util.Collection; import java.util.List; @@ -24,7 +22,7 @@ import java.util.stream.Collectors; /** * Validates the changes between the current and next set of struct field attributes in a document database. - + * * Complex fields of the following types are considered (as they might have struct field attributes): * - array of simple struct * - map of simple struct @@ -53,7 +51,7 @@ public class StructFieldAttributeChangeValidator { } public List<VespaConfigChangeAction> validate() { - List<VespaConfigChangeAction> result = new ArrayList(); + List<VespaConfigChangeAction> result = new ArrayList<>(); for (Field currentField : currentDocType.getAllFields()) { Field nextField = nextDocType.getField(currentField.getName()); if (nextField != null) { diff --git a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/first/RedundancyOnFirstDeploymentValidator.java b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/first/RedundancyOnFirstDeploymentValidator.java index 636a3f44369..e85bbba2dca 100644 --- a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/first/RedundancyOnFirstDeploymentValidator.java +++ b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/first/RedundancyOnFirstDeploymentValidator.java @@ -31,8 +31,7 @@ public class RedundancyOnFirstDeploymentValidator extends Validator { if ( ! 
deployState.zone().environment().isProduction()) return; for (ContentCluster cluster : model.getContentClusters().values()) { - if (cluster.redundancy().finalRedundancy() == 1 - && cluster.redundancy().totalNodes() > cluster.redundancy().groups()) + if (cluster.redundancy().finalRedundancy() == 1 && cluster.redundancy().groups() == 1) deployState.validationOverrides().invalid(ValidationId.redundancyOne, cluster + " has redundancy 1, which will cause it to lose data " + "if a node fails. This requires an override on first deployment " + diff --git a/config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilder.java b/config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilder.java index 73138d15559..562ccc44a37 100644 --- a/config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilder.java +++ b/config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilder.java @@ -313,7 +313,7 @@ public class ContainerModelBuilder extends ConfigModelBuilder<ContainerModel> { if (deploymentSpec.isEmpty()) return; for (var deprecatedElement : deploymentSpec.get().deprecatedElements()) { - deployLogger.log(WARNING, deprecatedElement.humanReadableString()); + deployLogger.logApplicationPackage(WARNING, deprecatedElement.humanReadableString()); } addIdentityProvider(cluster, diff --git a/config-model/src/test/configmodel/types/documentmanager.cfg b/config-model/src/test/configmodel/types/documentmanager.cfg index 66002968586..8b93e3a4665 100644 --- a/config-model/src/test/configmodel/types/documentmanager.cfg +++ b/config-model/src/test/configmodel/types/documentmanager.cfg @@ -13,117 +13,117 @@ datatype[0].structtype[0].field[0].detailedtype "" datatype[0].structtype[0].field[1].name "y" datatype[0].structtype[0].field[1].datatype 0 datatype[0].structtype[0].field[1].detailedtype "" -datatype[1].id -1865479609 -datatype[1].maptype[0].keytype 2 -datatype[1].maptype[0].valtype 4 -datatype[2].id 
294108848 -datatype[2].structtype[0].name "folder" -datatype[2].structtype[0].version 0 -datatype[2].structtype[0].compresstype NONE -datatype[2].structtype[0].compresslevel 0 -datatype[2].structtype[0].compressthreshold 95 -datatype[2].structtype[0].compressminsize 800 -datatype[2].structtype[0].field[0].name "Version" -datatype[2].structtype[0].field[0].datatype 0 -datatype[2].structtype[0].field[0].detailedtype "" -datatype[2].structtype[0].field[1].name "Name" -datatype[2].structtype[0].field[1].datatype 2 -datatype[2].structtype[0].field[1].detailedtype "" -datatype[2].structtype[0].field[2].name "FlagsCounter" -datatype[2].structtype[0].field[2].datatype -1865479609 -datatype[2].structtype[0].field[2].detailedtype "" -datatype[2].structtype[0].field[3].name "anotherfolder" -datatype[2].structtype[0].field[3].datatype 294108848 -datatype[2].structtype[0].field[3].detailedtype "" -datatype[3].id 109267174 -datatype[3].structtype[0].name "sct" -datatype[3].structtype[0].version 0 -datatype[3].structtype[0].compresstype NONE -datatype[3].structtype[0].compresslevel 0 -datatype[3].structtype[0].compressthreshold 95 -datatype[3].structtype[0].compressminsize 800 -datatype[3].structtype[0].field[0].name "s1" -datatype[3].structtype[0].field[0].datatype 2 -datatype[3].structtype[0].field[0].detailedtype "" -datatype[3].structtype[0].field[1].name "s2" -datatype[3].structtype[0].field[1].datatype 2 -datatype[3].structtype[0].field[1].detailedtype "" -datatype[4].id 49942803 -datatype[4].arraytype[0].datatype 16 -datatype[5].id 339965458 -datatype[5].maptype[0].keytype 2 -datatype[5].maptype[0].valtype 2 -datatype[6].id -2092985853 -datatype[6].structtype[0].name "mystruct" -datatype[6].structtype[0].version 0 -datatype[6].structtype[0].compresstype NONE -datatype[6].structtype[0].compresslevel 0 -datatype[6].structtype[0].compressthreshold 95 -datatype[6].structtype[0].compressminsize 800 -datatype[6].structtype[0].field[0].name "bytearr" 
-datatype[6].structtype[0].field[0].datatype 49942803 -datatype[6].structtype[0].field[0].detailedtype "" -datatype[6].structtype[0].field[1].name "mymap" -datatype[6].structtype[0].field[1].datatype 339965458 -datatype[6].structtype[0].field[1].detailedtype "" -datatype[6].structtype[0].field[2].name "title" -datatype[6].structtype[0].field[2].datatype 2 -datatype[6].structtype[0].field[2].detailedtype "" -datatype[6].structtype[0].field[3].name "structfield" -datatype[6].structtype[0].field[3].datatype 2 -datatype[6].structtype[0].field[3].detailedtype "" -datatype[7].id -1245117006 -datatype[7].arraytype[0].datatype 0 -datatype[8].id 1328286588 -datatype[8].weightedsettype[0].datatype 2 -datatype[8].weightedsettype[0].createifnonexistant false -datatype[8].weightedsettype[0].removeifzero false -datatype[9].id 2125328771 -datatype[9].weightedsettype[0].datatype 2 -datatype[9].weightedsettype[0].createifnonexistant false -datatype[9].weightedsettype[0].removeifzero true -datatype[10].id 2065577986 -datatype[10].weightedsettype[0].datatype 2 -datatype[10].weightedsettype[0].createifnonexistant true -datatype[10].weightedsettype[0].removeifzero false -datatype[11].id -1244829667 -datatype[11].arraytype[0].datatype 109267174 -datatype[12].id -1584287606 -datatype[12].maptype[0].keytype 2 -datatype[12].maptype[0].valtype 0 -datatype[13].id 2125154557 -datatype[13].maptype[0].keytype 2 -datatype[13].maptype[0].valtype 1 -datatype[14].id -1715531035 +datatype[1].id -794985308 +datatype[1].arraytype[0].datatype 1707615575 +datatype[2].id 1707615575 +datatype[2].arraytype[0].datatype -1486737430 +datatype[3].id 1416345047 +datatype[3].arraytype[0].datatype -372512406 +datatype[4].id 69621385 +datatype[4].arraytype[0].datatype 339965458 +datatype[5].id 49942803 +datatype[5].arraytype[0].datatype 16 +datatype[6].id -1245117006 +datatype[6].arraytype[0].datatype 0 +datatype[7].id 759956026 +datatype[7].arraytype[0].datatype -2092985853 +datatype[8].id -1244829667 
+datatype[8].arraytype[0].datatype 109267174 +datatype[9].id -1486737430 +datatype[9].arraytype[0].datatype 2 +datatype[10].id -372512406 +datatype[10].maptype[0].keytype 0 +datatype[10].maptype[0].valtype 1707615575 +datatype[11].id 2138385264 +datatype[11].maptype[0].keytype 0 +datatype[11].maptype[0].valtype 5 +datatype[12].id -389833101 +datatype[12].maptype[0].keytype 0 +datatype[12].maptype[0].valtype 294108848 +datatype[13].id -1715531035 +datatype[13].maptype[0].keytype 0 +datatype[13].maptype[0].valtype 4 +datatype[14].id 1901258752 datatype[14].maptype[0].keytype 0 -datatype[14].maptype[0].valtype 4 -datatype[15].id 2138385264 -datatype[15].maptype[0].keytype 0 -datatype[15].maptype[0].valtype 5 -datatype[16].id 435886609 +datatype[14].maptype[0].valtype -2092985853 +datatype[15].id 435886609 +datatype[15].maptype[0].keytype 2 +datatype[15].maptype[0].valtype -1245117006 +datatype[16].id 2125154557 datatype[16].maptype[0].keytype 2 -datatype[16].maptype[0].valtype -1245117006 -datatype[17].id -1486737430 -datatype[17].arraytype[0].datatype 2 -datatype[18].id 1707615575 -datatype[18].arraytype[0].datatype -1486737430 -datatype[19].id -794985308 -datatype[19].arraytype[0].datatype 1707615575 -datatype[20].id 69621385 -datatype[20].arraytype[0].datatype 339965458 -datatype[21].id -372512406 -datatype[21].maptype[0].keytype 0 -datatype[21].maptype[0].valtype 1707615575 -datatype[22].id 1416345047 -datatype[22].arraytype[0].datatype -372512406 -datatype[23].id 1901258752 -datatype[23].maptype[0].keytype 0 -datatype[23].maptype[0].valtype -2092985853 -datatype[24].id 759956026 -datatype[24].arraytype[0].datatype -2092985853 -datatype[25].id -389833101 -datatype[25].maptype[0].keytype 0 -datatype[25].maptype[0].valtype 294108848 +datatype[16].maptype[0].valtype 1 +datatype[17].id -1584287606 +datatype[17].maptype[0].keytype 2 +datatype[17].maptype[0].valtype 0 +datatype[18].id -1865479609 +datatype[18].maptype[0].keytype 2 +datatype[18].maptype[0].valtype 4 
+datatype[19].id 339965458 +datatype[19].maptype[0].keytype 2 +datatype[19].maptype[0].valtype 2 +datatype[20].id 1328286588 +datatype[20].weightedsettype[0].datatype 2 +datatype[20].weightedsettype[0].createifnonexistant false +datatype[20].weightedsettype[0].removeifzero false +datatype[21].id 2065577986 +datatype[21].weightedsettype[0].datatype 2 +datatype[21].weightedsettype[0].createifnonexistant true +datatype[21].weightedsettype[0].removeifzero false +datatype[22].id 2125328771 +datatype[22].weightedsettype[0].datatype 2 +datatype[22].weightedsettype[0].createifnonexistant false +datatype[22].weightedsettype[0].removeifzero true +datatype[23].id 294108848 +datatype[23].structtype[0].name "folder" +datatype[23].structtype[0].version 0 +datatype[23].structtype[0].compresstype NONE +datatype[23].structtype[0].compresslevel 0 +datatype[23].structtype[0].compressthreshold 95 +datatype[23].structtype[0].compressminsize 800 +datatype[23].structtype[0].field[0].name "Version" +datatype[23].structtype[0].field[0].datatype 0 +datatype[23].structtype[0].field[0].detailedtype "" +datatype[23].structtype[0].field[1].name "Name" +datatype[23].structtype[0].field[1].datatype 2 +datatype[23].structtype[0].field[1].detailedtype "" +datatype[23].structtype[0].field[2].name "FlagsCounter" +datatype[23].structtype[0].field[2].datatype -1865479609 +datatype[23].structtype[0].field[2].detailedtype "" +datatype[23].structtype[0].field[3].name "anotherfolder" +datatype[23].structtype[0].field[3].datatype 294108848 +datatype[23].structtype[0].field[3].detailedtype "" +datatype[24].id -2092985853 +datatype[24].structtype[0].name "mystruct" +datatype[24].structtype[0].version 0 +datatype[24].structtype[0].compresstype NONE +datatype[24].structtype[0].compresslevel 0 +datatype[24].structtype[0].compressthreshold 95 +datatype[24].structtype[0].compressminsize 800 +datatype[24].structtype[0].field[0].name "bytearr" +datatype[24].structtype[0].field[0].datatype 49942803 
+datatype[24].structtype[0].field[0].detailedtype "" +datatype[24].structtype[0].field[1].name "mymap" +datatype[24].structtype[0].field[1].datatype 339965458 +datatype[24].structtype[0].field[1].detailedtype "" +datatype[24].structtype[0].field[2].name "title" +datatype[24].structtype[0].field[2].datatype 2 +datatype[24].structtype[0].field[2].detailedtype "" +datatype[24].structtype[0].field[3].name "structfield" +datatype[24].structtype[0].field[3].datatype 2 +datatype[24].structtype[0].field[3].detailedtype "" +datatype[25].id 109267174 +datatype[25].structtype[0].name "sct" +datatype[25].structtype[0].version 0 +datatype[25].structtype[0].compresstype NONE +datatype[25].structtype[0].compresslevel 0 +datatype[25].structtype[0].compressthreshold 95 +datatype[25].structtype[0].compressminsize 800 +datatype[25].structtype[0].field[0].name "s1" +datatype[25].structtype[0].field[0].datatype 2 +datatype[25].structtype[0].field[0].detailedtype "" +datatype[25].structtype[0].field[1].name "s2" +datatype[25].structtype[0].field[1].datatype 2 +datatype[25].structtype[0].field[1].detailedtype "" datatype[26].id 1328581348 datatype[26].structtype[0].name "types.header" datatype[26].structtype[0].version 0 diff --git a/config-model/src/test/configmodel/types/documenttypes_with_doc_field.cfg b/config-model/src/test/configmodel/types/documenttypes_with_doc_field.cfg index 9be843c89aa..61c92eee8d1 100644 --- a/config-model/src/test/configmodel/types/documenttypes_with_doc_field.cfg +++ b/config-model/src/test/configmodel/types/documenttypes_with_doc_field.cfg @@ -27,7 +27,7 @@ documenttype[1].version 0 documenttype[1].headerstruct 1328581348 documenttype[1].bodystruct 0 documenttype[1].inherits[0].id 8 -documenttype[1].datatype[0].id -1368624373 +documenttype[1].datatype[0].id 1328581348 documenttype[1].datatype[0].type STRUCT documenttype[1].datatype[0].array.element.id 0 documenttype[1].datatype[0].map.key.id 0 @@ -36,29 +36,14 @@ documenttype[1].datatype[0].wset.key.id 0 
documenttype[1].datatype[0].wset.createifnonexistent false documenttype[1].datatype[0].wset.removeifzero false documenttype[1].datatype[0].annotationref.annotation.id 0 -documenttype[1].datatype[0].sstruct.name "other_doc" +documenttype[1].datatype[0].sstruct.name "types.header" documenttype[1].datatype[0].sstruct.version 0 documenttype[1].datatype[0].sstruct.compression.type NONE documenttype[1].datatype[0].sstruct.compression.level 0 documenttype[1].datatype[0].sstruct.compression.threshold 95 documenttype[1].datatype[0].sstruct.compression.minsize 200 -documenttype[1].datatype[1].id 1328581348 -documenttype[1].datatype[1].type STRUCT -documenttype[1].datatype[1].array.element.id 0 -documenttype[1].datatype[1].map.key.id 0 -documenttype[1].datatype[1].map.value.id 0 -documenttype[1].datatype[1].wset.key.id 0 -documenttype[1].datatype[1].wset.createifnonexistent false -documenttype[1].datatype[1].wset.removeifzero false -documenttype[1].datatype[1].annotationref.annotation.id 0 -documenttype[1].datatype[1].sstruct.name "types.header" -documenttype[1].datatype[1].sstruct.version 0 -documenttype[1].datatype[1].sstruct.compression.type NONE -documenttype[1].datatype[1].sstruct.compression.level 0 -documenttype[1].datatype[1].sstruct.compression.threshold 95 -documenttype[1].datatype[1].sstruct.compression.minsize 200 -documenttype[1].datatype[1].sstruct.field[0].name "doc_field" -documenttype[1].datatype[1].sstruct.field[0].id 819293364 -documenttype[1].datatype[1].sstruct.field[0].datatype -1368624373 -documenttype[1].datatype[1].sstruct.field[0].detailedtype "" +documenttype[1].datatype[0].sstruct.field[0].name "doc_field" +documenttype[1].datatype[0].sstruct.field[0].id 819293364 +documenttype[1].datatype[0].sstruct.field[0].datatype -1368624373 +documenttype[1].datatype[0].sstruct.field[0].detailedtype "" documenttype[1].fieldsets{[document]}.fields[0] "doc_field" diff --git a/config-model/src/test/derived/inheritance/mother/documentmanager.cfg 
b/config-model/src/test/derived/inheritance/mother/documentmanager.cfg deleted file mode 100644 index 3cf7eae655d..00000000000 --- a/config-model/src/test/derived/inheritance/mother/documentmanager.cfg +++ /dev/null @@ -1,176 +0,0 @@ -enablecompression false -usev8geopositions false -datatype[-126593034].id -126593034 -datatype[-126593034].structtype[single].name "child.body" -datatype[-126593034].structtype[single].version 0 -datatype[-141935690].id -141935690 -datatype[-141935690].structtype[single].name "search_smartsummary" -datatype[-141935690].structtype[single].version 0 -datatype[-141935690].structtype[single].field[abstract].datatype 2 -datatype[-141935690].structtype[single].field[abstract].name "abstract" -datatype[-141935690].structtype[single].field[dispurl].datatype 2 -datatype[-141935690].structtype[single].field[dispurl].name "dispurl" -datatype[-141935690].structtype[single].field[title].datatype 2 -datatype[-141935690].structtype[single].field[title].name "title" -datatype[-1467672569].id -1467672569 -datatype[-1467672569].structtype[single].name "child_search.body" -datatype[-1467672569].structtype[single].version 0 -datatype[-154107656].id -154107656 -datatype[-154107656].documenttype[single].bodystruct 978262812 -datatype[-154107656].documenttype[single].headerstruct 990971719 -datatype[-154107656].documenttype[single].name "grandparent" -datatype[-154107656].documenttype[single].version 0 -datatype[-158393403].id -158393403 -datatype[-158393403].documenttype[single].bodystruct -1989003153 -datatype[-158393403].documenttype[single].headerstruct 1306663898 -datatype[-158393403].documenttype[single].name "mother" -datatype[-158393403].documenttype[single].version 0 -datatype[-158393403].documenttype[single].inherits[grandparent].name "grandparent" -datatype[-158393403].documenttype[single].inherits[grandparent].version 0 -datatype[-1740240543].id -1740240543 -datatype[-1740240543].structtype[single].name "search_feature" 
-datatype[-1740240543].structtype[single].version 0 -datatype[-1740240543].structtype[single].field[name].datatype 2 -datatype[-1740240543].structtype[single].field[name].name "name" -datatype[-1740240543].structtype[single].field[value].datatype 5 -datatype[-1740240543].structtype[single].field[value].name "value" -datatype[-1742340170].id -1742340170 -datatype[-1742340170].structtype[single].name "father.body" -datatype[-1742340170].structtype[single].version 0 -datatype[-1852215954].id -1852215954 -datatype[-1852215954].structtype[single].name "mother_search.body" -datatype[-1852215954].structtype[single].version 0 -datatype[-1962244686].id -1962244686 -datatype[-1962244686].structtype[single].name "father_search.header" -datatype[-1962244686].structtype[single].version 0 -datatype[-1962244686].structtype[single].field[onlyfather].datatype 2 -datatype[-1962244686].structtype[single].field[onlyfather].name "onlyfather" -datatype[-1962244686].structtype[single].field[onlygrandparent].datatype 0 -datatype[-1962244686].structtype[single].field[onlygrandparent].name "onlygrandparent" -datatype[-1962244686].structtype[single].field[overridden].datatype 0 -datatype[-1962244686].structtype[single].field[overridden].name "overridden" -datatype[-1989003153].id -1989003153 -datatype[-1989003153].structtype[single].name "mother.body" -datatype[-1989003153].structtype[single].version 0 -datatype[-205818510].id -205818510 -datatype[-205818510].structtype[single].name "child_search.header" -datatype[-205818510].structtype[single].version 0 -datatype[-205818510].structtype[single].field[onlychild].datatype 2 -datatype[-205818510].structtype[single].field[onlychild].name "onlychild" -datatype[-205818510].structtype[single].field[onlyfather].datatype 2 -datatype[-205818510].structtype[single].field[onlyfather].name "onlyfather" -datatype[-205818510].structtype[single].field[onlygrandparent].datatype 0 -datatype[-205818510].structtype[single].field[onlygrandparent].name 
"onlygrandparent" -datatype[-205818510].structtype[single].field[onlymother].datatype 2 -datatype[-205818510].structtype[single].field[onlymother].name "onlymother" -datatype[-205818510].structtype[single].field[overridden].datatype 0 -datatype[-205818510].structtype[single].field[overridden].name "overridden" -datatype[-384824039].id -384824039 -datatype[-384824039].structtype[single].name "mother_search.header" -datatype[-384824039].structtype[single].version 0 -datatype[-384824039].structtype[single].field[onlygrandparent].datatype 0 -datatype[-384824039].structtype[single].field[onlygrandparent].name "onlygrandparent" -datatype[-384824039].structtype[single].field[onlymother].datatype 2 -datatype[-384824039].structtype[single].field[onlymother].name "onlymother" -datatype[-384824039].structtype[single].field[overridden].datatype 0 -datatype[-384824039].structtype[single].field[overridden].name "overridden" -datatype[-52742073].id -52742073 -datatype[-52742073].structtype[single].name "father_search.body" -datatype[-52742073].structtype[single].version 0 -datatype[-580592339].id -580592339 -datatype[-580592339].documenttype[single].bodystruct -1467672569 -datatype[-580592339].documenttype[single].headerstruct -205818510 -datatype[-580592339].documenttype[single].name "child_search" -datatype[-580592339].documenttype[single].version 0 -datatype[-876064862].id -876064862 -datatype[-876064862].structtype[single].name "search_position" -datatype[-876064862].structtype[single].version 0 -datatype[-876064862].structtype[single].field[x].datatype 0 -datatype[-876064862].structtype[single].field[x].name "x" -datatype[-876064862].structtype[single].field[y].datatype 0 -datatype[-876064862].structtype[single].field[y].name "y" -datatype[1306663898].id 1306663898 -datatype[1306663898].structtype[single].name "mother.header" -datatype[1306663898].structtype[single].version 0 -datatype[1306663898].structtype[single].field[onlymother].datatype 2 
-datatype[1306663898].structtype[single].field[onlymother].name "onlymother" -datatype[1464571117].id 1464571117 -datatype[1464571117].documenttype[single].bodystruct -52742073 -datatype[1464571117].documenttype[single].headerstruct -1962244686 -datatype[1464571117].documenttype[single].name "father_search" -datatype[1464571117].documenttype[single].version 0 -datatype[147991900].id 147991900 -datatype[147991900].arraytype[single].datatype -1740240543 -datatype[1530060044].id 1530060044 -datatype[1530060044].structtype[single].name "grandparent_search.header" -datatype[1530060044].structtype[single].version 0 -datatype[1530060044].structtype[single].field[onlygrandparent].datatype 0 -datatype[1530060044].structtype[single].field[onlygrandparent].name "onlygrandparent" -datatype[1530060044].structtype[single].field[overridden].datatype 0 -datatype[1530060044].structtype[single].field[overridden].name "overridden" -datatype[1845861921].id 1845861921 -datatype[1845861921].structtype[single].name "grandparent_search.body" -datatype[1845861921].structtype[single].version 0 -datatype[2126589281].id 2126589281 -datatype[2126589281].structtype[single].name "father.header" -datatype[2126589281].structtype[single].version 0 -datatype[2126589281].structtype[single].field[onlyfather].datatype 2 -datatype[2126589281].structtype[single].field[onlyfather].name "onlyfather" -datatype[328953555].id 328953555 -datatype[328953555].documenttype[single].bodystruct 1845861921 -datatype[328953555].documenttype[single].headerstruct 1530060044 -datatype[328953555].documenttype[single].name "grandparent_search" -datatype[328953555].documenttype[single].version 0 -datatype[464784087].id 464784087 -datatype[464784087].structtype[single].name "search_uri" -datatype[464784087].structtype[single].version 0 -datatype[464784087].structtype[single].field[all].datatype 2 -datatype[464784087].structtype[single].field[all].name "all" -datatype[464784087].structtype[single].field[fragment].datatype 2 
-datatype[464784087].structtype[single].field[fragment].name "fragment" -datatype[464784087].structtype[single].field[host].datatype 2 -datatype[464784087].structtype[single].field[host].name "host" -datatype[464784087].structtype[single].field[path].datatype 2 -datatype[464784087].structtype[single].field[path].name "path" -datatype[464784087].structtype[single].field[port].datatype 0 -datatype[464784087].structtype[single].field[port].name "port" -datatype[464784087].structtype[single].field[query].datatype 2 -datatype[464784087].structtype[single].field[query].name "query" -datatype[464784087].structtype[single].field[scheme].datatype 2 -datatype[464784087].structtype[single].field[scheme].name "scheme" -datatype[644645734].id 644645734 -datatype[644645734].documenttype[single].bodystruct -1852215954 -datatype[644645734].documenttype[single].headerstruct -384824039 -datatype[644645734].documenttype[single].name "mother_search" -datatype[644645734].documenttype[single].version 0 -datatype[746267614].id 746267614 -datatype[746267614].documenttype[single].bodystruct -126593034 -datatype[746267614].documenttype[single].headerstruct 81425825 -datatype[746267614].documenttype[single].name "child" -datatype[746267614].documenttype[single].version 0 -datatype[746267614].documenttype[single].inherits[father].name "father" -datatype[746267614].documenttype[single].inherits[father].version 0 -datatype[746267614].documenttype[single].inherits[mother].name "mother" -datatype[746267614].documenttype[single].inherits[mother].version 0 -datatype[81425825].id 81425825 -datatype[81425825].structtype[single].name "child.header" -datatype[81425825].structtype[single].version 0 -datatype[81425825].structtype[single].field[onlychild].datatype 2 -datatype[81425825].structtype[single].field[onlychild].name "onlychild" -datatype[978262812].id 978262812 -datatype[978262812].structtype[single].name "grandparent.body" -datatype[978262812].structtype[single].version 0 
-datatype[986686494].id 986686494 -datatype[986686494].documenttype[single].bodystruct -1742340170 -datatype[986686494].documenttype[single].headerstruct 2126589281 -datatype[986686494].documenttype[single].name "father" -datatype[986686494].documenttype[single].version 0 -datatype[986686494].documenttype[single].inherits[grandparent].name "grandparent" -datatype[986686494].documenttype[single].inherits[grandparent].version 0 -datatype[990971719].id 990971719 -datatype[990971719].structtype[single].name "grandparent.header" -datatype[990971719].structtype[single].version 0 -datatype[990971719].structtype[single].field[onlygrandparent].datatype 0 -datatype[990971719].structtype[single].field[onlygrandparent].name "onlygrandparent" -datatype[990971719].structtype[single].field[overridden].datatype 0 -datatype[990971719].structtype[single].field[overridden].name "overridden" diff --git a/config-model/src/test/examples/fieldoftypedocument.cfg b/config-model/src/test/examples/fieldoftypedocument.cfg index 5753ae556a6..82a30012a07 100644 --- a/config-model/src/test/examples/fieldoftypedocument.cfg +++ b/config-model/src/test/examples/fieldoftypedocument.cfg @@ -23,37 +23,37 @@ datatype[1].structtype[0].compressminsize 800 datatype[1].structtype[0].field[0].name "soundtrack" datatype[1].structtype[0].field[0].datatype 1412693671 datatype[1].structtype[0].field[0].detailedtype "" -datatype[2].id -1383388565 -datatype[2].documenttype[0].name "book" +datatype[2].id 1412693671 +datatype[2].documenttype[0].name "music" datatype[2].documenttype[0].version 0 datatype[2].documenttype[0].inherits[0].name "document" datatype[2].documenttype[0].inherits[0].version 0 -datatype[2].documenttype[0].headerstruct -1344444812 +datatype[2].documenttype[0].headerstruct -1910204744 datatype[2].documenttype[0].bodystruct 0 -datatype[2].documenttype[0].fieldsets{[document]}.fields[0] "soundtrack" -datatype[3].id -1910204744 -datatype[3].structtype[0].name "music.header" 
-datatype[3].structtype[0].version 0 -datatype[3].structtype[0].compresstype NONE -datatype[3].structtype[0].compresslevel 0 -datatype[3].structtype[0].compressthreshold 95 -datatype[3].structtype[0].compressminsize 800 -datatype[3].structtype[0].field[0].name "intfield" -datatype[3].structtype[0].field[0].datatype 0 -datatype[3].structtype[0].field[0].detailedtype "" -datatype[3].structtype[0].field[1].name "stringfield" -datatype[3].structtype[0].field[1].datatype 2 -datatype[3].structtype[0].field[1].detailedtype "" -datatype[3].structtype[0].field[2].name "longfield" -datatype[3].structtype[0].field[2].datatype 4 -datatype[3].structtype[0].field[2].detailedtype "" -datatype[4].id 1412693671 -datatype[4].documenttype[0].name "music" -datatype[4].documenttype[0].version 0 -datatype[4].documenttype[0].inherits[0].name "document" -datatype[4].documenttype[0].inherits[0].version 0 -datatype[4].documenttype[0].headerstruct -1910204744 -datatype[4].documenttype[0].bodystruct 0 -datatype[4].documenttype[0].fieldsets{[document]}.fields[0] "intfield" -datatype[4].documenttype[0].fieldsets{[document]}.fields[1] "longfield" -datatype[4].documenttype[0].fieldsets{[document]}.fields[2] "stringfield" +datatype[2].documenttype[0].fieldsets{[document]}.fields[0] "intfield" +datatype[2].documenttype[0].fieldsets{[document]}.fields[1] "longfield" +datatype[2].documenttype[0].fieldsets{[document]}.fields[2] "stringfield" +datatype[3].id -1383388565 +datatype[3].documenttype[0].name "book" +datatype[3].documenttype[0].version 0 +datatype[3].documenttype[0].inherits[0].name "document" +datatype[3].documenttype[0].inherits[0].version 0 +datatype[3].documenttype[0].headerstruct -1344444812 +datatype[3].documenttype[0].bodystruct 0 +datatype[3].documenttype[0].fieldsets{[document]}.fields[0] "soundtrack" +datatype[4].id -1910204744 +datatype[4].structtype[0].name "music.header" +datatype[4].structtype[0].version 0 +datatype[4].structtype[0].compresstype NONE 
+datatype[4].structtype[0].compresslevel 0 +datatype[4].structtype[0].compressthreshold 95 +datatype[4].structtype[0].compressminsize 800 +datatype[4].structtype[0].field[0].name "intfield" +datatype[4].structtype[0].field[0].datatype 0 +datatype[4].structtype[0].field[0].detailedtype "" +datatype[4].structtype[0].field[1].name "stringfield" +datatype[4].structtype[0].field[1].datatype 2 +datatype[4].structtype[0].field[1].detailedtype "" +datatype[4].structtype[0].field[2].name "longfield" +datatype[4].structtype[0].field[2].datatype 4 +datatype[4].structtype[0].field[2].detailedtype "" diff --git a/config-model/src/test/examples/structresult.cfg b/config-model/src/test/examples/structresult.cfg index 639d91c892d..b5b90245858 100755..100644 --- a/config-model/src/test/examples/structresult.cfg +++ b/config-model/src/test/examples/structresult.cfg @@ -13,32 +13,32 @@ datatype[0].structtype[0].field[0].detailedtype "" datatype[0].structtype[0].field[1].name "y" datatype[0].structtype[0].field[1].datatype 0 datatype[0].structtype[0].field[1].detailedtype "" -datatype[1].id 93505813 -datatype[1].structtype[0].name "bar" -datatype[1].structtype[0].version 0 -datatype[1].structtype[0].compresstype NONE -datatype[1].structtype[0].compresslevel 0 -datatype[1].structtype[0].compressthreshold 95 -datatype[1].structtype[0].compressminsize 800 -datatype[1].structtype[0].field[0].name "humbe" -datatype[1].structtype[0].field[0].datatype 97614088 -datatype[1].structtype[0].field[0].detailedtype "" -datatype[2].id 97614088 -datatype[2].structtype[0].name "foo" +datatype[1].id -1245205573 +datatype[1].arraytype[0].datatype 97614088 +datatype[2].id 93505813 +datatype[2].structtype[0].name "bar" datatype[2].structtype[0].version 0 datatype[2].structtype[0].compresstype NONE datatype[2].structtype[0].compresslevel 0 datatype[2].structtype[0].compressthreshold 95 datatype[2].structtype[0].compressminsize 800 -datatype[2].structtype[0].field[0].name "fubar" 
-datatype[2].structtype[0].field[0].datatype 0 +datatype[2].structtype[0].field[0].name "humbe" +datatype[2].structtype[0].field[0].datatype 97614088 datatype[2].structtype[0].field[0].detailedtype "" -datatype[2].structtype[0].field[1].name "bar" -datatype[2].structtype[0].field[1].id[0].id 1 -datatype[2].structtype[0].field[1].datatype 2 -datatype[2].structtype[0].field[1].detailedtype "" -datatype[3].id -1245205573 -datatype[3].arraytype[0].datatype 97614088 +datatype[3].id 97614088 +datatype[3].structtype[0].name "foo" +datatype[3].structtype[0].version 0 +datatype[3].structtype[0].compresstype NONE +datatype[3].structtype[0].compresslevel 0 +datatype[3].structtype[0].compressthreshold 95 +datatype[3].structtype[0].compressminsize 800 +datatype[3].structtype[0].field[0].name "fubar" +datatype[3].structtype[0].field[0].datatype 0 +datatype[3].structtype[0].field[0].detailedtype "" +datatype[3].structtype[0].field[1].name "bar" +datatype[3].structtype[0].field[1].id[0].id 1 +datatype[3].structtype[0].field[1].datatype 2 +datatype[3].structtype[0].field[1].detailedtype "" datatype[4].id -1910204744 datatype[4].structtype[0].name "music.header" datatype[4].structtype[0].version 0 diff --git a/config-model/src/test/java/com/yahoo/searchdefinition/AbstractSchemaTestCase.java b/config-model/src/test/java/com/yahoo/searchdefinition/AbstractSchemaTestCase.java index a08ec110219..8ece5cd0fe4 100644 --- a/config-model/src/test/java/com/yahoo/searchdefinition/AbstractSchemaTestCase.java +++ b/config-model/src/test/java/com/yahoo/searchdefinition/AbstractSchemaTestCase.java @@ -13,6 +13,10 @@ import static helpers.CompareConfigTestHelper.assertSerializedConfigFileEquals; public abstract class AbstractSchemaTestCase { protected static void assertConfigFile(String filename, String cfg) throws IOException { + IOUtils.writeFile(filename + ".actual", cfg, false); + if (! 
cfg.endsWith("\n")) { + IOUtils.writeFile(filename + ".actual", "\n", true); + } assertSerializedConfigFileEquals(filename, cfg); } diff --git a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/search/AttributeChangeValidatorTest.java b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/search/AttributeChangeValidatorTest.java index 87c7c898f96..3cfde4c4d19 100644 --- a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/search/AttributeChangeValidatorTest.java +++ b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/search/AttributeChangeValidatorTest.java @@ -1,13 +1,18 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.vespa.model.application.validation.change.search; +import com.yahoo.config.application.api.ValidationId; +import com.yahoo.config.application.api.ValidationOverrides; import com.yahoo.config.provision.ClusterSpec; +import com.yahoo.test.ManualClock; import com.yahoo.vespa.model.application.validation.change.VespaConfigChangeAction; import org.junit.Test; import java.util.List; import static com.yahoo.vespa.model.application.validation.change.ConfigChangeTestUtils.newRestartAction; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; public class AttributeChangeValidatorTest { @@ -22,7 +27,9 @@ public class AttributeChangeValidatorTest { currentDocType(), nextDb().getDerivedConfiguration().getAttributeFields(), nextDb().getDerivedConfiguration().getIndexSchema(), - nextDocType()); + nextDocType(), + new ValidationOverrides(List.of()), + new ManualClock().instant()); } @Override @@ -202,4 +209,19 @@ public class AttributeChangeValidatorTest { "Field 'f1' changed: change hnsw index property " + "'neighbors-to-explore-at-insert' from '200' to '100'")); } + + @Test + public void removing_paged_requires_override() throws Exception { + try { 
+ new Fixture("field f1 type tensor(x[10]) { indexing: attribute \n attribute: paged }", + "field f1 type tensor(x[10]) { indexing: attribute }"). + assertValidation(); + fail("Expected exception on removal of 'paged'"); + } + catch (ValidationOverrides.ValidationException e) { + assertTrue(e.getMessage().contains(ValidationId.pagedSettingRemoval.toString())); + } + } + + } diff --git a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/search/DocumentDatabaseChangeValidatorTest.java b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/search/DocumentDatabaseChangeValidatorTest.java index d5c84be2008..aba5c2aa05c 100644 --- a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/search/DocumentDatabaseChangeValidatorTest.java +++ b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/search/DocumentDatabaseChangeValidatorTest.java @@ -2,7 +2,9 @@ package com.yahoo.vespa.model.application.validation.change.search; import com.yahoo.config.application.api.ValidationId; +import com.yahoo.config.application.api.ValidationOverrides; import com.yahoo.config.provision.ClusterSpec; +import com.yahoo.test.ManualClock; import com.yahoo.vespa.model.application.validation.change.VespaConfigChangeAction; import org.junit.Test; @@ -25,7 +27,9 @@ public class DocumentDatabaseChangeValidatorTest { currentDb(), currentDocType(), nextDb(), - nextDocType()); + nextDocType(), + new ValidationOverrides(List.of()), + new ManualClock().instant()); } @Override diff --git a/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/empty.cfg b/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/empty.cfg index 08b0e6809ce..26aab134699 100644 --- a/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/empty.cfg +++ b/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/empty.cfg @@ -1 +1 @@ -enableGroupingSessionCache true
\ No newline at end of file +enableGroupingSessionCache true diff --git a/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/explicit-reference-override.cfg b/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/explicit-reference-override.cfg index fc77c5d82fa..5f48b7b75c2 100644 --- a/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/explicit-reference-override.cfg +++ b/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/explicit-reference-override.cfg @@ -11,4 +11,4 @@ queryprofile[1].property[0].overridable "" queryprofile[1].reference[0].name "a" queryprofile[1].reference[0].value "a1" queryprofile[1].reference[0].overridable "" -enableGroupingSessionCache true
\ No newline at end of file +enableGroupingSessionCache true diff --git a/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/newsbe-query-profiles-simple.cfg b/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/newsbe-query-profiles-simple.cfg index 337623bc448..954a6b8d68a 100644 --- a/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/newsbe-query-profiles-simple.cfg +++ b/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/newsbe-query-profiles-simple.cfg @@ -18,4 +18,4 @@ queryprofile[0].queryprofilevariant[0].fordimensionvalues[2] "sc" queryprofile[0].queryprofilevariant[0].property[0].name "scthumbnail.sourcecountry" queryprofile[0].queryprofilevariant[0].property[0].value "uk" queryprofile[0].queryprofilevariant[0].property[0].overridable "" -enableGroupingSessionCache true
\ No newline at end of file +enableGroupingSessionCache true diff --git a/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/newsfe-query-profiles-simple.cfg b/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/newsfe-query-profiles-simple.cfg index b3e41d88233..33ef2610d1d 100644 --- a/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/newsfe-query-profiles-simple.cfg +++ b/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/newsfe-query-profiles-simple.cfg @@ -27,4 +27,4 @@ queryprofile[1].type "" queryprofile[1].reference[0].name "source.news" queryprofile[1].reference[0].value "backend/news" queryprofile[1].reference[0].overridable "" -enableGroupingSessionCache true
\ No newline at end of file +enableGroupingSessionCache true diff --git a/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/query-profile-variants-configuration.cfg b/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/query-profile-variants-configuration.cfg index 8cf8385f397..c10e0b93560 100644 --- a/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/query-profile-variants-configuration.cfg +++ b/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/query-profile-variants-configuration.cfg @@ -46,4 +46,4 @@ queryprofile[2].type "" queryprofile[2].property[0].name "a" queryprofile[2].property[0].value "a1" queryprofile[2].property[0].overridable "" -enableGroupingSessionCache true
\ No newline at end of file +enableGroupingSessionCache true diff --git a/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/query-profile-variants2-configuration.cfg b/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/query-profile-variants2-configuration.cfg index 0bce407e8ef..2f9879dc721 100644 --- a/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/query-profile-variants2-configuration.cfg +++ b/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/query-profile-variants2-configuration.cfg @@ -33,8 +33,8 @@ queryprofile[1].queryprofilevariant[0].property[0].value "default" queryprofile[1].queryprofilevariant[0].property[0].overridable "" queryprofile[1].queryprofilevariant[0].reference[0].name "model" queryprofile[1].queryprofilevariant[0].reference[0].value "querylove" -queryprofile[1].queryprofilevariant[1].fordimensionvalues[0] "*" queryprofile[1].queryprofilevariant[0].reference[0].overridable "" +queryprofile[1].queryprofilevariant[1].fordimensionvalues[0] "*" queryprofile[1].queryprofilevariant[1].fordimensionvalues[1] "default" queryprofile[1].queryprofilevariant[1].property[0].name "model.defaultIndex" queryprofile[1].queryprofilevariant[1].property[0].value "default" diff --git a/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/query-profiles.cfg b/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/query-profiles.cfg index 54997e152f3..18fc48fc7c9 100644 --- a/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/query-profiles.cfg +++ b/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/query-profiles.cfg @@ -103,4 +103,4 @@ queryprofiletype[2].field[0].type "string" queryprofiletype[2].field[0].overridable false queryprofiletype[2].field[0].mandatory false queryprofiletype[2].field[0].alias "" -enableGroupingSessionCache true
\ No newline at end of file +enableGroupingSessionCache true diff --git a/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/variants-of-explicit-compound-with-reference.cfg b/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/variants-of-explicit-compound-with-reference.cfg index fdfd1955491..bb125065671 100644 --- a/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/variants-of-explicit-compound-with-reference.cfg +++ b/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/variants-of-explicit-compound-with-reference.cfg @@ -20,9 +20,9 @@ queryprofile[2].queryprofilevariant[0].property[0].value "a.b.x1" queryprofile[2].queryprofilevariant[0].property[0].overridable "" queryprofile[2].queryprofilevariant[0].reference[0].name "a" queryprofile[2].queryprofilevariant[0].reference[0].value "a2" -queryprofile[2].queryprofilevariant[1].fordimensionvalues[0] "x2" queryprofile[2].queryprofilevariant[0].reference[0].overridable "" +queryprofile[2].queryprofilevariant[1].fordimensionvalues[0] "x2" queryprofile[2].queryprofilevariant[1].property[0].name "a.b" queryprofile[2].queryprofilevariant[1].property[0].value "a.b.x2" queryprofile[2].queryprofilevariant[1].property[0].overridable "" -enableGroupingSessionCache true
\ No newline at end of file +enableGroupingSessionCache true diff --git a/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/variants-of-explicit-compound.cfg b/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/variants-of-explicit-compound.cfg index 6f66a3bd441..f867ca9a56b 100644 --- a/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/variants-of-explicit-compound.cfg +++ b/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/variants-of-explicit-compound.cfg @@ -17,4 +17,4 @@ queryprofile[1].queryprofilevariant[1].fordimensionvalues[0] "x2" queryprofile[1].queryprofilevariant[1].property[0].name "a.b" queryprofile[1].queryprofilevariant[1].property[0].value "a.b.x2" queryprofile[1].queryprofilevariant[1].property[0].overridable "" -enableGroupingSessionCache true
\ No newline at end of file +enableGroupingSessionCache true diff --git a/config-model/src/test/java/helpers/CompareConfigTestHelper.java b/config-model/src/test/java/helpers/CompareConfigTestHelper.java index 18c0723c6a2..ba06ecc9397 100644 --- a/config-model/src/test/java/helpers/CompareConfigTestHelper.java +++ b/config-model/src/test/java/helpers/CompareConfigTestHelper.java @@ -19,6 +19,10 @@ import static org.junit.Assert.assertEquals; public class CompareConfigTestHelper { public static void assertSerializedConfigFileEquals(String filename, String actual) throws IOException { + IOUtils.writeFile(filename + ".actual", actual, false); + if (! actual.endsWith("\n")) { + IOUtils.writeFile(filename + ".actual", "\n", true); + } assertSerializedConfigEquals(IOUtils.readFile(new File(filename)), actual, false); } diff --git a/config-provisioning/src/main/java/com/yahoo/config/provision/Capacity.java b/config-provisioning/src/main/java/com/yahoo/config/provision/Capacity.java index 182b924e877..958a37e1432 100644 --- a/config-provisioning/src/main/java/com/yahoo/config/provision/Capacity.java +++ b/config-provisioning/src/main/java/com/yahoo/config/provision/Capacity.java @@ -58,8 +58,8 @@ public final class Capacity { */ public NodeType type() { return type; } - public Capacity withGroups(int groups) { - return new Capacity(min.withGroups(groups), max.withGroups(groups), required, canFail, type); + public Capacity withLimits(ClusterResources min, ClusterResources max) { + return new Capacity(min, max, required, canFail, type); } @Override diff --git a/config-provisioning/src/main/java/com/yahoo/config/provision/NodeResources.java b/config-provisioning/src/main/java/com/yahoo/config/provision/NodeResources.java index b887a2a93e6..5daaee4299e 100644 --- a/config-provisioning/src/main/java/com/yahoo/config/provision/NodeResources.java +++ b/config-provisioning/src/main/java/com/yahoo/config/provision/NodeResources.java @@ -17,6 +17,7 @@ public class NodeResources { 
private static final double diskUnitCost = 0.0003; private static final NodeResources zero = new NodeResources(0, 0, 0, 0); + private static final NodeResources unspecified = new NodeResources(0, 0, 0, 0); public enum DiskSpeed { @@ -125,46 +126,56 @@ public class NodeResources { } public NodeResources withVcpu(double vcpu) { + ensureSpecified(); if (vcpu == this.vcpu) return this; return new NodeResources(vcpu, memoryGb, diskGb, bandwidthGbps, diskSpeed, storageType); } public NodeResources withMemoryGb(double memoryGb) { + ensureSpecified(); if (memoryGb == this.memoryGb) return this; return new NodeResources(vcpu, memoryGb, diskGb, bandwidthGbps, diskSpeed, storageType); } public NodeResources withDiskGb(double diskGb) { + ensureSpecified(); if (diskGb == this.diskGb) return this; return new NodeResources(vcpu, memoryGb, diskGb, bandwidthGbps, diskSpeed, storageType); } public NodeResources withBandwidthGbps(double bandwidthGbps) { + ensureSpecified(); if (bandwidthGbps == this.bandwidthGbps) return this; return new NodeResources(vcpu, memoryGb, diskGb, bandwidthGbps, diskSpeed, storageType); } public NodeResources with(DiskSpeed diskSpeed) { + ensureSpecified(); if (diskSpeed == this.diskSpeed) return this; return new NodeResources(vcpu, memoryGb, diskGb, bandwidthGbps, diskSpeed, storageType); } public NodeResources with(StorageType storageType) { + ensureSpecified(); if (storageType == this.storageType) return this; return new NodeResources(vcpu, memoryGb, diskGb, bandwidthGbps, diskSpeed, storageType); } /** Returns this with disk speed and storage type set to any */ public NodeResources justNumbers() { + if (isUnspecified()) return unspecified(); return with(NodeResources.DiskSpeed.any).with(StorageType.any); } /** Returns this with all numbers set to 0 */ public NodeResources justNonNumbers() { + if (isUnspecified()) return unspecified(); return withVcpu(0).withMemoryGb(0).withDiskGb(0).withBandwidthGbps(0); } public NodeResources subtract(NodeResources 
other) { + ensureSpecified(); + other.ensureSpecified(); if ( ! this.isInterchangeableWith(other)) throw new IllegalArgumentException(this + " and " + other + " are not interchangeable"); return new NodeResources(vcpu - other.vcpu, @@ -176,6 +187,7 @@ public class NodeResources { } public NodeResources add(NodeResources other) { + ensureSpecified(); if ( ! this.isInterchangeableWith(other)) throw new IllegalArgumentException(this + " and " + other + " are not interchangeable"); return new NodeResources(vcpu + other.vcpu, @@ -187,6 +199,8 @@ public class NodeResources { } private boolean isInterchangeableWith(NodeResources other) { + ensureSpecified(); + other.ensureSpecified(); if (this.diskSpeed != DiskSpeed.any && other.diskSpeed != DiskSpeed.any && this.diskSpeed != other.diskSpeed) return false; if (this.storageType != StorageType.any && other.storageType != StorageType.any && this.storageType != other.storageType) @@ -248,6 +262,8 @@ public class NodeResources { /** Returns true if all the resources of this are the same or larger than the given resources */ public boolean satisfies(NodeResources other) { + ensureSpecified(); + other.ensureSpecified(); if (this.vcpu < other.vcpu) return false; if (this.memoryGb < other.memoryGb) return false; if (this.diskGb < other.diskGb) return false; @@ -276,9 +292,14 @@ public class NodeResources { return true; } - public static NodeResources unspecified() { return zero; } + public static NodeResources unspecified() { return unspecified; } - public boolean isUnspecified() { return this.equals(zero); } + public boolean isUnspecified() { return this == unspecified; } + + private void ensureSpecified() { + if (isUnspecified()) + throw new IllegalStateException("Cannot perform this on unspecified resources"); + } // Returns squared euclidean distance of the relevant numerical values of two node resources public double distanceTo(NodeResources other) { diff --git 
a/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/ConfigProxyRpcServer.java b/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/ConfigProxyRpcServer.java index c7f6530f81c..158df654439 100644 --- a/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/ConfigProxyRpcServer.java +++ b/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/ConfigProxyRpcServer.java @@ -30,10 +30,10 @@ import java.util.logging.Logger; * * @author hmusum */ -public class ConfigProxyRpcServer implements Runnable, TargetWatcher, RpcServer { +public class ConfigProxyRpcServer implements Runnable, TargetWatcher { private final static Logger log = Logger.getLogger(ConfigProxyRpcServer.class.getName()); - private static final int TRACELEVEL = 6; + static final int TRACELEVEL = 6; private final Spec spec; private final Supervisor supervisor; @@ -66,7 +66,7 @@ public class ConfigProxyRpcServer implements Runnable, TargetWatcher, RpcServer } catch (InterruptedException e) { throw new RuntimeException(e); } - supervisor.transport().shutdown(); + supervisor.transport().shutdown().join(); } Spec getSpec() { @@ -79,10 +79,6 @@ public class ConfigProxyRpcServer implements Runnable, TargetWatcher, RpcServer this::ping) .methodDesc("ping") .returnDesc(0, "ret code", "return code, 0 is OK")); - supervisor.addMethod(new Method("printStatistics", "", "s", - this::printStatistics) - .methodDesc("printStatistics") - .returnDesc(0, "statistics", "Statistics for server")); supervisor.addMethod(new Method("listCachedConfig", "", "S", this::listCachedConfig) .methodDesc("list cached configs)") @@ -145,26 +141,6 @@ public class ConfigProxyRpcServer implements Runnable, TargetWatcher, RpcServer }); } - /** - * Returns a String with statistics data for the server. 
- * - * @param req a Request - */ - private void printStatistics(Request req) { - dispatchRpcRequest(req, () -> { - StringBuilder sb = new StringBuilder(); - sb.append("\nDelayed responses queue size: "); - sb.append(proxyServer.delayedResponses().size()); - sb.append("\nContents: "); - for (DelayedResponse delayed : proxyServer.delayedResponses().responses()) { - sb.append(delayed.getRequest().toString()).append("\n"); - } - - req.returnValues().add(new StringValue(sb.toString())); - req.returnRequest(); - }); - } - private void listCachedConfig(Request req) { dispatchRpcRequest(req, () -> listCachedConfig(req, false)); } @@ -201,7 +177,7 @@ public class ConfigProxyRpcServer implements Runnable, TargetWatcher, RpcServer private void invalidateCache(Request req) { dispatchRpcRequest(req, () -> { - proxyServer.getMemoryCache().clear(); + proxyServer.memoryCache().clear(); String[] s = new String[2]; s[0] = "0"; s[1] = "success"; @@ -237,7 +213,7 @@ public class ConfigProxyRpcServer implements Runnable, TargetWatcher, RpcServer private void dumpCache(Request req) { dispatchRpcRequest(req, () -> { - final MemoryCache memoryCache = proxyServer.getMemoryCache(); + final MemoryCache memoryCache = proxyServer.memoryCache(); req.returnValues().add(new StringValue(memoryCache.dumpCacheToDisk(req.parameters().get(0).asString(), memoryCache))); req.returnRequest(); }); @@ -269,12 +245,13 @@ public class ConfigProxyRpcServer implements Runnable, TargetWatcher, RpcServer * @param request a Request */ private void getConfigImpl(JRTServerConfigRequest request) { + ResponseHandler responseHandler = new ResponseHandler(); request.getRequestTrace().trace(TRACELEVEL, "Config proxy getConfig()"); log.log(Level.FINE, () ->"getConfig: " + request.getShortDescription() + ",config checksums=" + request.getRequestConfigChecksums()); if (!request.validateParameters()) { // Error code is set in verifyParameters if parameters are not OK. 
log.log(Level.WARNING, "Parameters for request " + request + " did not validate: " + request.errorCode() + " : " + request.errorMessage()); - returnErrorResponse(request, request.errorCode(), "Parameters for request " + request.getShortDescription() + " did not validate: " + request.errorMessage()); + responseHandler.returnErrorResponse(request, request.errorCode(), "Parameters for request " + request.getShortDescription() + " did not validate: " + request.errorMessage()); return; } try { @@ -282,13 +259,13 @@ public class ConfigProxyRpcServer implements Runnable, TargetWatcher, RpcServer if (config == null) { log.log(Level.FINEST, () -> "No config received yet for " + request.getShortDescription() + ", not sending response"); } else if (ProxyServer.configOrGenerationHasChanged(config, request)) { - returnOkResponse(request, config); + responseHandler.returnOkResponse(request, config); } else { log.log(Level.FINEST, () -> "No new config for " + request.getShortDescription() + ", not sending response"); } } catch (Exception e) { e.printStackTrace(); - returnErrorResponse(request, com.yahoo.vespa.config.ErrorCode.INTERNAL_ERROR, e.getMessage()); + responseHandler.returnErrorResponse(request, com.yahoo.vespa.config.ErrorCode.INTERNAL_ERROR, e.getMessage()); } } @@ -302,7 +279,7 @@ public class ConfigProxyRpcServer implements Runnable, TargetWatcher, RpcServer private void listCachedConfig(Request req, boolean full) { String[] ret; - MemoryCache cache = proxyServer.getMemoryCache(); + MemoryCache cache = proxyServer.memoryCache(); ret = new String[cache.size()]; int i = 0; for (RawConfig config : cache.values()) { @@ -348,29 +325,4 @@ public class ConfigProxyRpcServer implements Runnable, TargetWatcher, RpcServer // requesting this config? 
} - public void returnOkResponse(JRTServerConfigRequest request, RawConfig config) { - request.getRequestTrace().trace(TRACELEVEL, "Config proxy returnOkResponse()"); - request.addOkResponse(config.getPayload(), - config.getGeneration(), - config.applyOnRestart(), - config.getPayloadChecksums()); - log.log(Level.FINE, () -> "Return response: " + request.getShortDescription() + ",config checksums=" + config.getPayloadChecksums() + - ",generation=" + config.getGeneration()); - log.log(Level.FINEST, () -> "Config payload in response for " + request.getShortDescription() + ":" + config.getPayload()); - - - // TODO Catch exception for now, since the request might have been returned in CheckDelayedResponse - // TODO Move logic so that all requests are returned in CheckDelayedResponse - try { - request.getRequest().returnRequest(); - } catch (IllegalStateException e) { - log.log(Level.FINE, () -> "Something bad happened when sending response for '" + request.getShortDescription() + "':" + e.getMessage()); - } - } - - public void returnErrorResponse(JRTServerConfigRequest request, int errorCode, String message) { - request.getRequestTrace().trace(TRACELEVEL, "Config proxy returnErrorResponse()"); - request.addErrorResponse(errorCode, message); - request.getRequest().returnRequest(); - } } diff --git a/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/ConfigSourceClient.java b/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/ConfigSourceClient.java index 6e5fe2d3fd8..dae732e56ec 100644 --- a/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/ConfigSourceClient.java +++ b/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/ConfigSourceClient.java @@ -16,7 +16,7 @@ interface ConfigSourceClient { RawConfig getConfig(RawConfig input, JRTServerConfigRequest request); - void cancel(); + void shutdown(); void shutdownSourceConnections(); @@ -26,4 +26,6 @@ interface ConfigSourceClient { DelayedResponses delayedResponses(); + MemoryCache memoryCache(); + } diff 
--git a/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/DelayedResponseHandler.java b/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/DelayedResponseHandler.java index f77bd4b9138..0e8ebe0d9c9 100644 --- a/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/DelayedResponseHandler.java +++ b/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/DelayedResponseHandler.java @@ -6,10 +6,12 @@ import com.yahoo.vespa.config.RawConfig; import com.yahoo.vespa.config.protocol.JRTServerConfigRequest; import com.yahoo.yolean.Exceptions; -import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; import java.util.logging.Level; import java.util.logging.Logger; +import static com.yahoo.protect.Process.logAndDie; + /** * The run method of this class is executed periodically to return delayed responses * (requests use long polling, so config proxy needs to return a response when they time out). @@ -22,12 +24,13 @@ public class DelayedResponseHandler implements Runnable { private final DelayedResponses delayedResponses; private final MemoryCache memoryCache; - private final RpcServer rpcServer; + private final ResponseHandler responseHandler; + private final AtomicLong sentResponses = new AtomicLong(); - DelayedResponseHandler(DelayedResponses delayedResponses, MemoryCache memoryCache, RpcServer rpcServer) { + DelayedResponseHandler(DelayedResponses delayedResponses, MemoryCache memoryCache, ResponseHandler responseHandler) { this.delayedResponses = delayedResponses; this.memoryCache = memoryCache; - this.rpcServer = rpcServer; + this.responseHandler = responseHandler; } @Override @@ -41,25 +44,27 @@ public class DelayedResponseHandler implements Runnable { log.log(Level.FINEST, () -> "Running DelayedResponseHandler. There are " + delayedResponses.size() + " delayed responses. 
First one is " + delayedResponses.responses().peek()); DelayedResponse response; - AtomicInteger i = new AtomicInteger(0); while ((response = delayedResponses.responses().poll()) != null) { JRTServerConfigRequest request = response.getRequest(); ConfigCacheKey cacheKey = new ConfigCacheKey(request.getConfigKey(), request.getRequestDefMd5()); RawConfig config = memoryCache.get(cacheKey); if (config != null) { - rpcServer.returnOkResponse(request, config); - i.incrementAndGet(); + responseHandler.returnOkResponse(request, config); + sentResponses.incrementAndGet(); } else { log.log(Level.WARNING, "Timed out (timeout " + request.getTimeout() + ") getting config " + request.getConfigKey() + ", will retry"); } } - log.log(Level.FINEST, () -> "Finished running DelayedResponseHandler. " + i.get() + " delayed responses sent in " + - (System.currentTimeMillis() - start) + " ms"); + log.log(Level.FINEST, () -> "Finished running DelayedResponseHandler. " + sentResponses.get() + + " delayed responses sent in " + (System.currentTimeMillis() - start) + " ms"); } catch (Exception e) { // To avoid thread throwing exception and executor never running this again log.log(Level.WARNING, "Got exception in DelayedResponseHandler: " + Exceptions.toMessageString(e)); } catch (Throwable e) { - com.yahoo.protect.Process.logAndDie("Got error in DelayedResponseHandler, exiting: " + Exceptions.toMessageString(e)); + logAndDie("Got error in DelayedResponseHandler, exiting: " + Exceptions.toMessageString(e)); } } + + public long sentResponses() { return sentResponses.get(); } + } diff --git a/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/MemoryCacheConfigClient.java b/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/MemoryCacheConfigClient.java index 6e90ad16f50..f1be03f07d4 100644 --- a/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/MemoryCacheConfigClient.java +++ b/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/MemoryCacheConfigClient.java @@ -1,12 +1,14 @@ 
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.vespa.config.proxy; -import java.util.logging.Level; -import com.yahoo.vespa.config.*; +import com.yahoo.vespa.config.ConfigCacheKey; +import com.yahoo.vespa.config.ConfigKey; +import com.yahoo.vespa.config.RawConfig; import com.yahoo.vespa.config.protocol.JRTServerConfigRequest; import java.util.Collections; import java.util.List; +import java.util.logging.Level; import java.util.logging.Logger; /** @@ -44,7 +46,7 @@ class MemoryCacheConfigClient implements ConfigSourceClient { } @Override - public void cancel() {} + public void shutdown() {} @Override public void shutdownSourceConnections() {} @@ -64,4 +66,7 @@ class MemoryCacheConfigClient implements ConfigSourceClient { return delayedResponses; } + @Override + public MemoryCache memoryCache() { return cache; } + } diff --git a/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/ProxyServer.java b/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/ProxyServer.java index d063c45a3f7..0ae02e4c17b 100644 --- a/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/ProxyServer.java +++ b/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/ProxyServer.java @@ -1,27 +1,27 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.vespa.config.proxy; +import com.yahoo.concurrent.DaemonThreadFactory; import com.yahoo.config.subscription.ConfigSourceSet; import com.yahoo.jrt.Spec; import com.yahoo.jrt.Supervisor; import com.yahoo.jrt.Transport; -import java.util.logging.Level; import com.yahoo.log.LogSetup; import com.yahoo.log.event.Event; -import com.yahoo.concurrent.DaemonThreadFactory; import com.yahoo.vespa.config.RawConfig; import com.yahoo.vespa.config.protocol.JRTServerConfigRequest; import com.yahoo.vespa.config.proxy.filedistribution.FileDistributionAndUrlDownload; import com.yahoo.yolean.system.CatchSignals; import java.util.List; +import java.util.Objects; import java.util.concurrent.Callable; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.logging.Level; import java.util.logging.Logger; import static com.yahoo.vespa.config.proxy.Mode.ModeName.DEFAULT; @@ -40,27 +40,24 @@ public class ProxyServer implements Runnable { private static final int JRT_TRANSPORT_THREADS = 4; static final String DEFAULT_PROXY_CONFIG_SOURCES = "tcp/localhost:19070"; - private final static Logger log = Logger.getLogger(ProxyServer.class.getName()); + private static final Logger log = Logger.getLogger(ProxyServer.class.getName()); + private final AtomicBoolean signalCaught = new AtomicBoolean(false); private final Supervisor supervisor; private final ConfigProxyRpcServer rpcServer; - private ConfigSourceSet configSource; - - private volatile ConfigSourceClient configClient; - - private final MemoryCache memoryCache; private final FileDistributionAndUrlDownload fileDistributionAndUrlDownload; + private ConfigSourceSet configSource; + private volatile ConfigSourceClient configClient; private volatile Mode mode = new Mode(DEFAULT); - ProxyServer(Spec spec, ConfigSourceSet 
source, MemoryCache memoryCache, ConfigSourceClient configClient) { - this.configSource = source; - supervisor = new Supervisor(new Transport("proxy-server", JRT_TRANSPORT_THREADS)).setDropEmptyBuffers(true); + ProxyServer(Spec spec, ConfigSourceSet source, ConfigSourceClient configClient) { + this.configSource = Objects.requireNonNull(source); log.log(Level.FINE, () -> "Using config source '" + source); - this.memoryCache = memoryCache; + this.supervisor = new Supervisor(new Transport("proxy-server", JRT_TRANSPORT_THREADS)).setDropEmptyBuffers(true); this.rpcServer = createRpcServer(spec); - this.configClient = (configClient == null) ? createRpcClient(rpcServer, source, memoryCache) : configClient; + this.configClient = Objects.requireNonNull(configClient); this.fileDistributionAndUrlDownload = new FileDistributionAndUrlDownload(supervisor, source); } @@ -97,12 +94,12 @@ public class ProxyServer implements Runnable { switch (newMode.getMode()) { case MEMORYCACHE: configClient.shutdownSourceConnections(); - configClient = new MemoryCacheConfigClient(memoryCache); + configClient = new MemoryCacheConfigClient(configClient.memoryCache()); this.mode = new Mode(modeName); break; case DEFAULT: flush(); - configClient = createRpcClient(rpcServer, configSource, memoryCache); + configClient = createRpcClient(configSource); this.mode = new Mode(modeName); break; default: @@ -115,8 +112,8 @@ public class ProxyServer implements Runnable { return (spec == null) ? 
null : new ConfigProxyRpcServer(this, supervisor, spec); // TODO: Try to avoid first argument being 'this' } - private static RpcConfigSourceClient createRpcClient(RpcServer rpcServer, ConfigSourceSet source, MemoryCache memoryCache) { - return new RpcConfigSourceClient(rpcServer, source, memoryCache); + private static RpcConfigSourceClient createRpcClient(ConfigSourceSet source) { + return new RpcConfigSourceClient(new ResponseHandler(), source); } private void setupSignalHandler() { @@ -159,7 +156,7 @@ public class ProxyServer implements Runnable { Event.started("configproxy"); ConfigSourceSet configSources = new ConfigSourceSet(properties.configSources); - ProxyServer proxyServer = new ProxyServer(new Spec(null, port), configSources, new MemoryCache(), null); + ProxyServer proxyServer = new ProxyServer(new Spec(null, port), configSources, createRpcClient(configSources)); // catch termination and interrupt signal proxyServer.setupSignalHandler(); Thread proxyserverThread = threadFactory.newThread(proxyServer); @@ -169,7 +166,8 @@ public class ProxyServer implements Runnable { } static Properties getSystemProperties() { - final String[] inputConfigSources = System.getProperty("proxyconfigsources", DEFAULT_PROXY_CONFIG_SOURCES).split(","); + String[] inputConfigSources = System.getProperty("proxyconfigsources", + DEFAULT_PROXY_CONFIG_SOURCES).split(","); return new Properties(inputConfigSources); } @@ -184,15 +182,15 @@ public class ProxyServer implements Runnable { // Cancels all config instances and flushes the cache. When this method returns, // the cache will not be updated again before someone calls getConfig(). 
private synchronized void flush() { - memoryCache.clear(); - configClient.cancel(); + configClient.memoryCache().clear(); + configClient.shutdown(); } void stop() { Event.stopping("configproxy", "shutdown rpcServer"); if (rpcServer != null) rpcServer.shutdown(); Event.stopping("configproxy", "cancel configClient"); - if (configClient != null) configClient.cancel(); + configClient.shutdown(); Event.stopping("configproxy", "flush"); flush(); Event.stopping("configproxy", "close fileDistribution"); @@ -200,8 +198,8 @@ public class ProxyServer implements Runnable { Event.stopping("configproxy", "stop complete"); } - MemoryCache getMemoryCache() { - return memoryCache; + MemoryCache memoryCache() { + return configClient.memoryCache(); } String getActiveSourceConnection() { @@ -215,7 +213,7 @@ public class ProxyServer implements Runnable { void updateSourceConnections(List<String> sources) { configSource = new ConfigSourceSet(sources); flush(); - configClient = createRpcClient(rpcServer, configSource, memoryCache); + configClient = createRpcClient(configSource); } DelayedResponses delayedResponses() { diff --git a/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/ResponseHandler.java b/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/ResponseHandler.java new file mode 100644 index 00000000000..c9cfbdd3e16 --- /dev/null +++ b/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/ResponseHandler.java @@ -0,0 +1,63 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+package com.yahoo.vespa.config.proxy; + +import com.yahoo.vespa.config.RawConfig; +import com.yahoo.vespa.config.protocol.JRTServerConfigRequest; + +import java.util.Optional; +import java.util.concurrent.atomic.AtomicLong; +import java.util.logging.Level; +import java.util.logging.Logger; + +import static com.yahoo.vespa.config.proxy.ConfigProxyRpcServer.TRACELEVEL; + +/** + * An RPC server that handles config and file distribution requests. + * + * @author hmusum + */ +public class ResponseHandler { + + private final Optional<AtomicLong> sentResponses; + + public ResponseHandler() { + this(false); + } + + // For testing only + ResponseHandler(boolean trackResponses) { + sentResponses = trackResponses ? Optional.of(new AtomicLong()) : Optional.empty(); + } + + private final static Logger log = Logger.getLogger(ResponseHandler.class.getName()); + + public void returnOkResponse(JRTServerConfigRequest request, RawConfig config) { + request.getRequestTrace().trace(TRACELEVEL, "Config proxy returnOkResponse()"); + request.addOkResponse(config.getPayload(), + config.getGeneration(), + config.applyOnRestart(), + config.getPayloadChecksums()); + log.log(Level.FINE, () -> "Return response: " + request.getShortDescription() + ",config checksums=" + config.getPayloadChecksums() + + ",generation=" + config.getGeneration()); + log.log(Level.FINEST, () -> "Config payload in response for " + request.getShortDescription() + ":" + config.getPayload()); + + + // TODO Catch exception for now, since the request might have been returned in CheckDelayedResponse + // TODO Move logic so that all requests are returned in CheckDelayedResponse + try { + request.getRequest().returnRequest(); + } catch (IllegalStateException e) { + log.log(Level.FINE, () -> "Something bad happened when sending response for '" + request.getShortDescription() + "':" + e.getMessage()); + } + sentResponses.ifPresent(AtomicLong::getAndIncrement); + } + + public void returnErrorResponse(JRTServerConfigRequest 
request, int errorCode, String message) { + request.getRequestTrace().trace(TRACELEVEL, "Config proxy returnErrorResponse()"); + request.addErrorResponse(errorCode, message); + request.getRequest().returnRequest(); + } + + public long sentResponses() { return sentResponses.map(AtomicLong::get).orElse(0L); } + +} diff --git a/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/RpcConfigSourceClient.java b/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/RpcConfigSourceClient.java index ab85c534251..e6a3d66f340 100644 --- a/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/RpcConfigSourceClient.java +++ b/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/RpcConfigSourceClient.java @@ -43,7 +43,7 @@ class RpcConfigSourceClient implements ConfigSourceClient, Runnable { private final Supervisor supervisor = new Supervisor(new Transport("config-source-client")); - private final RpcServer rpcServer; + private final ResponseHandler responseHandler; private final ConfigSourceSet configSourceSet; private final Map<ConfigCacheKey, Subscriber> activeSubscribers = new ConcurrentHashMap<>(); private final MemoryCache memoryCache; @@ -57,15 +57,15 @@ class RpcConfigSourceClient implements ConfigSourceClient, Runnable { Executors.newScheduledThreadPool(1, new DaemonThreadFactory("delayed responses")); private final ScheduledFuture<?> delayedResponsesFuture; - RpcConfigSourceClient(RpcServer rpcServer, ConfigSourceSet configSourceSet, MemoryCache memoryCache) { - this.rpcServer = rpcServer; + RpcConfigSourceClient(ResponseHandler responseHandler, ConfigSourceSet configSourceSet) { + this.responseHandler = responseHandler; this.configSourceSet = configSourceSet; - this.memoryCache = memoryCache; + this.memoryCache = new MemoryCache(); this.delayedResponses = new DelayedResponses(); checkConfigSources(); nextConfigFuture = nextConfigScheduler.scheduleAtFixedRate(this, 0, 10, MILLISECONDS); this.requesters = new JrtConfigRequesters(); - DelayedResponseHandler 
command = new DelayedResponseHandler(delayedResponses, memoryCache, rpcServer); + DelayedResponseHandler command = new DelayedResponseHandler(delayedResponses, memoryCache, responseHandler); this.delayedResponsesFuture = delayedResponsesScheduler.scheduleAtFixedRate(command, 5, 1, SECONDS); } @@ -163,7 +163,7 @@ class RpcConfigSourceClient implements ConfigSourceClient, Runnable { } @Override - public void cancel() { + public void shutdown() { log.log(Level.FINE, "shutdownSourceConnections"); shutdownSourceConnections(); log.log(Level.FINE, "delayedResponsesFuture.cancel"); @@ -230,7 +230,7 @@ class RpcConfigSourceClient implements ConfigSourceClient, Runnable { log.log(Level.FINE, () -> "Call returnOkResponse for " + key + "," + generation); if (config.getPayload().getData().getByteLength() == 0) log.log(Level.WARNING, () -> "Call returnOkResponse for " + key + "," + generation + " with empty config"); - rpcServer.returnOkResponse(request, config); + responseHandler.returnOkResponse(request, config); } else { log.log(Level.INFO, "Could not remove " + key + " from delayedResponses queue, already removed"); } @@ -243,9 +243,10 @@ class RpcConfigSourceClient implements ConfigSourceClient, Runnable { } @Override - public DelayedResponses delayedResponses() { - return delayedResponses; - } + public DelayedResponses delayedResponses() { return delayedResponses; } + + @Override + public MemoryCache memoryCache() { return memoryCache; } private void updateWithNewConfig(RawConfig newConfig) { log.log(Level.FINE, () -> "config to be returned for '" + newConfig.getKey() + diff --git a/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/filedistribution/FileDistributionAndUrlDownload.java b/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/filedistribution/FileDistributionAndUrlDownload.java index 625f1b5fe17..68570722117 100644 --- a/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/filedistribution/FileDistributionAndUrlDownload.java +++ 
b/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/filedistribution/FileDistributionAndUrlDownload.java @@ -4,8 +4,6 @@ package com.yahoo.vespa.config.proxy.filedistribution; import com.yahoo.concurrent.DaemonThreadFactory; import com.yahoo.config.subscription.ConfigSourceSet; import com.yahoo.jrt.Supervisor; -import com.yahoo.vespa.config.ConnectionPool; -import com.yahoo.vespa.config.JRTConnectionPool; import com.yahoo.vespa.filedistribution.FileDistributionConnectionPool; import com.yahoo.vespa.filedistribution.FileDownloader; @@ -29,9 +27,7 @@ public class FileDistributionAndUrlDownload { new ScheduledThreadPoolExecutor(1, new DaemonThreadFactory("file references and downloads cleanup")); public FileDistributionAndUrlDownload(Supervisor supervisor, ConfigSourceSet source) { - fileDistributionRpcServer = - new FileDistributionRpcServer(supervisor, - new FileDownloader(createConnectionPool(supervisor, source), supervisor, Duration.ofMinutes(5))); + fileDistributionRpcServer = new FileDistributionRpcServer(supervisor, createDownloader(supervisor, source)); urlDownloadRpcServer = new UrlDownloadRpcServer(supervisor); cleanupExecutor.scheduleAtFixedRate(new CachedFilesMaintainer(), delay.toSeconds(), delay.toSeconds(), TimeUnit.SECONDS); } @@ -48,12 +44,10 @@ public class FileDistributionAndUrlDownload { } } - private static ConnectionPool createConnectionPool(Supervisor supervisor, ConfigSourceSet source) { - String useFileDistributionConnectionPool = System.getenv("VESPA_CONFIG_PROXY_USE_FILE_DISTRIBUTION_CONNECTION_POOL"); - if (useFileDistributionConnectionPool != null && useFileDistributionConnectionPool.equalsIgnoreCase("true")) - return new FileDistributionConnectionPool(source, supervisor); - else - return new JRTConnectionPool(source, supervisor); + private FileDownloader createDownloader(Supervisor supervisor, ConfigSourceSet source) { + return new FileDownloader(new FileDistributionConnectionPool(source, supervisor), + supervisor, + 
Duration.ofMinutes(5)); } } diff --git a/config-proxy/src/test/java/com/yahoo/vespa/config/proxy/ConfigProxyRpcServerTest.java b/config-proxy/src/test/java/com/yahoo/vespa/config/proxy/ConfigProxyRpcServerTest.java index 1bcf8d5d8be..691bc6c43a7 100644 --- a/config-proxy/src/test/java/com/yahoo/vespa/config/proxy/ConfigProxyRpcServerTest.java +++ b/config-proxy/src/test/java/com/yahoo/vespa/config/proxy/ConfigProxyRpcServerTest.java @@ -92,7 +92,7 @@ public class ConfigProxyRpcServerTest { assertThat(ret.length, is(0)); final RawConfig config = ProxyServerTest.fooConfig; - server.proxyServer().getMemoryCache().update(config); + server.proxyServer().memoryCache().update(config); req = new Request("listCachedConfig"); client.invoke(req); assertFalse(req.errorMessage(), req.isError()); @@ -119,7 +119,7 @@ public class ConfigProxyRpcServerTest { assertThat(ret.length, is(0)); final RawConfig config = ProxyServerTest.fooConfig; - server.proxyServer().getMemoryCache().update(config); + server.proxyServer().memoryCache().update(config); req = new Request("listCachedConfigFull"); client.invoke(req); assertFalse(req.errorMessage(), req.isError()); @@ -133,7 +133,7 @@ public class ConfigProxyRpcServerTest { } /** - * Tests printStatistics RPC command + * Tests listSourceConnections RPC command */ @Test public void testRpcMethodListSourceConnections() throws ListenFailedException { @@ -151,20 +151,6 @@ public class ConfigProxyRpcServerTest { } /** - * Tests printStatistics RPC command - */ - @Test - public void testRpcMethodPrintStatistics() { - Request req = new Request("printStatistics"); - client.invoke(req); - assertFalse(req.errorMessage(), req.isError()); - assertThat(req.returnValues().size(), is(1)); - assertThat(req.returnValues().get(0).asString(), is("\n" + - "Delayed responses queue size: 0\n" + - "Contents: ")); - } - - /** * Tests invalidateCache RPC command */ @Test @@ -275,7 +261,7 @@ public class ConfigProxyRpcServerTest { } private static ProxyServer 
createTestServer(ConfigSourceSet source) { - return new ProxyServer(null, source, new MemoryCache(), null); + return new ProxyServer(null, source, new RpcConfigSourceClient(new ResponseHandler(), source)); } private static class TestServer implements AutoCloseable { diff --git a/config-proxy/src/test/java/com/yahoo/vespa/config/proxy/DelayedResponseHandlerTest.java b/config-proxy/src/test/java/com/yahoo/vespa/config/proxy/DelayedResponseHandlerTest.java index c2a0282fd05..8a668b34fd0 100644 --- a/config-proxy/src/test/java/com/yahoo/vespa/config/proxy/DelayedResponseHandlerTest.java +++ b/config-proxy/src/test/java/com/yahoo/vespa/config/proxy/DelayedResponseHandlerTest.java @@ -6,8 +6,7 @@ import org.junit.Rule; import org.junit.Test; import org.junit.rules.TemporaryFolder; -import static org.hamcrest.CoreMatchers.is; -import static org.junit.Assert.assertThat; +import static org.junit.Assert.assertEquals; /** * @author hmusum @@ -29,16 +28,15 @@ public class DelayedResponseHandlerTest { public void basic() { ConfigTester tester = new ConfigTester(); DelayedResponses delayedResponses = new DelayedResponses(); - final MockRpcServer mockRpcServer = new MockRpcServer(); - final MemoryCache memoryCache = new MemoryCache(); + MemoryCache memoryCache = new MemoryCache(); memoryCache.update(ConfigTester.fooConfig); - final DelayedResponseHandler delayedResponseHandler = new DelayedResponseHandler(delayedResponses, memoryCache, mockRpcServer); + DelayedResponseHandler delayedResponseHandler = new DelayedResponseHandler(delayedResponses, memoryCache, new ResponseHandler()); delayedResponses.add(new DelayedResponse(tester.createRequest(ProxyServerTest.fooConfig, 0))); delayedResponses.add(new DelayedResponse(tester.createRequest(ProxyServerTest.fooConfig, 1200000))); // should not be returned yet delayedResponses.add(new DelayedResponse(tester.createRequest(ProxyServerTest.errorConfig, 0))); // will not give a config when resolving 
delayedResponseHandler.checkDelayedResponses(); - assertThat(mockRpcServer.responses, is(1L)); + assertEquals(1, delayedResponseHandler.sentResponses()); } } diff --git a/config-proxy/src/test/java/com/yahoo/vespa/config/proxy/MemoryCacheConfigClientTest.java b/config-proxy/src/test/java/com/yahoo/vespa/config/proxy/MemoryCacheConfigClientTest.java index 51d0b983764..0b590aea789 100644 --- a/config-proxy/src/test/java/com/yahoo/vespa/config/proxy/MemoryCacheConfigClientTest.java +++ b/config-proxy/src/test/java/com/yahoo/vespa/config/proxy/MemoryCacheConfigClientTest.java @@ -16,9 +16,8 @@ public class MemoryCacheConfigClientTest { @Test public void basic() { - MemoryCache cache = new MemoryCache(); - cache.update(ConfigTester.fooConfig); - MemoryCacheConfigClient client = new MemoryCacheConfigClient(cache); + MemoryCacheConfigClient client = new MemoryCacheConfigClient(new MemoryCache()); + client.memoryCache().update(ConfigTester.fooConfig); assertThat(client.getConfig(ConfigTester.fooConfig, null), is(ConfigTester.fooConfig)); assertNull(client.getConfig(ConfigTester.barConfig, null)); diff --git a/config-proxy/src/test/java/com/yahoo/vespa/config/proxy/MockConfigSourceClient.java b/config-proxy/src/test/java/com/yahoo/vespa/config/proxy/MockConfigSourceClient.java index c0efc1cb355..d0724b9dbd0 100644 --- a/config-proxy/src/test/java/com/yahoo/vespa/config/proxy/MockConfigSourceClient.java +++ b/config-proxy/src/test/java/com/yahoo/vespa/config/proxy/MockConfigSourceClient.java @@ -18,9 +18,9 @@ public class MockConfigSourceClient implements ConfigSourceClient{ private final MemoryCache memoryCache; private final DelayedResponses delayedResponses = new DelayedResponses(); - MockConfigSourceClient(MockConfigSource configSource, MemoryCache memoryCache) { + MockConfigSourceClient(MockConfigSource configSource) { this.configSource = configSource; - this.memoryCache = memoryCache; + this.memoryCache = new MemoryCache(); } @Override @@ -35,7 +35,7 @@ public class 
MockConfigSourceClient implements ConfigSourceClient{ } @Override - public void cancel() { + public void shutdown() { configSource.clear(); } @@ -56,4 +56,7 @@ public class MockConfigSourceClient implements ConfigSourceClient{ @Override public DelayedResponses delayedResponses() { return delayedResponses; } + @Override + public MemoryCache memoryCache() { return memoryCache; } + } diff --git a/config-proxy/src/test/java/com/yahoo/vespa/config/proxy/MockRpcServer.java b/config-proxy/src/test/java/com/yahoo/vespa/config/proxy/MockRpcServer.java deleted file mode 100644 index 56fcca191de..00000000000 --- a/config-proxy/src/test/java/com/yahoo/vespa/config/proxy/MockRpcServer.java +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -package com.yahoo.vespa.config.proxy; - -import com.yahoo.vespa.config.RawConfig; -import com.yahoo.vespa.config.protocol.JRTServerConfigRequest; - -/** - * @author hmusum - */ -public class MockRpcServer implements RpcServer { - - volatile long responses = 0; - volatile long errorResponses = 0; - - public void returnOkResponse(JRTServerConfigRequest request, RawConfig config) { - responses++; - } - - public void returnErrorResponse(JRTServerConfigRequest request, int errorCode, String message) { - responses++; - errorResponses++; - } -} diff --git a/config-proxy/src/test/java/com/yahoo/vespa/config/proxy/ProxyServerTest.java b/config-proxy/src/test/java/com/yahoo/vespa/config/proxy/ProxyServerTest.java index cdda2bf6e77..15de93b748f 100644 --- a/config-proxy/src/test/java/com/yahoo/vespa/config/proxy/ProxyServerTest.java +++ b/config-proxy/src/test/java/com/yahoo/vespa/config/proxy/ProxyServerTest.java @@ -2,7 +2,10 @@ package com.yahoo.vespa.config.proxy; import com.yahoo.config.subscription.ConfigSourceSet; -import com.yahoo.vespa.config.*; +import com.yahoo.vespa.config.ConfigCacheKey; +import com.yahoo.vespa.config.ConfigKey; +import 
com.yahoo.vespa.config.ErrorCode; +import com.yahoo.vespa.config.RawConfig; import com.yahoo.vespa.config.protocol.JRTServerConfigRequest; import com.yahoo.vespa.config.protocol.Payload; import org.junit.After; @@ -25,9 +28,8 @@ import static org.junit.Assert.assertTrue; */ public class ProxyServerTest { - private final MemoryCache memoryCache = new MemoryCache(); private final MockConfigSource source = new MockConfigSource(); - private final MockConfigSourceClient client = new MockConfigSourceClient(source, memoryCache); + private final ConfigSourceClient client = new MockConfigSourceClient(source); private ProxyServer proxy; static final RawConfig fooConfig = ConfigTester.fooConfig; @@ -46,7 +48,7 @@ public class ProxyServerTest { source.clear(); source.put(fooConfig.getKey(), createConfigWithNextConfigGeneration(fooConfig, 0)); source.put(errorConfigKey, createConfigWithNextConfigGeneration(fooConfig, ErrorCode.UNKNOWN_DEFINITION)); - proxy = createTestServer(source, client, memoryCache); + proxy = createTestServer(source, client); } @After @@ -57,10 +59,10 @@ public class ProxyServerTest { @Test public void basic() { assertTrue(proxy.getMode().isDefault()); - assertThat(proxy.getMemoryCache().size(), is(0)); + assertThat(proxy.memoryCache().size(), is(0)); ConfigTester tester = new ConfigTester(); - final MemoryCache memoryCache = proxy.getMemoryCache(); + MemoryCache memoryCache = proxy.memoryCache(); assertEquals(0, memoryCache.size()); RawConfig res = proxy.resolveConfig(tester.createRequest(fooConfig)); assertNotNull(res); @@ -74,7 +76,7 @@ public class ProxyServerTest { */ @Test public void testModeSwitch() { - ProxyServer proxy = createTestServer(source, client, new MemoryCache()); + ProxyServer proxy = createTestServer(source, client); assertTrue(proxy.getMode().isDefault()); for (String mode : Mode.modes()) { @@ -109,7 +111,7 @@ public class ProxyServerTest { @Test public void testGetConfigAndCaching() { ConfigTester tester = new ConfigTester(); - final 
MemoryCache memoryCache = proxy.getMemoryCache(); + MemoryCache memoryCache = proxy.memoryCache(); assertEquals(0, memoryCache.size()); RawConfig res = proxy.resolveConfig(tester.createRequest(fooConfig)); assertNotNull(res); @@ -134,14 +136,14 @@ public class ProxyServerTest { // Simulate an error response source.put(fooConfig.getKey(), createConfigWithNextConfigGeneration(fooConfig, ErrorCode.INTERNAL_ERROR)); - final MemoryCache cacheManager = proxy.getMemoryCache(); - assertEquals(0, cacheManager.size()); + MemoryCache memoryCache = proxy.memoryCache(); + assertEquals(0, memoryCache.size()); RawConfig res = proxy.resolveConfig(tester.createRequest(fooConfig)); assertNotNull(res); assertNotNull(res.getPayload()); assertTrue(res.isError()); - assertEquals(0, cacheManager.size()); + assertEquals(0, memoryCache.size()); // Put a version of the same config into backend without error and see that it now works (i.e. we are // not getting a cached response (of the error in the previous request) @@ -152,12 +154,12 @@ public class ProxyServerTest { assertNotNull(res); assertNotNull(res.getPayload().getData()); assertThat(res.getPayload().toString(), is(ConfigTester.fooPayload.toString())); - assertEquals(1, cacheManager.size()); + assertEquals(1, memoryCache.size()); JRTServerConfigRequest newRequestBasedOnResponse = tester.createRequest(res); RawConfig res2 = proxy.resolveConfig(newRequestBasedOnResponse); assertFalse(ProxyServer.configOrGenerationHasChanged(res2, newRequestBasedOnResponse)); - assertEquals(1, cacheManager.size()); + assertEquals(1, memoryCache.size()); } /** @@ -169,7 +171,7 @@ public class ProxyServerTest { @Test public void testNoCachingOfEmptyConfig() { ConfigTester tester = new ConfigTester(); - MemoryCache cache = proxy.getMemoryCache(); + MemoryCache cache = proxy.memoryCache(); assertEquals(0, cache.size()); RawConfig res = proxy.resolveConfig(tester.createRequest(fooConfig)); @@ -222,10 +224,8 @@ public class ProxyServerTest { 
assertThat(properties.configSources[0], is(ProxyServer.DEFAULT_PROXY_CONFIG_SOURCES)); } - private static ProxyServer createTestServer(ConfigSourceSet source, - ConfigSourceClient configSourceClient, - MemoryCache memoryCache) { - return new ProxyServer(null, source, memoryCache, configSourceClient); + private static ProxyServer createTestServer(ConfigSourceSet source, ConfigSourceClient configSourceClient) { + return new ProxyServer(null, source, configSourceClient); } static RawConfig createConfigWithNextConfigGeneration(RawConfig config, int errorCode) { diff --git a/config-proxy/src/test/java/com/yahoo/vespa/config/proxy/RpcConfigSourceClientTest.java b/config-proxy/src/test/java/com/yahoo/vespa/config/proxy/RpcConfigSourceClientTest.java index 372c8c41c99..ada98f4b30e 100644 --- a/config-proxy/src/test/java/com/yahoo/vespa/config/proxy/RpcConfigSourceClientTest.java +++ b/config-proxy/src/test/java/com/yahoo/vespa/config/proxy/RpcConfigSourceClientTest.java @@ -17,7 +17,7 @@ import static org.junit.Assert.assertEquals; */ public class RpcConfigSourceClientTest { - private MockRpcServer rpcServer; + private ResponseHandler responseHandler; private RpcConfigSourceClient rpcConfigSourceClient; @Rule @@ -26,8 +26,8 @@ public class RpcConfigSourceClientTest { @Before public void setup() { - rpcServer = new MockRpcServer(); - rpcConfigSourceClient = new RpcConfigSourceClient(rpcServer, new MockConfigSource(), new MemoryCache()); + responseHandler = new ResponseHandler(true); + rpcConfigSourceClient = new RpcConfigSourceClient(responseHandler, new MockConfigSource()); } @Test @@ -90,7 +90,7 @@ public class RpcConfigSourceClientTest { } private void assertSentResponses(int expected) { - assertEquals(expected, rpcServer.responses); + assertEquals(expected, responseHandler.sentResponses()); } private void simulateClientRequestingConfig(RawConfig config) { diff --git a/config/src/main/java/com/yahoo/config/subscription/impl/JRTConfigRequester.java 
b/config/src/main/java/com/yahoo/config/subscription/impl/JRTConfigRequester.java index 0c4ac005bb6..fdfaf8b72fd 100644 --- a/config/src/main/java/com/yahoo/config/subscription/impl/JRTConfigRequester.java +++ b/config/src/main/java/com/yahoo/config/subscription/impl/JRTConfigRequester.java @@ -70,7 +70,7 @@ public class JRTConfigRequester implements RequestWaiter { this.connectionPool = connectionPool; this.timingValues = timingValues; // Adjust so that we wait 1 second with logging warning in case there are some errors just when starting up - timeForLastLogWarning = Instant.now().minus(delayBetweenWarnings.plus(Duration.ofSeconds(1))); + timeForLastLogWarning = Instant.now().minus(delayBetweenWarnings.plus(Duration.ofSeconds(5))); } /** diff --git a/config/src/test/java/com/yahoo/config/subscription/ConfigInstancePayloadTest.java b/config/src/test/java/com/yahoo/config/subscription/ConfigInstancePayloadTest.java index 983113ab2bf..3dc0f7188e3 100644 --- a/config/src/test/java/com/yahoo/config/subscription/ConfigInstancePayloadTest.java +++ b/config/src/test/java/com/yahoo/config/subscription/ConfigInstancePayloadTest.java @@ -71,13 +71,10 @@ public class ConfigInstancePayloadTest { intArr(310).intArr(311)). rootStruct(new RootStruct.Builder(). - inner0(new RootStruct.Inner0.Builder(). - index(11)). + inner0(b -> b.index(11)). inner1(new RootStruct.Inner1.Builder(). index(12)). - innerArr(new RootStruct.InnerArr.Builder(). - boolVal(true). - stringVal("deep")). + innerArr(b -> b.boolVal(true).stringVal("deep")). innerArr(new RootStruct.InnerArr.Builder(). boolVal(false). stringVal("blue a=\"escaped\""))). @@ -89,32 +86,29 @@ public class ConfigInstancePayloadTest { enumval(Myarray.Enumval.INNER). refval(":parent:"). fileVal("file0"). - anotherarray(new Myarray.Anotherarray.Builder(). - foo(7)). + anotherarray(b -> b.foo(7)). myStruct(new Myarray.MyStruct.Builder(). a(1). b(2))). - myarray(new Myarray.Builder(). + myarray(b -> b. intval(5). 
enumval(Myarray.Enumval.INNER). refval(":parent:"). fileVal("file1"). - anotherarray(new Myarray.Anotherarray.Builder(). - foo(1). - foo(2)). - myStruct(new Myarray.MyStruct.Builder(). - a(-1). - b(-2))). + anotherarray(bb -> bb.foo(1).foo(2)). + myStruct(bb -> bb. + a(-1). + b(-2))). myStructMap("one", new MyStructMap.Builder(). myInt(1). myString("bull"). myIntDef(2). myStringDef("bear"). - anotherMap("anotherOne", new MyStructMap.AnotherMap.Builder(). - anInt(3). - anIntDef(4))); + anotherMap("anotherOne", b -> b. + anInt(3). + anIntDef(4))); } @Test diff --git a/config/src/test/java/com/yahoo/config/subscription/ConfigInstanceUtilTest.java b/config/src/test/java/com/yahoo/config/subscription/ConfigInstanceUtilTest.java index dabfbffef98..aaf6782a6ff 100644 --- a/config/src/test/java/com/yahoo/config/subscription/ConfigInstanceUtilTest.java +++ b/config/src/test/java/com/yahoo/config/subscription/ConfigInstanceUtilTest.java @@ -32,15 +32,12 @@ public class ConfigInstanceUtilTest { .int_val(-1) .intarr(0) .doublearr(0.0) - .basicStruct(new FunctionTestConfig.BasicStruct.Builder() - .bar(-1) - .intArr(0)) - .myarray(new FunctionTestConfig.Myarray.Builder() + .basicStruct(b -> b.bar(-1).intArr(0)) + .myarray(b -> b .intval(-1) .refval("") .fileVal("") - .myStruct(new FunctionTestConfig.Myarray.MyStruct.Builder() - .a(0) + .myStruct(bb -> bb.a(0) )); ConfigInstanceUtil.setValues(destination, source); @@ -105,47 +102,34 @@ public class ConfigInstanceUtilTest { refarr(Arrays.asList(":parent:", ":parent", "parent:")). // test collection based setter fileArr("bin"). - basicStruct(new FunctionTestConfig.BasicStruct.Builder(). + basicStruct(b -> b. foo("basicFoo"). bar(3). intArr(310).intArr(311)). - rootStruct(new FunctionTestConfig.RootStruct.Builder(). - inner0(new FunctionTestConfig.RootStruct.Inner0.Builder(). - index(11)). - inner1(new FunctionTestConfig.RootStruct.Inner1.Builder(). - index(12)). - innerArr(new FunctionTestConfig.RootStruct.InnerArr.Builder(). 
- boolVal(true). - stringVal("deep")). - innerArr(new FunctionTestConfig.RootStruct.InnerArr.Builder(). - boolVal(false). - stringVal("blue a=\"escaped\""))). - - myarray(new FunctionTestConfig.Myarray.Builder(). + rootStruct(b -> b. + inner0(bb -> bb.index(11)). + inner1(bb -> bb.index(12)). + innerArr(bb -> bb.boolVal(true).stringVal("deep")). + innerArr(bb -> bb.boolVal(false).stringVal("blue a=\"escaped\""))). + + myarray(b -> b. intval(-5). stringval("baah"). stringval("yikes"). enumval(Myarray.Enumval.INNER). refval(":parent:"). fileVal("file0"). - anotherarray(new FunctionTestConfig.Myarray.Anotherarray.Builder(). - foo(7)). - myStruct(new FunctionTestConfig.Myarray.MyStruct.Builder(). - a(1). - b(2))). + anotherarray(bb -> bb.foo(7)). + myStruct(bb -> bb.a(1).b(2))). - myarray(new FunctionTestConfig.Myarray.Builder(). + myarray(b -> b. intval(5). enumval(Myarray.Enumval.INNER). refval(":parent:"). fileVal("file1"). - anotherarray(new FunctionTestConfig.Myarray.Anotherarray.Builder(). - foo(1). - foo(2)). - myStruct(new FunctionTestConfig.Myarray.MyStruct.Builder(). - a(-1). - b(-2))); + anotherarray(bb -> bb.foo(1).foo(2)). 
+ myStruct(bb -> bb.a(-1).b(-2))); } diff --git a/config/src/vespa/config/helper/configfetcher.cpp b/config/src/vespa/config/helper/configfetcher.cpp index 7a6f806c6ff..d85308bbcbb 100644 --- a/config/src/vespa/config/helper/configfetcher.cpp +++ b/config/src/vespa/config/helper/configfetcher.cpp @@ -9,9 +9,11 @@ LOG_SETUP(".config.helper.configfetcher"); namespace config { +VESPA_THREAD_STACK_TAG(config_fetcher_thread); + ConfigFetcher::ConfigFetcher(const IConfigContext::SP & context) : _poller(context), - _thread(std::make_unique<vespalib::Thread>(_poller)), + _thread(std::make_unique<vespalib::Thread>(_poller, config_fetcher_thread)), _closed(false), _started(false) { @@ -19,7 +21,7 @@ ConfigFetcher::ConfigFetcher(const IConfigContext::SP & context) ConfigFetcher::ConfigFetcher(const SourceSpec & spec) : _poller(std::make_shared<ConfigContext>(spec)), - _thread(std::make_unique<vespalib::Thread>(_poller)), + _thread(std::make_unique<vespalib::Thread>(_poller, config_fetcher_thread)), _closed(false), _started(false) { diff --git a/config/src/vespa/config/retriever/simpleconfigurer.cpp b/config/src/vespa/config/retriever/simpleconfigurer.cpp index 74022cfd6a3..5059b9997f5 100644 --- a/config/src/vespa/config/retriever/simpleconfigurer.cpp +++ b/config/src/vespa/config/retriever/simpleconfigurer.cpp @@ -8,10 +8,12 @@ LOG_SETUP(".config.retriever.simpleconfigurer"); namespace config { +VESPA_THREAD_STACK_TAG(simple_configurer_thread); + SimpleConfigurer::SimpleConfigurer(SimpleConfigRetriever::UP retriever, SimpleConfigurable * const configurable) : _retriever(std::move(retriever)), _configurable(configurable), - _thread(*this), + _thread(*this, simple_configurer_thread), _started(false) { assert(_retriever); diff --git a/configgen/src/main/java/com/yahoo/config/codegen/BuilderGenerator.java b/configgen/src/main/java/com/yahoo/config/codegen/BuilderGenerator.java index 548bd14ccb3..937d04b35bd 100644 --- 
a/configgen/src/main/java/com/yahoo/config/codegen/BuilderGenerator.java +++ b/configgen/src/main/java/com/yahoo/config/codegen/BuilderGenerator.java @@ -142,7 +142,19 @@ public class BuilderGenerator { return "public Builder " + n.getName() + "(" + builderType(n) + " " + INTERNAL_PREFIX + "builder) {\n" + // " " + n.getName() + " = " + INTERNAL_PREFIX + "builder;\n" + // " return this;\n" + // + "}\n" + // + "/**\n" + // + " * Make a new builder and run the supplied function on it before adding it to the list\n" + // + " * @param __func lambda that modifies the given builder\n" + // + " * @return this builder\n" + // + " */\n" + // + "public Builder " + n.getName() + "(java.util.function.Consumer<" + builderType(n) + "> __func) {\n" + // + " " + builderType(n) + " __inner = new " + builderType(n) +"();\n" + // + " __func.accept(__inner);\n" + // + " " + n.getName() + " = __inner;\n" + // + " return this;\n" + // "}"; + } private static String innerArraySetters(InnerCNode n) { @@ -157,6 +169,18 @@ public class BuilderGenerator { "}\n" + // "\n" + // "/**\n" + // + " * Make a new builder and run the supplied function on it before adding it to the list\n" + // + " * @param __func lambda that modifies the given builder\n" + // + " * @return this builder\n" + // + " */\n" + // + "public Builder " + n.getName() + "(java.util.function.Consumer<" + builderType(n) + "> __func) {\n" + // + " " + builderType(n) + " __inner = new " + builderType(n) +"();\n" + // + " __func.accept(__inner);\n" + // + " " + n.getName() + ".add(__inner);\n" + // + " return this;\n" + // + "}\n" + // + "\n" + // + "/**\n" + // " * Set the given list as this builder's list of " + nodeClass(n) + " builders\n" + // " * @param __builders a list of builders\n" + // " * @return this builder\n" + // @@ -195,8 +219,7 @@ public class BuilderGenerator { } private static String innerMapSetters(CNode n) { - return "public Builder " + n.getName() + "(String " + INTERNAL_PREFIX + "key, " + builderType(n) + " " 
+ INTERNAL_PREFIX - + "value) {\n" + // + String r = "public Builder " + n.getName() + "(String " + INTERNAL_PREFIX + "key, " + builderType(n) + " " + INTERNAL_PREFIX + "value) {\n" + // " " + n.getName() + ".put(" + INTERNAL_PREFIX + "key, " + INTERNAL_PREFIX + "value);\n" + // " return this;\n" + // "}\n" + // @@ -205,6 +228,22 @@ public class BuilderGenerator { " " + n.getName() + ".putAll(" + INTERNAL_PREFIX + "values);\n" + // " return this;\n" + // "}"; + if (n instanceof InnerCNode) { + r = r + + "\n\n" + // + "/**\n" + // + " * Make a new builder and run the supplied function on it before using it as the value\n" + // + " * @param __func lambda that modifies the given builder\n" + // + " * @return this builder\n" + // + " */\n" + // + "public Builder " + n.getName() + "(String __key, java.util.function.Consumer<" + builderType(n) + "> __func) {\n" + // + " " + builderType(n) + " __inner = new " + builderType(n) +"();\n" + // + " __func.accept(__inner);\n" + // + " " + n.getName() + ".put(__key, __inner);\n" + // + " return this;\n" + // + "}"; + } + return r; } private static String privateLeafMapSetter(CNode n) { diff --git a/configgen/src/test/java/com/yahoo/config/codegen/JavaClassBuilderTest.java b/configgen/src/test/java/com/yahoo/config/codegen/JavaClassBuilderTest.java index f9b36974e15..cce2d66b5f0 100644 --- a/configgen/src/test/java/com/yahoo/config/codegen/JavaClassBuilderTest.java +++ b/configgen/src/test/java/com/yahoo/config/codegen/JavaClassBuilderTest.java @@ -116,6 +116,7 @@ public class JavaClassBuilderTest { JavaClassBuilder builder = new JavaClassBuilder(root, parser.getNormalizedDefinition(), null, null); String[] configClassLines = builder.getConfigClass("AllfeaturesConfig").split("\n"); + for (var line : configClassLines) { System.out.println(line); } for (int i = 0; i < referenceClassLines.size(); i++) { if (configClassLines.length <= i) fail("Missing lines i generated config class. 
First missing line:\n" + referenceClassLines.get(i)); diff --git a/configgen/src/test/resources/allfeatures.reference b/configgen/src/test/resources/allfeatures.reference index e0b6176efa2..7698ea6727c 100644 --- a/configgen/src/test/resources/allfeatures.reference +++ b/configgen/src/test/resources/allfeatures.reference @@ -565,11 +565,33 @@ public final class AllfeaturesConfig extends ConfigInstance { basic_struct = __builder; return this; } + /** + * Make a new builder and run the supplied function on it before adding it to the list + * @param __func lambda that modifies the given builder + * @return this builder + */ + public Builder basic_struct(java.util.function.Consumer<Basic_struct.Builder> __func) { + Basic_struct.Builder __inner = new Basic_struct.Builder(); + __func.accept(__inner); + basic_struct = __inner; + return this; + } public Builder struct_of_struct(Struct_of_struct.Builder __builder) { struct_of_struct = __builder; return this; } + /** + * Make a new builder and run the supplied function on it before adding it to the list + * @param __func lambda that modifies the given builder + * @return this builder + */ + public Builder struct_of_struct(java.util.function.Consumer<Struct_of_struct.Builder> __func) { + Struct_of_struct.Builder __inner = new Struct_of_struct.Builder(); + __func.accept(__inner); + struct_of_struct = __inner; + return this; + } /** * Add the given builder to this builder's list of MyArray builders @@ -582,6 +604,18 @@ public final class AllfeaturesConfig extends ConfigInstance { } /** + * Make a new builder and run the supplied function on it before adding it to the list + * @param __func lambda that modifies the given builder + * @return this builder + */ + public Builder myArray(java.util.function.Consumer<MyArray.Builder> __func) { + MyArray.Builder __inner = new MyArray.Builder(); + __func.accept(__inner); + myArray.add(__inner); + return this; + } + + /** * Set the given list as this builder's list of MyArray builders * 
@param __builders a list of builders * @return this builder @@ -601,6 +635,18 @@ public final class AllfeaturesConfig extends ConfigInstance { return this; } + /** + * Make a new builder and run the supplied function on it before using it as the value + * @param __func lambda that modifies the given builder + * @return this builder + */ + public Builder myMap(String __key, java.util.function.Consumer<MyMap.Builder> __func) { + MyMap.Builder __inner = new MyMap.Builder(); + __func.accept(__inner); + myMap.put(__key, __inner); + return this; + } + private boolean _applyOnRestart = false; @java.lang.Override @@ -1305,11 +1351,33 @@ public final class AllfeaturesConfig extends ConfigInstance { inner0 = __builder; return this; } + /** + * Make a new builder and run the supplied function on it before adding it to the list + * @param __func lambda that modifies the given builder + * @return this builder + */ + public Builder inner0(java.util.function.Consumer<Inner0.Builder> __func) { + Inner0.Builder __inner = new Inner0.Builder(); + __func.accept(__inner); + inner0 = __inner; + return this; + } public Builder inner1(Inner1.Builder __builder) { inner1 = __builder; return this; } + /** + * Make a new builder and run the supplied function on it before adding it to the list + * @param __func lambda that modifies the given builder + * @return this builder + */ + public Builder inner1(java.util.function.Consumer<Inner1.Builder> __func) { + Inner1.Builder __inner = new Inner1.Builder(); + __func.accept(__inner); + inner1 = __inner; + return this; + } public Struct_of_struct build() { return new Struct_of_struct(this); @@ -1616,6 +1684,18 @@ public final class AllfeaturesConfig extends ConfigInstance { } /** + * Make a new builder and run the supplied function on it before adding it to the list + * @param __func lambda that modifies the given builder + * @return this builder + */ + public Builder anotherArray(java.util.function.Consumer<AnotherArray.Builder> __func) { + 
AnotherArray.Builder __inner = new AnotherArray.Builder(); + __func.accept(__inner); + anotherArray.add(__inner); + return this; + } + + /** * Set the given list as this builder's list of AnotherArray builders * @param __builders a list of builders * @return this builder @@ -1914,6 +1994,18 @@ public final class AllfeaturesConfig extends ConfigInstance { } /** + * Make a new builder and run the supplied function on it before adding it to the list + * @param __func lambda that modifies the given builder + * @return this builder + */ + public Builder anotherArray(java.util.function.Consumer<AnotherArray.Builder> __func) { + AnotherArray.Builder __inner = new AnotherArray.Builder(); + __func.accept(__inner); + anotherArray.add(__inner); + return this; + } + + /** * Set the given list as this builder's list of AnotherArray builders * @param __builders a list of builders * @return this builder diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/ApplicationRepository.java b/configserver/src/main/java/com/yahoo/vespa/config/server/ApplicationRepository.java index 00dc1f4d065..69098ea0030 100644 --- a/configserver/src/main/java/com/yahoo/vespa/config/server/ApplicationRepository.java +++ b/configserver/src/main/java/com/yahoo/vespa/config/server/ApplicationRepository.java @@ -1081,7 +1081,7 @@ public class ApplicationRepository implements com.yahoo.config.provision.Deploye public double getQuotaUsageRate(ApplicationId applicationId) { var application = getApplication(applicationId); return application.getModel().provisioned().all().values().stream() - .map(Capacity::maxResources) + .map(Capacity::maxResources)// TODO: This may be unspecified -> 0 .mapToDouble(resources -> resources.nodes() * resources.nodeResources().cost()) .sum(); } diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java b/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java index a7deac443a5..1b55d17fd36 100644 
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java +++ b/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java @@ -201,6 +201,7 @@ public class ModelContextImpl implements ModelContext { private final boolean ignoreThreadStackSizes; private final boolean unorderedMergeChaining; private final boolean useV8GeoPositions; + private final boolean useV8DocManagerCfg; public FeatureFlags(FlagSource source, ApplicationId appId) { this.defaultTermwiseLimit = flagValue(source, appId, Flags.DEFAULT_TERM_WISE_LIMIT); @@ -240,7 +241,7 @@ public class ModelContextImpl implements ModelContext { this.ignoreThreadStackSizes = flagValue(source, appId, Flags.IGNORE_THREAD_STACK_SIZES); this.unorderedMergeChaining = flagValue(source, appId, Flags.UNORDERED_MERGE_CHAINING); this.useV8GeoPositions = flagValue(source, appId, Flags.USE_V8_GEO_POSITIONS); - + this.useV8DocManagerCfg = flagValue(source, appId, Flags.USE_V8_DOC_MANAGER_CFG); } @Override public double defaultTermwiseLimit() { return defaultTermwiseLimit; } @@ -282,6 +283,7 @@ public class ModelContextImpl implements ModelContext { @Override public boolean ignoreThreadStackSizes() { return ignoreThreadStackSizes; } @Override public boolean unorderedMergeChaining() { return unorderedMergeChaining; } @Override public boolean useV8GeoPositions() { return useV8GeoPositions; } + @Override public boolean useV8DocManagerCfg() { return useV8DocManagerCfg; } private static <V> V flagValue(FlagSource source, ApplicationId appId, UnboundFlag<? 
extends V, ?, ?> flag) { return flag.bindTo(source) diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileServer.java b/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileServer.java index 5f3999a9fdf..b6a7efd3d4d 100644 --- a/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileServer.java +++ b/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileServer.java @@ -12,7 +12,6 @@ import com.yahoo.jrt.StringValue; import com.yahoo.jrt.Supervisor; import com.yahoo.jrt.Transport; import com.yahoo.vespa.config.ConnectionPool; -import com.yahoo.vespa.config.JRTConnectionPool; import com.yahoo.vespa.defaults.Defaults; import com.yahoo.vespa.filedistribution.CompressedFileReference; import com.yahoo.vespa.filedistribution.EmptyFileReferenceData; @@ -22,8 +21,6 @@ import com.yahoo.vespa.filedistribution.FileReferenceData; import com.yahoo.vespa.filedistribution.FileReferenceDownload; import com.yahoo.vespa.filedistribution.LazyFileReferenceData; import com.yahoo.vespa.filedistribution.LazyTemporaryStorageFileReferenceData; -import com.yahoo.vespa.flags.FlagSource; -import com.yahoo.vespa.flags.Flags; import com.yahoo.yolean.Exceptions; import java.io.File; @@ -40,6 +37,7 @@ import java.util.logging.Logger; import static com.yahoo.vespa.config.server.filedistribution.FileDistributionUtil.getOtherConfigServersInCluster; public class FileServer { + private static final Logger log = Logger.getLogger(FileServer.class.getName()); private final FileDirectory root; @@ -77,15 +75,14 @@ public class FileServer { @SuppressWarnings("WeakerAccess") // Created by dependency injection @Inject - public FileServer(ConfigserverConfig configserverConfig, FlagSource flagSource) { + public FileServer(ConfigserverConfig configserverConfig) { this(new File(Defaults.getDefaults().underVespaHome(configserverConfig.fileReferencesDir())), - 
createFileDownloader(getOtherConfigServersInCluster(configserverConfig), - Flags.USE_FILE_DISTRIBUTION_CONNECTION_POOL.bindTo(flagSource).value())); + createFileDownloader(getOtherConfigServersInCluster(configserverConfig))); } // For testing only public FileServer(File rootDir) { - this(rootDir, createFileDownloader(List.of(), true)); + this(rootDir, createFileDownloader(List.of())); } public FileServer(File rootDir, FileDownloader fileDownloader) { @@ -212,22 +209,19 @@ public class FileServer { executor.shutdown(); } - private static FileDownloader createFileDownloader(List<String> configServers, boolean useFileDistributionConnectionPool) { + private static FileDownloader createFileDownloader(List<String> configServers) { Supervisor supervisor = new Supervisor(new Transport("filedistribution-pool")).setDropEmptyBuffers(true); return new FileDownloader(configServers.isEmpty() ? FileDownloader.emptyConnectionPool() - : createConnectionPool(configServers, supervisor, useFileDistributionConnectionPool), + : createConnectionPool(configServers, supervisor), supervisor); } - private static ConnectionPool createConnectionPool(List<String> configServers, Supervisor supervisor, boolean useFileDistributionConnectionPool) { + private static ConnectionPool createConnectionPool(List<String> configServers, Supervisor supervisor) { ConfigSourceSet configSourceSet = new ConfigSourceSet(configServers); - if (configServers.size() == 0) return FileDownloader.emptyConnectionPool(); - return useFileDistributionConnectionPool - ? 
new FileDistributionConnectionPool(configSourceSet, supervisor) - : new JRTConnectionPool(configSourceSet, supervisor); + return new FileDistributionConnectionPool(configSourceSet, supervisor); } } diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/http/v1/RoutingStatusApiHandler.java b/configserver/src/main/java/com/yahoo/vespa/config/server/http/v1/RoutingStatusApiHandler.java index 0dc7dbda9a1..43ed16ab21c 100644 --- a/configserver/src/main/java/com/yahoo/vespa/config/server/http/v1/RoutingStatusApiHandler.java +++ b/configserver/src/main/java/com/yahoo/vespa/config/server/http/v1/RoutingStatusApiHandler.java @@ -3,8 +3,6 @@ package com.yahoo.vespa.config.server.http.v1; import com.google.inject.Inject; import com.yahoo.config.provision.ApplicationId; -import com.yahoo.config.provision.Deployer; -import com.yahoo.config.provision.Deployment; import com.yahoo.jdisc.http.HttpRequest; import com.yahoo.path.Path; import com.yahoo.restapi.RestApi; @@ -20,7 +18,6 @@ import com.yahoo.vespa.curator.transaction.CuratorTransaction; import com.yahoo.yolean.Exceptions; import java.time.Clock; -import java.time.Duration; import java.time.Instant; import java.util.Arrays; import java.util.List; @@ -50,18 +47,16 @@ public class RoutingStatusApiHandler extends RestApiRequestHandler<RoutingStatus private final Curator curator; private final Clock clock; - private final Deployer deployer; @Inject - public RoutingStatusApiHandler(Context context, Curator curator, Deployer deployer) { - this(context, curator, Clock.systemUTC(), deployer); + public RoutingStatusApiHandler(Context context, Curator curator) { + this(context, curator, Clock.systemUTC()); } - RoutingStatusApiHandler(Context context, Curator curator, Clock clock, Deployer deployer) { + RoutingStatusApiHandler(Context context, Curator curator, Clock clock) { super(context, RoutingStatusApiHandler::createRestApiDefinition); this.curator = Objects.requireNonNull(curator); this.clock = 
Objects.requireNonNull(clock); - this.deployer = Objects.requireNonNull(deployer); curator.create(DEPLOYMENT_STATUS_ROOT); } @@ -113,28 +108,17 @@ public class RoutingStatusApiHandler extends RestApiRequestHandler<RoutingStatus RestApi.RequestContext.RequestContent requestContent = context.requestContentOrThrow(); Slime requestBody = Exceptions.uncheck(() -> SlimeUtils.jsonToSlime(requestContent.content().readAllBytes())); DeploymentRoutingStatus wantedStatus = deploymentRoutingStatusFromSlime(requestBody, clock.instant()); - DeploymentRoutingStatus currentStatus = deploymentStatus(upstreamNames.iterator().next()); - + List<DeploymentRoutingStatus> currentStatuses = upstreamNames.stream() + .map(this::deploymentStatus) + .collect(Collectors.toList()); + DeploymentRoutingStatus currentStatus = currentStatuses.get(0); // Redeploy application so that a new LbServicesConfig containing the updated status is generated and consumed - // by routing layer. This is required to update weights for application endpoints when routing status for a - // deployment is changed + // by routing layer. This is required to update status of upstreams in application endpoints log.log(Level.INFO, "Changing routing status of " + instance + " from " + currentStatus.status() + " to " + wantedStatus.status()); - changeStatus(upstreamNames, wantedStatus); - try { - Optional<Deployment> deployment = deployer.deployFromLocalActive(instance, Duration.ofMinutes(1)); - if (deployment.isEmpty()) throw new IllegalArgumentException("No deployment of " + instance + " found"); - deployment.get().activate(); - } catch (Exception e) { - log.log(Level.SEVERE, "Failed to redeploy " + instance + ". 
Reverting routing status to " + - currentStatus.status(), e); - changeStatus(upstreamNames, currentStatus); - throw new RestApiException.InternalServerError("Failed to change status to " + - wantedStatus.status() + ", reverting to " - + currentStatus.status() + - " because redeployment of " + - instance + " failed: " + - Exceptions.toMessageString(e)); + boolean needsChange = currentStatuses.stream().anyMatch(status -> status.status() != wantedStatus.status()); + if (needsChange) { + changeStatus(upstreamNames, wantedStatus); } return new SlimeJsonResponse(toSlime(wantedStatus)); } diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/ApplicationPackageMaintainer.java b/configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/ApplicationPackageMaintainer.java index cf082cb32b1..53007566a62 100644 --- a/configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/ApplicationPackageMaintainer.java +++ b/configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/ApplicationPackageMaintainer.java @@ -8,18 +8,16 @@ import com.yahoo.config.subscription.ConfigSourceSet; import com.yahoo.jrt.Supervisor; import com.yahoo.jrt.Transport; import com.yahoo.vespa.config.ConnectionPool; -import com.yahoo.vespa.config.JRTConnectionPool; import com.yahoo.vespa.config.server.ApplicationRepository; import com.yahoo.vespa.config.server.session.Session; import com.yahoo.vespa.config.server.session.SessionRepository; import com.yahoo.vespa.config.server.tenant.Tenant; import com.yahoo.vespa.curator.Curator; import com.yahoo.vespa.defaults.Defaults; +import com.yahoo.vespa.filedistribution.FileDistributionConnectionPool; import com.yahoo.vespa.filedistribution.FileDownloader; import com.yahoo.vespa.filedistribution.FileReferenceDownload; -import com.yahoo.vespa.filedistribution.FileDistributionConnectionPool; import com.yahoo.vespa.flags.FlagSource; -import com.yahoo.vespa.flags.Flags; import java.io.File; import 
java.time.Duration; @@ -54,11 +52,7 @@ public class ApplicationPackageMaintainer extends ConfigServerMaintainer { this.applicationRepository = applicationRepository; this.configserverConfig = applicationRepository.configserverConfig(); this.downloadDirectory = new File(Defaults.getDefaults().underVespaHome(configserverConfig.fileReferencesDir())); - boolean useFileDistributionConnectionPool = Flags.USE_FILE_DISTRIBUTION_CONNECTION_POOL.bindTo(flagSource).value(); - this.fileDownloader = createFileDownloader(configserverConfig, - useFileDistributionConnectionPool, - downloadDirectory, - supervisor); + this.fileDownloader = createFileDownloader(configserverConfig, downloadDirectory, supervisor); } @Override @@ -99,19 +93,14 @@ public class ApplicationPackageMaintainer extends ConfigServerMaintainer { } private static FileDownloader createFileDownloader(ConfigserverConfig configserverConfig, - boolean useFileDistributionConnectionPool, File downloadDirectory, Supervisor supervisor) { List<String> otherConfigServersInCluster = getOtherConfigServersInCluster(configserverConfig); ConfigSourceSet configSourceSet = new ConfigSourceSet(otherConfigServersInCluster); - ConnectionPool connectionPool; - if (otherConfigServersInCluster.isEmpty()) - connectionPool = FileDownloader.emptyConnectionPool(); - else - connectionPool = useFileDistributionConnectionPool - ? new FileDistributionConnectionPool(configSourceSet, supervisor) - : new JRTConnectionPool(configSourceSet, supervisor); + ConnectionPool connectionPool = (otherConfigServersInCluster.isEmpty()) + ? 
FileDownloader.emptyConnectionPool() + : new FileDistributionConnectionPool(configSourceSet, supervisor); return new FileDownloader(connectionPool, supervisor, downloadDirectory, Duration.ofSeconds(30)); } diff --git a/configserver/src/main/sh/start-configserver b/configserver/src/main/sh/start-configserver index efee86be29f..317af4b2fea 100755 --- a/configserver/src/main/sh/start-configserver +++ b/configserver/src/main/sh/start-configserver @@ -177,7 +177,6 @@ vespa-run-as-vespa-user vespa-runserver -s configserver -r 30 -p $pidfile -- \ --add-opens=java.base/java.nio=ALL-UNNAMED \ --add-opens=java.base/jdk.internal.loader=ALL-UNNAMED \ --add-opens=java.base/sun.security.ssl=ALL-UNNAMED \ - --add-opens=java.base/sun.security.util=ALL-UNNAMED \ -Djava.io.tmpdir=${VESPA_HOME}/tmp \ -Djava.library.path=${VESPA_HOME}/lib64 \ -Djava.awt.headless=true \ diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/filedistribution/FileServerTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/filedistribution/FileServerTest.java index f85ca37a351..29ec11bad26 100644 --- a/configserver/src/test/java/com/yahoo/vespa/config/server/filedistribution/FileServerTest.java +++ b/configserver/src/test/java/com/yahoo/vespa/config/server/filedistribution/FileServerTest.java @@ -10,7 +10,6 @@ import com.yahoo.net.HostName; import com.yahoo.vespa.filedistribution.FileDownloader; import com.yahoo.vespa.filedistribution.FileReferenceData; import com.yahoo.vespa.filedistribution.FileReferenceDownload; -import com.yahoo.vespa.flags.InMemoryFlagSource; import org.junit.Before; import org.junit.Rule; import org.junit.Test; @@ -118,7 +117,7 @@ public class FileServerTest { private FileServer createFileServer(ConfigserverConfig.Builder configBuilder) throws IOException { File fileReferencesDir = temporaryFolder.newFolder(); configBuilder.fileReferencesDir(fileReferencesDir.getAbsolutePath()); - return new FileServer(new ConfigserverConfig(configBuilder), new 
InMemoryFlagSource()); + return new FileServer(new ConfigserverConfig(configBuilder)); } private static class FileReceiver implements FileServer.Receiver { diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/http/v1/RoutingStatusApiHandlerTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/http/v1/RoutingStatusApiHandlerTest.java index 8dd7cf4d6fc..e2b45d33cbc 100644 --- a/configserver/src/test/java/com/yahoo/vespa/config/server/http/v1/RoutingStatusApiHandlerTest.java +++ b/configserver/src/test/java/com/yahoo/vespa/config/server/http/v1/RoutingStatusApiHandlerTest.java @@ -2,9 +2,6 @@ package com.yahoo.vespa.config.server.http.v1; import com.yahoo.config.provision.ApplicationId; -import com.yahoo.config.provision.Deployer; -import com.yahoo.config.provision.Deployment; -import com.yahoo.config.provision.HostFilter; import com.yahoo.container.jdisc.HttpRequestBuilder; import com.yahoo.container.jdisc.HttpResponse; import com.yahoo.jdisc.http.HttpRequest.Method; @@ -19,17 +16,11 @@ import org.junit.Test; import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.nio.charset.StandardCharsets; -import java.time.Clock; -import java.time.Duration; import java.time.Instant; -import java.util.HashMap; import java.util.List; -import java.util.Map; -import java.util.Optional; import static com.yahoo.yolean.Exceptions.uncheck; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; /** * @author bjorncs @@ -42,7 +33,6 @@ public class RoutingStatusApiHandlerTest { private final Curator curator = new MockCurator(); private final ManualClock clock = new ManualClock(); - private final MockDeployer deployer = new MockDeployer(clock); private RestApiTestDriver testDriver; @@ -50,8 +40,7 @@ public class RoutingStatusApiHandlerTest { public void before() { RoutingStatusApiHandler requestHandler = new RoutingStatusApiHandler(RestApiTestDriver.createHandlerTestContext(), curator, - clock, 
- deployer); + clock); testDriver = RestApiTestDriver.newBuilder(requestHandler).build(); } @@ -77,14 +66,6 @@ public class RoutingStatusApiHandlerTest { String response = responseAsString(executeRequest(Method.PUT, "/routing/v1/status/" + upstreamName + "?application=" + instance.serializedForm(), statusOut())); assertEquals(response("OUT", "issue-XXX", "operator", clock.instant()), response); - assertTrue("Re-deployed " + instance, deployer.lastDeployed.containsKey(instance)); - - // Status is reverted if redeployment fails - deployer.failNextDeployment(true); - response = responseAsString(executeRequest(Method.PUT, "/routing/v1/status/" + upstreamName + "?application=" + instance.serializedForm(), - requestContent("IN", "all good"))); - assertEquals("{\"error-code\":\"INTERNAL_SERVER_ERROR\",\"message\":\"Failed to change status to in, reverting to out because redeployment of t1.a1.i1 failed: Deployment failed\"}", - response); // Read status stored in old format (path exists, but without content) curator.set(Path.fromString("/routing/v1/status/" + upstreamName), new byte[0]); @@ -92,7 +73,6 @@ public class RoutingStatusApiHandlerTest { assertEquals(response("OUT", "", "", clock.instant()), response); // Change status of multiple upstreams - deployer.failNextDeployment(false); String upstreamName2 = "upstream2"; String upstreams = upstreamName + "," + upstreamName2 + "," + upstreamName2; response = responseAsString(executeRequest(Method.PUT, "/routing/v1/status/" + upstreams + "?application=" + instance.serializedForm(), @@ -172,57 +152,4 @@ public class RoutingStatusApiHandlerTest { return "{\"status\":\"" + status + "\",\"cause\":\"" + reason + "\",\"agent\":\"" + agent + "\",\"lastUpdate\":" + instant.getEpochSecond() + "}"; } - private static class MockDeployer implements Deployer { - - private final Map<ApplicationId, Instant> lastDeployed = new HashMap<>(); - private final Clock clock; - - private boolean failNextDeployment = false; - - public 
MockDeployer(Clock clock) { - this.clock = clock; - } - - public MockDeployer failNextDeployment(boolean fail) { - this.failNextDeployment = fail; - return this; - } - - @Override - public Optional<Deployment> deployFromLocalActive(ApplicationId application, boolean bootstrap) { - return deployFromLocalActive(application, Duration.ZERO, false); - } - - @Override - public Optional<Deployment> deployFromLocalActive(ApplicationId application, Duration timeout, boolean bootstrap) { - if (failNextDeployment) { - throw new RuntimeException("Deployment failed"); - } - return Optional.of(new Deployment() { - @Override - public void prepare() {} - - @Override - public long activate() { - lastDeployed.put(application, clock.instant()); - return 1L; - } - - @Override - public void restart(HostFilter filter) {} - }); - } - - @Override - public Optional<Instant> lastDeployTime(ApplicationId application) { - return Optional.ofNullable(lastDeployed.get(application)); - } - - @Override - public Duration serverDeployTimeout() { - return Duration.ZERO; - } - - } - } diff --git a/container-core/abi-spec.json b/container-core/abi-spec.json index 6bee1f2b4fb..a6783d1e5f5 100644 --- a/container-core/abi-spec.json +++ b/container-core/abi-spec.json @@ -1043,16 +1043,23 @@ "public com.yahoo.jdisc.http.ConnectorConfig$Builder tcpKeepAliveEnabled(boolean)", "public com.yahoo.jdisc.http.ConnectorConfig$Builder tcpNoDelay(boolean)", "public com.yahoo.jdisc.http.ConnectorConfig$Builder throttling(com.yahoo.jdisc.http.ConnectorConfig$Throttling$Builder)", + "public com.yahoo.jdisc.http.ConnectorConfig$Builder throttling(java.util.function.Consumer)", "public com.yahoo.jdisc.http.ConnectorConfig$Builder implicitTlsEnabled(boolean)", "public com.yahoo.jdisc.http.ConnectorConfig$Builder ssl(com.yahoo.jdisc.http.ConnectorConfig$Ssl$Builder)", + "public com.yahoo.jdisc.http.ConnectorConfig$Builder ssl(java.util.function.Consumer)", "public com.yahoo.jdisc.http.ConnectorConfig$Builder 
tlsClientAuthEnforcer(com.yahoo.jdisc.http.ConnectorConfig$TlsClientAuthEnforcer$Builder)", + "public com.yahoo.jdisc.http.ConnectorConfig$Builder tlsClientAuthEnforcer(java.util.function.Consumer)", "public com.yahoo.jdisc.http.ConnectorConfig$Builder healthCheckProxy(com.yahoo.jdisc.http.ConnectorConfig$HealthCheckProxy$Builder)", + "public com.yahoo.jdisc.http.ConnectorConfig$Builder healthCheckProxy(java.util.function.Consumer)", "public com.yahoo.jdisc.http.ConnectorConfig$Builder proxyProtocol(com.yahoo.jdisc.http.ConnectorConfig$ProxyProtocol$Builder)", + "public com.yahoo.jdisc.http.ConnectorConfig$Builder proxyProtocol(java.util.function.Consumer)", "public com.yahoo.jdisc.http.ConnectorConfig$Builder secureRedirect(com.yahoo.jdisc.http.ConnectorConfig$SecureRedirect$Builder)", + "public com.yahoo.jdisc.http.ConnectorConfig$Builder secureRedirect(java.util.function.Consumer)", "public com.yahoo.jdisc.http.ConnectorConfig$Builder maxRequestsPerConnection(int)", "public com.yahoo.jdisc.http.ConnectorConfig$Builder maxConnectionLife(double)", "public com.yahoo.jdisc.http.ConnectorConfig$Builder http2Enabled(boolean)", "public com.yahoo.jdisc.http.ConnectorConfig$Builder http2(com.yahoo.jdisc.http.ConnectorConfig$Http2$Builder)", + "public com.yahoo.jdisc.http.ConnectorConfig$Builder http2(java.util.function.Consumer)", "public final boolean dispatchGetConfig(com.yahoo.config.ConfigInstance$Producer)", "public final java.lang.String getDefMd5()", "public final java.lang.String getDefName()", @@ -1812,17 +1819,23 @@ "public com.yahoo.jdisc.http.ServerConfig$Builder maxKeepAliveRequests(int)", "public com.yahoo.jdisc.http.ServerConfig$Builder removeRawPostBodyForWwwUrlEncodedPost(boolean)", "public com.yahoo.jdisc.http.ServerConfig$Builder filter(com.yahoo.jdisc.http.ServerConfig$Filter$Builder)", + "public com.yahoo.jdisc.http.ServerConfig$Builder filter(java.util.function.Consumer)", "public com.yahoo.jdisc.http.ServerConfig$Builder filter(java.util.List)", 
"public com.yahoo.jdisc.http.ServerConfig$Builder defaultFilters(com.yahoo.jdisc.http.ServerConfig$DefaultFilters$Builder)", + "public com.yahoo.jdisc.http.ServerConfig$Builder defaultFilters(java.util.function.Consumer)", "public com.yahoo.jdisc.http.ServerConfig$Builder defaultFilters(java.util.List)", "public com.yahoo.jdisc.http.ServerConfig$Builder strictFiltering(boolean)", "public com.yahoo.jdisc.http.ServerConfig$Builder maxWorkerThreads(int)", "public com.yahoo.jdisc.http.ServerConfig$Builder minWorkerThreads(int)", "public com.yahoo.jdisc.http.ServerConfig$Builder stopTimeout(double)", "public com.yahoo.jdisc.http.ServerConfig$Builder jmx(com.yahoo.jdisc.http.ServerConfig$Jmx$Builder)", + "public com.yahoo.jdisc.http.ServerConfig$Builder jmx(java.util.function.Consumer)", "public com.yahoo.jdisc.http.ServerConfig$Builder metric(com.yahoo.jdisc.http.ServerConfig$Metric$Builder)", + "public com.yahoo.jdisc.http.ServerConfig$Builder metric(java.util.function.Consumer)", "public com.yahoo.jdisc.http.ServerConfig$Builder accessLog(com.yahoo.jdisc.http.ServerConfig$AccessLog$Builder)", + "public com.yahoo.jdisc.http.ServerConfig$Builder accessLog(java.util.function.Consumer)", "public com.yahoo.jdisc.http.ServerConfig$Builder connectionLog(com.yahoo.jdisc.http.ServerConfig$ConnectionLog$Builder)", + "public com.yahoo.jdisc.http.ServerConfig$Builder connectionLog(java.util.function.Consumer)", "public final boolean dispatchGetConfig(com.yahoo.config.ConfigInstance$Producer)", "public final java.lang.String getDefMd5()", "public final java.lang.String getDefName()", @@ -2067,6 +2080,7 @@ "public void <init>(com.yahoo.jdisc.http.ServletPathsConfig)", "public com.yahoo.jdisc.http.ServletPathsConfig$Builder servlets(java.lang.String, com.yahoo.jdisc.http.ServletPathsConfig$Servlets$Builder)", "public com.yahoo.jdisc.http.ServletPathsConfig$Builder servlets(java.util.Map)", + "public com.yahoo.jdisc.http.ServletPathsConfig$Builder servlets(java.lang.String, 
java.util.function.Consumer)", "public final boolean dispatchGetConfig(com.yahoo.config.ConfigInstance$Producer)", "public final java.lang.String getDefMd5()", "public final java.lang.String getDefName()", @@ -2321,8 +2335,7 @@ "public" ], "methods": [ - "public void <init>(com.yahoo.jdisc.http.HttpRequest)", - "public com.yahoo.jdisc.http.HttpRequest getParentRequest()" + "public void <init>(com.yahoo.jdisc.http.HttpRequest)" ], "fields": [] }, @@ -2840,6 +2853,7 @@ "public void <init>(com.yahoo.processing.Request, com.yahoo.processing.request.ErrorMessage)", "public void mergeWith(com.yahoo.processing.Response)", "public com.yahoo.processing.response.DataList data()", + "public static java.util.concurrent.CompletableFuture recursiveFuture(com.yahoo.processing.response.DataList)", "public static com.google.common.util.concurrent.ListenableFuture recursiveComplete(com.yahoo.processing.response.DataList)" ], "fields": [] @@ -3138,8 +3152,9 @@ "public abstract void endResponse()", "public void <init>()", "public void <init>(java.util.concurrent.Executor)", - "public final com.google.common.util.concurrent.ListenableFuture render(java.io.OutputStream, com.yahoo.processing.Response, com.yahoo.processing.execution.Execution, com.yahoo.processing.Request)", + "public final java.util.concurrent.CompletableFuture renderResponse(java.io.OutputStream, com.yahoo.processing.Response, com.yahoo.processing.execution.Execution, com.yahoo.processing.Request)", "public void deconstruct()", + "public final java.util.concurrent.CompletableFuture renderResponseBeforeHandover(java.io.OutputStream, com.yahoo.processing.Response, com.yahoo.processing.execution.Execution, com.yahoo.processing.Request)", "public final com.google.common.util.concurrent.ListenableFuture renderBeforeHandover(java.io.OutputStream, com.yahoo.processing.Response, com.yahoo.processing.execution.Execution, com.yahoo.processing.Request)", "public com.yahoo.processing.execution.Execution getExecution()", "public 
com.yahoo.processing.Response getResponse()", @@ -3185,7 +3200,8 @@ "public void <init>()", "public com.yahoo.processing.rendering.Renderer clone()", "public void init()", - "public abstract com.google.common.util.concurrent.ListenableFuture render(java.io.OutputStream, com.yahoo.processing.Response, com.yahoo.processing.execution.Execution, com.yahoo.processing.Request)", + "public com.google.common.util.concurrent.ListenableFuture render(java.io.OutputStream, com.yahoo.processing.Response, com.yahoo.processing.execution.Execution, com.yahoo.processing.Request)", + "public java.util.concurrent.CompletableFuture renderResponse(java.io.OutputStream, com.yahoo.processing.Response, com.yahoo.processing.execution.Execution, com.yahoo.processing.Request)", "public abstract java.lang.String getEncoding()", "public abstract java.lang.String getMimeType()", "public bridge synthetic com.yahoo.component.AbstractComponent clone()", @@ -3389,7 +3405,7 @@ "fields": [] }, "com.yahoo.processing.response.AbstractDataList$DrainOnGetFuture": { - "superClass": "com.google.common.util.concurrent.AbstractFuture", + "superClass": "com.yahoo.processing.impl.ProcessingFuture", "interfaces": [], "attributes": [ "public", @@ -3401,8 +3417,8 @@ "public boolean isCancelled()", "public com.yahoo.processing.response.DataList get()", "public com.yahoo.processing.response.DataList get(long, java.util.concurrent.TimeUnit)", - "public bridge synthetic java.lang.Object get()", - "public bridge synthetic java.lang.Object get(long, java.util.concurrent.TimeUnit)" + "public bridge synthetic java.lang.Object get(long, java.util.concurrent.TimeUnit)", + "public bridge synthetic java.lang.Object get()" ], "fields": [] }, @@ -3424,6 +3440,7 @@ "public com.yahoo.processing.Request request()", "public com.yahoo.processing.response.IncomingData incoming()", "public com.google.common.util.concurrent.ListenableFuture complete()", + "public java.util.concurrent.CompletableFuture completeFuture()", "public 
boolean isOrdered()", "public boolean isStreamed()", "public java.lang.String toString()" @@ -3482,6 +3499,7 @@ "public abstract com.yahoo.processing.response.Data get(int)", "public abstract java.util.List asList()", "public abstract com.yahoo.processing.response.IncomingData incoming()", + "public abstract java.util.concurrent.CompletableFuture completeFuture()", "public abstract com.google.common.util.concurrent.ListenableFuture complete()", "public abstract void addDataListener(java.lang.Runnable)", "public void close()" @@ -3502,6 +3520,7 @@ "public final void assignOwner(com.yahoo.processing.response.DataList)", "public com.yahoo.processing.response.DataList getOwner()", "public com.google.common.util.concurrent.ListenableFuture completed()", + "public java.util.concurrent.CompletableFuture completedFuture()", "public synchronized boolean isComplete()", "public synchronized void addLast(com.yahoo.processing.response.Data)", "public synchronized void add(com.yahoo.processing.response.Data)", @@ -3515,26 +3534,29 @@ "fields": [] }, "com.yahoo.processing.response.FutureResponse": { - "superClass": "com.google.common.util.concurrent.ForwardingFuture", - "interfaces": [], + "superClass": "java.lang.Object", + "interfaces": [ + "java.util.concurrent.Future" + ], "attributes": [ "public" ], "methods": [ "public void <init>(java.util.concurrent.Callable, com.yahoo.processing.execution.Execution, com.yahoo.processing.Request)", - "public com.google.common.util.concurrent.ListenableFutureTask delegate()", + "public java.util.concurrent.FutureTask delegate()", + "public boolean cancel(boolean)", + "public boolean isCancelled()", + "public boolean isDone()", "public com.yahoo.processing.Response get()", "public com.yahoo.processing.Response get(long, java.util.concurrent.TimeUnit)", "public com.yahoo.processing.Request getRequest()", "public bridge synthetic java.lang.Object get(long, java.util.concurrent.TimeUnit)", - "public bridge synthetic java.lang.Object get()", - 
"public bridge synthetic java.util.concurrent.Future delegate()", - "public bridge synthetic java.lang.Object delegate()" + "public bridge synthetic java.lang.Object get()" ], "fields": [] }, "com.yahoo.processing.response.IncomingData$NullIncomingData$ImmediateFuture": { - "superClass": "com.google.common.util.concurrent.AbstractFuture", + "superClass": "com.yahoo.processing.impl.ProcessingFuture", "interfaces": [], "attributes": [ "public" @@ -3546,8 +3568,8 @@ "public boolean isDone()", "public com.yahoo.processing.response.DataList get()", "public com.yahoo.processing.response.DataList get(long, java.util.concurrent.TimeUnit)", - "public bridge synthetic java.lang.Object get()", - "public bridge synthetic java.lang.Object get(long, java.util.concurrent.TimeUnit)" + "public bridge synthetic java.lang.Object get(long, java.util.concurrent.TimeUnit)", + "public bridge synthetic java.lang.Object get()" ], "fields": [] }, @@ -3563,6 +3585,7 @@ "methods": [ "public void <init>(com.yahoo.processing.response.DataList)", "public com.google.common.util.concurrent.ListenableFuture completed()", + "public java.util.concurrent.CompletableFuture completedFuture()", "public com.yahoo.processing.response.DataList getOwner()", "public boolean isComplete()", "public void addLast(com.yahoo.processing.response.Data)", @@ -3586,6 +3609,7 @@ ], "methods": [ "public abstract com.yahoo.processing.response.DataList getOwner()", + "public abstract java.util.concurrent.CompletableFuture completedFuture()", "public abstract com.google.common.util.concurrent.ListenableFuture completed()", "public abstract boolean isComplete()", "public abstract void addLast(com.yahoo.processing.response.Data)", diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/filter/DiscFilterRequest.java b/container-core/src/main/java/com/yahoo/jdisc/http/filter/DiscFilterRequest.java index 8ac2305f5df..2580b4a6ac0 100644 --- a/container-core/src/main/java/com/yahoo/jdisc/http/filter/DiscFilterRequest.java 
+++ b/container-core/src/main/java/com/yahoo/jdisc/http/filter/DiscFilterRequest.java @@ -73,7 +73,7 @@ public class DiscFilterRequest { */ @Deprecated(forRemoval = true, since = "7.511") public HttpRequest getParentRequest() { - throw new UnsupportedOperationException("getParentRequest is not supported for " + parent.getClass().getName()); + return parent; } /** diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/filter/JdiscFilterRequest.java b/container-core/src/main/java/com/yahoo/jdisc/http/filter/JdiscFilterRequest.java index aa4050dd963..74c3b8adc7d 100644 --- a/container-core/src/main/java/com/yahoo/jdisc/http/filter/JdiscFilterRequest.java +++ b/container-core/src/main/java/com/yahoo/jdisc/http/filter/JdiscFilterRequest.java @@ -10,15 +10,8 @@ import com.yahoo.jdisc.http.HttpRequest; @Deprecated(forRemoval = true, since = "7.511") public class JdiscFilterRequest extends DiscFilterRequest { - private final HttpRequest parent; - public JdiscFilterRequest(HttpRequest parent) { super(parent); - this.parent = parent; } - @SuppressWarnings("removal") - @Override - public HttpRequest getParentRequest() { return parent; } - } diff --git a/container-core/src/main/java/com/yahoo/processing/Response.java b/container-core/src/main/java/com/yahoo/processing/Response.java index 0319a36f2f8..cf54d043c5f 100644 --- a/container-core/src/main/java/com/yahoo/processing/Response.java +++ b/container-core/src/main/java/com/yahoo/processing/Response.java @@ -1,12 +1,12 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.processing; -import com.google.common.util.concurrent.AbstractFuture; import com.google.common.util.concurrent.ListenableFuture; -import com.google.common.util.concurrent.MoreExecutors; import com.yahoo.component.provider.ListenableFreezableClass; +import com.yahoo.concurrent.CompletableFutures; import com.yahoo.concurrent.SystemTimer; import com.yahoo.processing.execution.ResponseReceiver; +import com.yahoo.processing.impl.ProcessingFuture; import com.yahoo.processing.request.CompoundName; import com.yahoo.processing.request.ErrorMessage; import com.yahoo.processing.response.ArrayDataList; @@ -15,8 +15,8 @@ import com.yahoo.processing.response.DataList; import java.util.ArrayList; import java.util.List; +import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; -import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; @@ -57,7 +57,7 @@ public class Response extends ListenableFreezableClass { if (freezeListener != null) { if (freezeListener instanceof ResponseReceiver) ((ResponseReceiver)freezeListener).setResponse(this); - data.addFreezeListener(freezeListener, MoreExecutors.directExecutor()); + data.addFreezeListener(freezeListener, Runnable::run); } } @@ -96,15 +96,22 @@ public class Response extends ListenableFreezableClass { * @param rootDataList the list to complete recursively * @return the future in which all data in and below this list is complete, as the given root dataList for convenience */ - public static <D extends Data> ListenableFuture<DataList<D>> recursiveComplete(DataList<D> rootDataList) { - List<ListenableFuture<DataList<D>>> futures = new ArrayList<>(); + public static <D extends Data> CompletableFuture<DataList<D>> recursiveFuture(DataList<D> rootDataList) { + List<CompletableFuture<DataList<D>>> futures = new ArrayList<>(); collectCompletionFutures(rootDataList, futures); return new CompleteAllOnGetFuture<D>(futures); } + 
/** @deprecated Use {@link #recursiveFuture(DataList)} instead */ + @Deprecated(forRemoval = true, since = "7") + @SuppressWarnings("removal") + public static <D extends Data> ListenableFuture<DataList<D>> recursiveComplete(DataList<D> rootDataList) { + return CompletableFutures.toGuavaListenableFuture(recursiveFuture(rootDataList)); + } + @SuppressWarnings("unchecked") - private static <D extends Data> void collectCompletionFutures(DataList<D> dataList, List<ListenableFuture<DataList<D>>> futures) { - futures.add(dataList.complete()); + private static <D extends Data> void collectCompletionFutures(DataList<D> dataList, List<CompletableFuture<DataList<D>>> futures) { + futures.add(dataList.completeFuture()); for (D data : dataList.asList()) { if (data instanceof DataList) collectCompletionFutures((DataList<D>) data, futures); @@ -115,24 +122,24 @@ public class Response extends ListenableFreezableClass { * A future which on get calls get on all its given futures and sets the value returned from the * first given future as its result. 
*/ - private static class CompleteAllOnGetFuture<D extends Data> extends AbstractFuture<DataList<D>> { + private static class CompleteAllOnGetFuture<D extends Data> extends ProcessingFuture<DataList<D>> { - private final List<ListenableFuture<DataList<D>>> futures; + private final List<CompletableFuture<DataList<D>>> futures; - public CompleteAllOnGetFuture(List<ListenableFuture<DataList<D>>> futures) { + public CompleteAllOnGetFuture(List<CompletableFuture<DataList<D>>> futures) { this.futures = new ArrayList<>(futures); } @Override public DataList<D> get() throws InterruptedException, ExecutionException { DataList<D> result = null; - for (ListenableFuture<DataList<D>> future : futures) { + for (CompletableFuture<DataList<D>> future : futures) { if (result == null) result = future.get(); else future.get(); } - set(result); + complete(result); return result; } @@ -141,7 +148,7 @@ public class Response extends ListenableFreezableClass { DataList<D> result = null; long timeLeft = unit.toMillis(timeout); long currentCallStart = SystemTimer.INSTANCE.milliTime(); - for (ListenableFuture<DataList<D>> future : futures) { + for (CompletableFuture<DataList<D>> future : futures) { if (result == null) result = future.get(timeLeft, TimeUnit.MILLISECONDS); else @@ -151,7 +158,7 @@ public class Response extends ListenableFreezableClass { if (timeLeft <= 0) break; currentCallStart = currentCallEnd; } - set(result); + complete(result); return result; } diff --git a/container-core/src/main/java/com/yahoo/processing/handler/AbstractProcessingHandler.java b/container-core/src/main/java/com/yahoo/processing/handler/AbstractProcessingHandler.java index 5119e69f72e..9b9224e70ef 100644 --- a/container-core/src/main/java/com/yahoo/processing/handler/AbstractProcessingHandler.java +++ b/container-core/src/main/java/com/yahoo/processing/handler/AbstractProcessingHandler.java @@ -244,7 +244,8 @@ public abstract class AbstractProcessingHandler<COMPONENT extends Processor> ext // Render if we 
have a renderer capable of it if (getRenderer() instanceof AsynchronousSectionedRenderer) { - ((AsynchronousSectionedRenderer) getRenderer()).renderBeforeHandover(new ContentChannelOutputStream(channel), response, execution, request); + ((AsynchronousSectionedRenderer) getRenderer()).renderResponseBeforeHandover( + new ContentChannelOutputStream(channel), response, execution, request); } } diff --git a/container-core/src/main/java/com/yahoo/processing/handler/ProcessingResponse.java b/container-core/src/main/java/com/yahoo/processing/handler/ProcessingResponse.java index 54fbce9e177..28645b4bde0 100644 --- a/container-core/src/main/java/com/yahoo/processing/handler/ProcessingResponse.java +++ b/container-core/src/main/java/com/yahoo/processing/handler/ProcessingResponse.java @@ -1,19 +1,9 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.processing.handler; -import java.io.IOException; -import java.io.OutputStream; -import java.util.Collections; -import java.util.LinkedHashSet; -import java.util.List; -import java.util.Set; -import java.util.concurrent.Executor; - import com.google.common.collect.ImmutableList; import com.yahoo.container.jdisc.AsyncHttpResponse; -import com.yahoo.container.jdisc.HttpRequest; import com.yahoo.container.jdisc.VespaHeaders; -import com.yahoo.container.logging.AccessLogEntry; import com.yahoo.jdisc.handler.CompletionHandler; import com.yahoo.jdisc.handler.ContentChannel; import com.yahoo.processing.Request; @@ -26,6 +16,14 @@ import com.yahoo.processing.request.ErrorMessage; import com.yahoo.processing.response.Data; import com.yahoo.processing.response.DataList; +import java.io.IOException; +import java.io.OutputStream; +import java.util.Collections; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Set; +import java.util.concurrent.Executor; + /** * A response from running a request through processing. 
This response is just a * wrapper of the knowhow needed to render the Response from processing. @@ -62,7 +60,7 @@ public class ProcessingResponse extends AsyncHttpResponse { AsynchronousRenderer asyncRenderer = (AsynchronousRenderer)renderer; asyncRenderer.setNetworkWiring(channel, completionHandler); } - renderer.render(stream, processingResponse, execution, processingRequest); + renderer.renderResponse(stream, processingResponse, execution, processingRequest); // the stream is closed in AsynchronousSectionedRenderer, after all data // has arrived } diff --git a/container-core/src/main/java/com/yahoo/processing/impl/ProcessingFuture.java b/container-core/src/main/java/com/yahoo/processing/impl/ProcessingFuture.java new file mode 100644 index 00000000000..ab597fffaff --- /dev/null +++ b/container-core/src/main/java/com/yahoo/processing/impl/ProcessingFuture.java @@ -0,0 +1,31 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +package com.yahoo.processing.impl; + +import com.google.common.util.concurrent.ListenableFuture; + +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Executor; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +/** + * A {@link CompletableFuture} where {@link #get()}/{@link #get(long, TimeUnit)} may have side-effects (e.g trigger the underlying computation). 
+ * + * @author bjorncs + */ +// TODO Vespa 8 remove ListenableFuture implementation +public abstract class ProcessingFuture<V> extends CompletableFuture<V> implements ListenableFuture<V> { + + @Override public boolean cancel(boolean mayInterruptIfRunning) { return false; } + @Override public boolean isCancelled() { return false; } + + @Override public abstract V get() throws InterruptedException, ExecutionException; + @Override public abstract V get(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException; + + @Override + public void addListener(Runnable listener, Executor executor) { + whenCompleteAsync((__, ___) -> listener.run(), executor); + } + +} diff --git a/container-core/src/main/java/com/yahoo/processing/rendering/AsynchronousSectionedRenderer.java b/container-core/src/main/java/com/yahoo/processing/rendering/AsynchronousSectionedRenderer.java index b77d493ea30..bb5fe7a1f76 100644 --- a/container-core/src/main/java/com/yahoo/processing/rendering/AsynchronousSectionedRenderer.java +++ b/container-core/src/main/java/com/yahoo/processing/rendering/AsynchronousSectionedRenderer.java @@ -2,12 +2,10 @@ package com.yahoo.processing.rendering; import com.google.common.util.concurrent.ListenableFuture; -import com.google.common.util.concurrent.MoreExecutors; -import com.google.common.util.concurrent.SettableFuture; +import com.yahoo.concurrent.CompletableFutures; import com.yahoo.concurrent.ThreadFactoryFactory; import com.yahoo.jdisc.handler.CompletionHandler; import com.yahoo.jdisc.handler.ContentChannel; -import java.util.logging.Level; import com.yahoo.processing.Request; import com.yahoo.processing.Response; import com.yahoo.processing.execution.Execution; @@ -23,12 +21,14 @@ import java.util.ArrayDeque; import java.util.Collections; import java.util.Deque; import java.util.List; +import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; import java.util.concurrent.Executor; import 
java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.RejectedExecutionException; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; +import java.util.logging.Level; import java.util.logging.Logger; /** @@ -126,7 +126,7 @@ public abstract class AsynchronousSectionedRenderer<RESPONSE extends Response> e return executor; } - private SettableFuture<Boolean> success; + private CompletableFuture<Boolean> success; private ContentChannel channel; private CompletionHandler completionHandler; @@ -173,8 +173,8 @@ public abstract class AsynchronousSectionedRenderer<RESPONSE extends Response> e * @return a future indicating whether rendering was successful */ @Override - public final ListenableFuture<Boolean> render(OutputStream stream, RESPONSE response, - Execution execution, Request request) { + public final CompletableFuture<Boolean> renderResponse(OutputStream stream, RESPONSE response, + Execution execution, Request request) { if (beforeHandoverMode) { // rendering has already started or is already complete beforeHandoverMode = false; if ( ! dataListListenerStack.isEmpty() && @@ -215,22 +215,31 @@ public abstract class AsynchronousSectionedRenderer<RESPONSE extends Response> e * At this point the worker thread still owns the Response, so all this rendering must happen * on the caller thread invoking freeze (that is, on the thread calling this). */ - public final ListenableFuture<Boolean> renderBeforeHandover(OutputStream stream, RESPONSE response, - Execution execution, Request request) { + public final CompletableFuture<Boolean> renderResponseBeforeHandover(OutputStream stream, RESPONSE response, + Execution execution, Request request) { beforeHandoverMode = true; if ( ! 
isInitialized) throw new IllegalStateException("render() invoked before init()."); return startRender(stream, response, execution, request); } - private ListenableFuture<Boolean> startRender(OutputStream stream, RESPONSE response, + + /** @deprecated Use {@link #renderResponseBeforeHandover(OutputStream, Response, Execution, Request)} */ + @Deprecated(forRemoval = true, since = "7") + @SuppressWarnings("removal") + public final ListenableFuture<Boolean> renderBeforeHandover(OutputStream stream, RESPONSE response, + Execution execution, Request request) { + return CompletableFutures.toGuavaListenableFuture(renderResponseBeforeHandover(stream, response, execution, request)); + } + + private CompletableFuture<Boolean> startRender(OutputStream stream, RESPONSE response, Execution execution, Request request) { this.response = response; this.stream = stream; this.execution = execution; DataListListener parentOfTopLevelListener = new DataListListener(new ParentOfTopLevel(request,response.data()), null); dataListListenerStack.addFirst(parentOfTopLevelListener); - success = SettableFuture.create(); + success = new CompletableFuture<>(); try { getExecutor().execute(parentOfTopLevelListener); } catch (RejectedExecutionException e) { @@ -247,7 +256,7 @@ public abstract class AsynchronousSectionedRenderer<RESPONSE extends Response> e * inadvertently work ends up in async data producing threads in some cases. */ Executor getExecutor() { - return beforeHandoverMode ? MoreExecutors.directExecutor() : renderingExecutor; + return beforeHandoverMode ? Runnable::run : renderingExecutor; } /** For inspection only; use getExecutor() for execution */ Executor getRenderingExecutor() { return renderingExecutor; } @@ -350,10 +359,10 @@ public abstract class AsynchronousSectionedRenderer<RESPONSE extends Response> e return; // Called on completion of a list which is not frozen yet - hold off until frozen if ( ! 
beforeHandoverMode) - list.complete().get(); // trigger completion if not done already to invoke any listeners on that event + list.completeFuture().get(); // trigger completion if not done already to invoke any listeners on that event boolean startedRendering = renderData(); if ( ! startedRendering || uncompletedChildren > 0) return; // children must render to completion first - if (list.complete().isDone()) // might not be when in before handover mode + if (list.completeFuture().isDone()) // might not be when in before handover mode endListLevel(); else stream.flush(); @@ -435,8 +444,8 @@ public abstract class AsynchronousSectionedRenderer<RESPONSE extends Response> e flushIfLikelyToSuspend(subList); subList.addFreezeListener(listListener, getExecutor()); - subList.complete().addListener(listListener, getExecutor()); - subList.incoming().completed().addListener(listListener, getExecutor()); + subList.completeFuture().whenCompleteAsync((__, ___) -> listListener.run(), getExecutor()); + subList.incoming().completedFuture().whenCompleteAsync((__, ___) -> listListener.run(), getExecutor()); } private boolean isOrdered(DataList dataList) { @@ -471,11 +480,11 @@ public abstract class AsynchronousSectionedRenderer<RESPONSE extends Response> e logger.log(Level.WARNING, "Exception caught while closing stream to client.", e); } finally { if (failed != null) { - success.setException(failed); + success.completeExceptionally(failed); } else if (closeException != null) { - success.setException(closeException); + success.completeExceptionally(closeException); } else { - success.set(true); + success.complete(true); } if (channel != null) { channel.close(completionHandler); @@ -541,7 +550,7 @@ public abstract class AsynchronousSectionedRenderer<RESPONSE extends Response> e } catch (Exception ignored) { } } - success.setException(e); + success.completeExceptionally(e); } } } catch (Error e) { diff --git a/container-core/src/main/java/com/yahoo/processing/rendering/Renderer.java 
b/container-core/src/main/java/com/yahoo/processing/rendering/Renderer.java index 14ec3002b0a..8db4ed4f624 100644 --- a/container-core/src/main/java/com/yahoo/processing/rendering/Renderer.java +++ b/container-core/src/main/java/com/yahoo/processing/rendering/Renderer.java @@ -3,11 +3,13 @@ package com.yahoo.processing.rendering; import com.google.common.util.concurrent.ListenableFuture; import com.yahoo.component.AbstractComponent; +import com.yahoo.concurrent.CompletableFutures; import com.yahoo.processing.Request; import com.yahoo.processing.Response; import com.yahoo.processing.execution.Execution; import java.io.OutputStream; +import java.util.concurrent.CompletableFuture; /** * Renders a response to a stream. The renderers are cloned just before @@ -41,6 +43,17 @@ public abstract class Renderer<RESPONSE extends Response> extends AbstractCompon } /** + * @deprecated Use/implement {@link #renderResponse(OutputStream, Response, Execution, Request)} instead. + * Return type changed from {@link ListenableFuture} to {@link CompletableFuture}. + */ + @Deprecated(forRemoval = true, since = "7") + @SuppressWarnings("removal") + public ListenableFuture<Boolean> render(OutputStream stream, RESPONSE response, Execution execution, + Request request) { + return CompletableFutures.toGuavaListenableFuture(renderResponse(stream, response, execution, request)); + } + + /** * Render a response to a stream. The stream also exposes a ByteBuffer API * for efficient transactions to JDisc. 
The returned future will throw the * exception causing failure wrapped in an ExecutionException if rendering @@ -50,10 +63,13 @@ public abstract class Renderer<RESPONSE extends Response> extends AbstractCompon * @param response the response to render * @param execution the execution which created this response * @param request the request matching the response - * @return a ListenableFuture containing a boolean where true indicates a successful rendering + * @return a {@link CompletableFuture} containing a boolean where true indicates a successful rendering */ - public abstract ListenableFuture<Boolean> render(OutputStream stream, RESPONSE response, - Execution execution, Request request); + @SuppressWarnings("removal") + public CompletableFuture<Boolean> renderResponse(OutputStream stream, RESPONSE response, + Execution execution, Request request) { + return CompletableFutures.toCompletableFuture(render(stream, response, execution, request)); + } /** * Name of the output encoding, if applicable. diff --git a/container-core/src/main/java/com/yahoo/processing/response/AbstractDataList.java b/container-core/src/main/java/com/yahoo/processing/response/AbstractDataList.java index 4633ac5ec1c..b1ce0643487 100644 --- a/container-core/src/main/java/com/yahoo/processing/response/AbstractDataList.java +++ b/container-core/src/main/java/com/yahoo/processing/response/AbstractDataList.java @@ -1,15 +1,13 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.processing.response; -import com.google.common.util.concurrent.AbstractFuture; -import com.google.common.util.concurrent.ExecutionList; import com.google.common.util.concurrent.ListenableFuture; -import com.google.common.util.concurrent.MoreExecutors; import com.yahoo.component.provider.ListenableFreezableClass; +import com.yahoo.concurrent.CompletableFutures; import com.yahoo.processing.Request; +import com.yahoo.processing.impl.ProcessingFuture; -import java.util.ArrayList; -import java.util.List; +import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; @@ -34,7 +32,7 @@ public abstract class AbstractDataList<DATATYPE extends Data> extends Listenable */ private final IncomingData<DATATYPE> incomingData; - private final ListenableFuture<DataList<DATATYPE>> completedFuture; + private final CompletableFuture<DataList<DATATYPE>> completedFuture; /** * Creates a simple data list which does not allow late incoming data @@ -94,10 +92,15 @@ public abstract class AbstractDataList<DATATYPE extends Data> extends Listenable return incomingData; } + @Override + @SuppressWarnings("removal") + @Deprecated(forRemoval = true, since = "7") public ListenableFuture<DataList<DATATYPE>> complete() { - return completedFuture; + return CompletableFutures.toGuavaListenableFuture(completedFuture); } + @Override public CompletableFuture<DataList<DATATYPE>> completeFuture() { return completedFuture; } + @Override public boolean isOrdered() { return ordered; } @@ -108,7 +111,7 @@ public abstract class AbstractDataList<DATATYPE extends Data> extends Listenable return super.toString() + (complete().isDone() ? 
" [completed]" : " [incomplete, " + incoming() + "]"); } - public static final class DrainOnGetFuture<DATATYPE extends Data> extends AbstractFuture<DataList<DATATYPE>> { + public static final class DrainOnGetFuture<DATATYPE extends Data> extends ProcessingFuture<DataList<DATATYPE>> { private final DataList<DATATYPE> owner; @@ -137,7 +140,7 @@ public abstract class AbstractDataList<DATATYPE extends Data> extends Listenable */ @Override public DataList<DATATYPE> get() throws InterruptedException, ExecutionException { - return drain(owner.incoming().completed().get()); + return drain(owner.incoming().completedFuture().get()); } /** @@ -146,13 +149,13 @@ public abstract class AbstractDataList<DATATYPE extends Data> extends Listenable */ @Override public DataList<DATATYPE> get(long timeout, TimeUnit timeUnit) throws InterruptedException, ExecutionException, TimeoutException { - return drain(owner.incoming().completed().get(timeout, timeUnit)); + return drain(owner.incoming().completedFuture().get(timeout, timeUnit)); } private DataList<DATATYPE> drain(DataList<DATATYPE> dataList) { for (DATATYPE item : dataList.incoming().drain()) dataList.add(item); - set(dataList); // Signal completion to listeners + complete(dataList); // Signal completion to listeners return dataList; } diff --git a/container-core/src/main/java/com/yahoo/processing/response/DataList.java b/container-core/src/main/java/com/yahoo/processing/response/DataList.java index d566e201375..dbda8983f12 100644 --- a/container-core/src/main/java/com/yahoo/processing/response/DataList.java +++ b/container-core/src/main/java/com/yahoo/processing/response/DataList.java @@ -1,11 +1,10 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.processing.response; -import com.google.common.util.concurrent.ExecutionList; import com.google.common.util.concurrent.ListenableFuture; import java.util.List; -import java.util.concurrent.Executor; +import java.util.concurrent.CompletableFuture; /** * A list of data items created due to a processing request. @@ -73,6 +72,10 @@ public interface DataList<DATATYPE extends Data> extends Data { * Making this call on a list which does not support future data always returns immediately and * causes no memory synchronization cost. */ + CompletableFuture<DataList<DATATYPE>> completeFuture(); + + /** @deprecated Use {@link #completeFuture()} instead */ + @Deprecated(forRemoval = true, since = "7") ListenableFuture<DataList<DATATYPE>> complete(); /** diff --git a/container-core/src/main/java/com/yahoo/processing/response/DefaultIncomingData.java b/container-core/src/main/java/com/yahoo/processing/response/DefaultIncomingData.java index 619e554f45c..813d6ac54d8 100644 --- a/container-core/src/main/java/com/yahoo/processing/response/DefaultIncomingData.java +++ b/container-core/src/main/java/com/yahoo/processing/response/DefaultIncomingData.java @@ -2,12 +2,13 @@ package com.yahoo.processing.response; import com.google.common.util.concurrent.ListenableFuture; -import com.google.common.util.concurrent.SettableFuture; import com.yahoo.collections.Tuple2; +import com.yahoo.concurrent.CompletableFutures; import java.util.ArrayList; import java.util.Collections; import java.util.List; +import java.util.concurrent.CompletableFuture; import java.util.concurrent.Executor; /** @@ -19,7 +20,7 @@ public class DefaultIncomingData<DATATYPE extends Data> implements IncomingData< private DataList<DATATYPE> owner = null; - private final SettableFuture<DataList<DATATYPE>> completionFuture; + private final CompletableFuture<DataList<DATATYPE>> completionFuture; private final List<DATATYPE> dataList = new ArrayList<>(); @@ -35,7 +36,7 @@ public class 
DefaultIncomingData<DATATYPE extends Data> implements IncomingData< public DefaultIncomingData(DataList<DATATYPE> owner) { assignOwner(owner); - completionFuture = SettableFuture.create(); + completionFuture = new CompletableFuture<>(); } /** Assigns the owner of this. Throws an exception if the owner is already set. */ @@ -50,10 +51,14 @@ public class DefaultIncomingData<DATATYPE extends Data> implements IncomingData< } @Override + @Deprecated(forRemoval = true, since = "7") + @SuppressWarnings("removal") public ListenableFuture<DataList<DATATYPE>> completed() { - return completionFuture; + return CompletableFutures.toGuavaListenableFuture(completionFuture); } + @Override public CompletableFuture<DataList<DATATYPE>> completedFuture() { return completionFuture; } + /** Returns whether the data in this is complete */ @Override public synchronized boolean isComplete() { @@ -92,7 +97,7 @@ public class DefaultIncomingData<DATATYPE extends Data> implements IncomingData< @Override public synchronized void markComplete() { complete = true; - completionFuture.set(owner); + completionFuture.complete(owner); } /** diff --git a/container-core/src/main/java/com/yahoo/processing/response/FutureResponse.java b/container-core/src/main/java/com/yahoo/processing/response/FutureResponse.java index d589b7dd195..25c230e383f 100644 --- a/container-core/src/main/java/com/yahoo/processing/response/FutureResponse.java +++ b/container-core/src/main/java/com/yahoo/processing/response/FutureResponse.java @@ -1,8 +1,6 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.processing.response; -import com.google.common.util.concurrent.ForwardingFuture; -import com.google.common.util.concurrent.ListenableFutureTask; import com.yahoo.processing.Request; import com.yahoo.processing.Response; import com.yahoo.processing.execution.Execution; @@ -10,6 +8,8 @@ import com.yahoo.processing.request.ErrorMessage; import java.util.concurrent.Callable; import java.util.concurrent.ExecutionException; +import java.util.concurrent.Future; +import java.util.concurrent.FutureTask; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import java.util.logging.Level; @@ -20,9 +20,10 @@ import java.util.logging.Logger; * * @author bratseth */ -public class FutureResponse extends ForwardingFuture<Response> { +public class FutureResponse implements Future<Response> { private final Request request; + private final FutureTask<Response> task; /** * Only used for generating messages @@ -31,24 +32,23 @@ public class FutureResponse extends ForwardingFuture<Response> { private final static Logger log = Logger.getLogger(FutureResponse.class.getName()); - private final ListenableFutureTask<Response> futureTask; - public FutureResponse(final Callable<Response> callable, Execution execution, final Request request) { - this.futureTask = ListenableFutureTask.create(callable); + this.task = new FutureTask<>(callable); this.request = request; this.execution = execution; } - @Override - public ListenableFutureTask<Response> delegate() { - return futureTask; - } + public FutureTask<Response> delegate() { return task; } + + @Override public boolean cancel(boolean mayInterruptIfRunning) { return task.cancel(mayInterruptIfRunning); } + @Override public boolean isCancelled() { return task.isCancelled(); } + @Override public boolean isDone() { return task.isDone(); } public @Override Response get() { try { - return super.get(); + return task.get(); } catch (InterruptedException e) { return new Response(request, new 
ErrorMessage("'" + execution + "' was interrupted", e)); } catch (ExecutionException e) { @@ -61,7 +61,7 @@ public class FutureResponse extends ForwardingFuture<Response> { @Override Response get(long timeout, TimeUnit timeunit) { try { - return super.get(timeout, timeunit); + return task.get(timeout, timeunit); } catch (InterruptedException e) { return new Response(request, new ErrorMessage("'" + execution + "' was interrupted", e)); } catch (ExecutionException e) { diff --git a/container-core/src/main/java/com/yahoo/processing/response/IncomingData.java b/container-core/src/main/java/com/yahoo/processing/response/IncomingData.java index 371c1bca45f..54ba0fa8031 100644 --- a/container-core/src/main/java/com/yahoo/processing/response/IncomingData.java +++ b/container-core/src/main/java/com/yahoo/processing/response/IncomingData.java @@ -1,11 +1,13 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.processing.response; -import com.google.common.util.concurrent.AbstractFuture; import com.google.common.util.concurrent.ListenableFuture; +import com.yahoo.concurrent.CompletableFutures; +import com.yahoo.processing.impl.ProcessingFuture; import java.util.Collections; import java.util.List; +import java.util.concurrent.CompletableFuture; import java.util.concurrent.Executor; import java.util.concurrent.TimeUnit; @@ -35,6 +37,10 @@ public interface IncomingData<DATATYPE extends Data> { * <p> * This return the list owning this for convenience. 
*/ + CompletableFuture<DataList<DATATYPE>> completedFuture(); + + /** @deprecated Use {@link #completedFuture()} instead */ + @Deprecated(forRemoval = true, since = "7") ListenableFuture<DataList<DATATYPE>> completed(); /** @@ -108,10 +114,15 @@ public interface IncomingData<DATATYPE extends Data> { completionFuture = new ImmediateFuture<>(owner); } + @Override + @SuppressWarnings("removal") + @Deprecated(forRemoval = true, since = "7") public ListenableFuture<DataList<DATATYPE>> completed() { - return completionFuture; + return CompletableFutures.toGuavaListenableFuture(completionFuture); } + @Override public CompletableFuture<DataList<DATATYPE>> completedFuture() { return completionFuture; } + @Override public DataList<DATATYPE> getOwner() { return owner; @@ -178,13 +189,13 @@ public interface IncomingData<DATATYPE extends Data> { * This is semantically the same as Futures.immediateFuture but contrary to it, * this never causes any memory synchronization when accessed. */ - public static class ImmediateFuture<DATATYPE extends Data> extends AbstractFuture<DataList<DATATYPE>> { + public static class ImmediateFuture<DATATYPE extends Data> extends ProcessingFuture<DataList<DATATYPE>> { - private DataList<DATATYPE> owner; + private final DataList<DATATYPE> owner; public ImmediateFuture(DataList<DATATYPE> owner) { this.owner = owner; // keep here to avoid memory synchronization for access - set(owner); // Signal completion (for future listeners) + complete(owner); // Signal completion (for future listeners) } @Override diff --git a/container-core/src/main/java/com/yahoo/processing/test/ProcessorLibrary.java b/container-core/src/main/java/com/yahoo/processing/test/ProcessorLibrary.java index aebbc3f538d..ee8dbd8dccb 100644 --- a/container-core/src/main/java/com/yahoo/processing/test/ProcessorLibrary.java +++ b/container-core/src/main/java/com/yahoo/processing/test/ProcessorLibrary.java @@ -1,8 +1,6 @@ // Copyright Yahoo. 
Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.processing.test; -import com.google.common.util.concurrent.MoreExecutors; -import com.google.common.util.concurrent.SettableFuture; import com.yahoo.component.chain.Chain; import com.yahoo.processing.Processor; import com.yahoo.processing.Request; @@ -15,6 +13,7 @@ import com.yahoo.processing.request.ErrorMessage; import com.yahoo.processing.response.*; import java.util.*; +import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; /** @@ -288,7 +287,7 @@ public class ProcessorLibrary { private final boolean ordered, streamed; /** The incoming data this has created */ - public final SettableFuture<IncomingData> incomingData = SettableFuture.create(); + public final CompletableFuture<IncomingData> incomingData = new CompletableFuture<>(); /** Create an instance which returns ordered, streamable data */ public ListenableFutureDataSource() { this(true, true); } @@ -307,7 +306,7 @@ public class ProcessorLibrary { dataList = ArrayDataList.createAsyncNonstreamed(request); else dataList = ArrayDataList.createAsync(request); - incomingData.set(dataList.incoming()); + incomingData.complete(dataList.incoming()); return new Response(dataList); } @@ -317,12 +316,12 @@ public class ProcessorLibrary { public static class RequestCounter extends Processor { /** The incoming data this has created */ - public final SettableFuture<IncomingData> incomingData = SettableFuture.create(); + public final CompletableFuture<IncomingData> incomingData = new CompletableFuture<>(); @Override public Response process(Request request, Execution execution) { ArrayDataList dataList = ArrayDataList.createAsync(request); - incomingData.set(dataList.incoming()); + incomingData.complete(dataList.incoming()); return new Response(dataList); } @@ -354,7 +353,7 @@ public class ProcessorLibrary { // wait for other executions and merge the responses for (Response 
additionalResponse : AsyncExecution.waitForAll(futures, 1000)) { - additionalResponse.data().complete().get(); // block until we have all the data elements + additionalResponse.data().completeFuture().get(); // block until we have all the data elements for (Object item : additionalResponse.data().asList()) response.data().add((Data) item); response.mergeWith(additionalResponse); @@ -382,9 +381,10 @@ public class ProcessorLibrary { public Response process(Request request, Execution execution) { Response response = execution.process(request); // TODO: Consider for to best provide helpers for this - response.data().complete().addListener(new RunnableExecution(request, - new ExecutionWithResponse(asyncChain, response, execution)), - MoreExecutors.directExecutor()); + response.data().completeFuture().whenComplete( + (__, ___) -> + new RunnableExecution(request, new ExecutionWithResponse(asyncChain, response, execution)) + .run()); return response; } diff --git a/container-core/src/test/java/com/yahoo/processing/ResponseTestCase.java b/container-core/src/test/java/com/yahoo/processing/ResponseTestCase.java index 0f16aed3d0b..efcf608b6f0 100644 --- a/container-core/src/test/java/com/yahoo/processing/ResponseTestCase.java +++ b/container-core/src/test/java/com/yahoo/processing/ResponseTestCase.java @@ -22,7 +22,7 @@ public class ResponseTestCase { * Check the recursive toString printing along the way. * List variable names ends by numbers specifying the index of the list at each level. 
*/ - @SuppressWarnings("unchecked") + @SuppressWarnings({"unchecked", "removal"}) @Test public void testRecursiveCompletionAndToString() throws InterruptedException, ExecutionException { // create lists diff --git a/container-core/src/test/java/com/yahoo/processing/execution/test/FutureDataTestCase.java b/container-core/src/test/java/com/yahoo/processing/execution/test/FutureDataTestCase.java index 40e7384c745..2fb32271419 100644 --- a/container-core/src/test/java/com/yahoo/processing/execution/test/FutureDataTestCase.java +++ b/container-core/src/test/java/com/yahoo/processing/execution/test/FutureDataTestCase.java @@ -25,7 +25,7 @@ import static org.junit.Assert.assertEquals; public class FutureDataTestCase { /** Run a chain which ends in a processor which returns a response containing future data. */ - @SuppressWarnings("unchecked") + @SuppressWarnings({"unchecked", "removal"}) @Test public void testFutureDataPassThrough() throws InterruptedException, ExecutionException, TimeoutException { // Set up @@ -52,7 +52,7 @@ public class FutureDataTestCase { } /** Federate to one source which returns data immediately and one who return future data */ - @SuppressWarnings("unchecked") + @SuppressWarnings({"unchecked", "removal"}) @Test public void testFederateSyncAndAsyncData() throws InterruptedException, ExecutionException, TimeoutException { // Set up @@ -88,7 +88,7 @@ public class FutureDataTestCase { } /** Register a chain which will be called when some async data is available */ - @SuppressWarnings("unchecked") + @SuppressWarnings({"unchecked", "removal"}) @Test public void testAsyncDataProcessing() throws InterruptedException, ExecutionException, TimeoutException { // Set up @@ -120,7 +120,7 @@ public class FutureDataTestCase { * When the first of the futures are done one additional chain is to be run. * When both are done another chain is to be run. 
*/ - @SuppressWarnings("unchecked") + @SuppressWarnings({"unchecked", "removal"}) @Test public void testAsyncDataProcessingOfFederatedResult() throws InterruptedException, ExecutionException, TimeoutException { // Set up diff --git a/container-core/src/test/java/com/yahoo/processing/execution/test/StreamingTestCase.java b/container-core/src/test/java/com/yahoo/processing/execution/test/StreamingTestCase.java index 1ebf01c5f33..bd1307ff77c 100644 --- a/container-core/src/test/java/com/yahoo/processing/execution/test/StreamingTestCase.java +++ b/container-core/src/test/java/com/yahoo/processing/execution/test/StreamingTestCase.java @@ -13,7 +13,6 @@ import com.yahoo.processing.test.ProcessorLibrary; import org.junit.Test; import java.util.concurrent.ExecutionException; -import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; @@ -27,7 +26,7 @@ import static org.junit.Assert.assertEquals; public class StreamingTestCase { /** Tests adding a chain which is called every time new data is added to a data list */ - @SuppressWarnings("unchecked") + @SuppressWarnings({"unchecked", "removal"}) @Test public void testStreamingData() throws InterruptedException, ExecutionException, TimeoutException { // Set up diff --git a/container-core/src/test/java/com/yahoo/processing/rendering/AsynchronousSectionedRendererTest.java b/container-core/src/test/java/com/yahoo/processing/rendering/AsynchronousSectionedRendererTest.java index ce2b54ba6ff..627081e0d3b 100644 --- a/container-core/src/test/java/com/yahoo/processing/rendering/AsynchronousSectionedRendererTest.java +++ b/container-core/src/test/java/com/yahoo/processing/rendering/AsynchronousSectionedRendererTest.java @@ -15,7 +15,6 @@ import com.yahoo.processing.response.DataList; import com.yahoo.processing.response.IncomingData; import com.yahoo.text.Utf8; import org.junit.Test; -import static org.junit.Assert.*; import java.io.IOException; import 
java.io.OutputStream; @@ -23,10 +22,16 @@ import java.nio.ByteBuffer; import java.nio.charset.Charset; import java.util.ArrayList; import java.util.List; -import java.util.concurrent.*; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Executor; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; import static org.hamcrest.CoreMatchers.equalTo; +import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertThat; +import static org.junit.Assert.assertTrue; /** * @author <a href="mailto:einarmr@yahoo-inc.com">Einar M R Rosenvinge</a> @@ -222,7 +227,7 @@ public class AsynchronousSectionedRendererTest { return render(renderer, data); } - @SuppressWarnings("unchecked") + @SuppressWarnings({"unchecked", "removal"}) public String render(Renderer renderer, DataList data) throws InterruptedException, IOException { TestContentChannel contentChannel = new TestContentChannel(); @@ -403,6 +408,7 @@ public class AsynchronousSectionedRendererTest { } @Override + @SuppressWarnings("removal") public ListenableFuture<DataList<StringData>> complete() { return new ListenableFuture<DataList<StringData>>() { @Override @@ -438,6 +444,11 @@ public class AsynchronousSectionedRendererTest { } @Override + public CompletableFuture<DataList<StringData>> completeFuture() { + return CompletableFuture.completedFuture(this); + } + + @Override public String getString() { return list.toString(); } diff --git a/container-core/src/test/java/com/yahoo/processing/test/documentation/AsyncDataProcessingInitiator.java b/container-core/src/test/java/com/yahoo/processing/test/documentation/AsyncDataProcessingInitiator.java index 67a6634b659..21731f7d714 100644 --- a/container-core/src/test/java/com/yahoo/processing/test/documentation/AsyncDataProcessingInitiator.java +++ b/container-core/src/test/java/com/yahoo/processing/test/documentation/AsyncDataProcessingInitiator.java @@ 
-3,8 +3,12 @@ package com.yahoo.processing.test.documentation; import com.google.common.util.concurrent.MoreExecutors; import com.yahoo.component.chain.Chain; -import com.yahoo.processing.*; -import com.yahoo.processing.execution.*; +import com.yahoo.processing.Processor; +import com.yahoo.processing.Request; +import com.yahoo.processing.Response; +import com.yahoo.processing.execution.Execution; +import com.yahoo.processing.execution.ExecutionWithResponse; +import com.yahoo.processing.execution.RunnableExecution; /** * A processor which registers a listener on the future completion of @@ -18,6 +22,7 @@ public class AsyncDataProcessingInitiator extends Processor { this.asyncChain=asyncChain; } + @SuppressWarnings({"removal"}) @Override public Response process(Request request, Execution execution) { Response response=execution.process(request); diff --git a/container-disc/abi-spec.json b/container-disc/abi-spec.json index 735211ff47c..d924d5196b9 100644 --- a/container-disc/abi-spec.json +++ b/container-disc/abi-spec.json @@ -109,8 +109,10 @@ "public void <init>()", "public void <init>(com.yahoo.container.jdisc.secretstore.SecretStoreConfig)", "public com.yahoo.container.jdisc.secretstore.SecretStoreConfig$Builder groups(com.yahoo.container.jdisc.secretstore.SecretStoreConfig$Groups$Builder)", + "public com.yahoo.container.jdisc.secretstore.SecretStoreConfig$Builder groups(java.util.function.Consumer)", "public com.yahoo.container.jdisc.secretstore.SecretStoreConfig$Builder groups(java.util.List)", "public com.yahoo.container.jdisc.secretstore.SecretStoreConfig$Builder awsParameterStores(com.yahoo.container.jdisc.secretstore.SecretStoreConfig$AwsParameterStores$Builder)", + "public com.yahoo.container.jdisc.secretstore.SecretStoreConfig$Builder awsParameterStores(java.util.function.Consumer)", "public com.yahoo.container.jdisc.secretstore.SecretStoreConfig$Builder awsParameterStores(java.util.List)", "public final boolean 
dispatchGetConfig(com.yahoo.config.ConfigInstance$Producer)", "public final java.lang.String getDefMd5()", diff --git a/container-disc/src/main/java/com/yahoo/container/jdisc/metric/GarbageCollectionMetrics.java b/container-disc/src/main/java/com/yahoo/container/jdisc/metric/GarbageCollectionMetrics.java index 7bb01e76b43..879778487f5 100644 --- a/container-disc/src/main/java/com/yahoo/container/jdisc/metric/GarbageCollectionMetrics.java +++ b/container-disc/src/main/java/com/yahoo/container/jdisc/metric/GarbageCollectionMetrics.java @@ -62,10 +62,10 @@ public class GarbageCollectionMetrics { for(Iterator<Map.Entry<String, LinkedList<GcStats>>> it = gcStatistics.entrySet().iterator(); it.hasNext(); ) { Map.Entry<String, LinkedList<GcStats>> entry = it.next(); LinkedList<GcStats> history = entry.getValue(); - while(history.isEmpty() == false && oldestToKeep.isAfter(history.getFirst().when)) { + while( ! history.isEmpty() && oldestToKeep.isAfter(history.getFirst().when)) { history.removeFirst(); } - if(history.isEmpty()) { + if (history.isEmpty()) { it.remove(); } } diff --git a/container-messagebus/src/test/java/com/yahoo/messagebus/jdisc/MbusRequestHandlerTestCase.java b/container-messagebus/src/test/java/com/yahoo/messagebus/jdisc/MbusRequestHandlerTestCase.java index 9e009d4a40d..64b01bd047f 100644 --- a/container-messagebus/src/test/java/com/yahoo/messagebus/jdisc/MbusRequestHandlerTestCase.java +++ b/container-messagebus/src/test/java/com/yahoo/messagebus/jdisc/MbusRequestHandlerTestCase.java @@ -1,7 +1,6 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.messagebus.jdisc; -import com.google.common.util.concurrent.ListenableFuture; import com.yahoo.jdisc.Request; import com.yahoo.jdisc.Response; import com.yahoo.jdisc.application.ContainerBuilder; @@ -14,6 +13,7 @@ import com.yahoo.messagebus.test.SimpleMessage; import org.junit.Test; import java.net.URI; +import java.util.concurrent.CompletableFuture; import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; @@ -81,7 +81,7 @@ public class MbusRequestHandlerTestCase { return driver; } - private static ListenableFuture<Response> dispatchMessage(final TestDriver driver, final Message msg) { + private static CompletableFuture<Response> dispatchMessage(final TestDriver driver, final Message msg) { return new RequestDispatch() { @Override diff --git a/container-search/abi-spec.json b/container-search/abi-spec.json index 183bb33b4f4..9e3d165b81d 100644 --- a/container-search/abi-spec.json +++ b/container-search/abi-spec.json @@ -4301,6 +4301,8 @@ "public void <init>(int, com.yahoo.search.Result, com.yahoo.search.Query, com.yahoo.processing.rendering.Renderer)", "public com.google.common.util.concurrent.ListenableFuture waitableRender(java.io.OutputStream)", "public static com.google.common.util.concurrent.ListenableFuture waitableRender(com.yahoo.search.Result, com.yahoo.search.Query, com.yahoo.processing.rendering.Renderer, java.io.OutputStream)", + "public java.util.concurrent.CompletableFuture asyncRender(java.io.OutputStream)", + "public static java.util.concurrent.CompletableFuture asyncRender(com.yahoo.search.Result, com.yahoo.search.Query, com.yahoo.processing.rendering.Renderer, java.io.OutputStream)", "public void render(java.io.OutputStream, com.yahoo.jdisc.handler.ContentChannel, com.yahoo.jdisc.handler.CompletionHandler)", "public void populateAccessLogEntry(com.yahoo.container.logging.AccessLogEntry)", "public java.lang.String getParsedQuery()", @@ -4672,6 +4674,7 @@ "public void <init>()", "public void 
<init>(com.yahoo.search.pagetemplates.ResolversConfig)", "public com.yahoo.search.pagetemplates.ResolversConfig$Builder component(com.yahoo.search.pagetemplates.ResolversConfig$Component$Builder)", + "public com.yahoo.search.pagetemplates.ResolversConfig$Builder component(java.util.function.Consumer)", "public com.yahoo.search.pagetemplates.ResolversConfig$Builder component(java.util.List)", "public final boolean dispatchGetConfig(com.yahoo.config.ConfigInstance$Producer)", "public final java.lang.String getDefMd5()", @@ -6963,6 +6966,7 @@ "public void <init>()", "public void <init>(com.yahoo.search.query.rewrite.RewritesConfig)", "public com.yahoo.search.query.rewrite.RewritesConfig$Builder fsaDict(com.yahoo.search.query.rewrite.RewritesConfig$FsaDict$Builder)", + "public com.yahoo.search.query.rewrite.RewritesConfig$Builder fsaDict(java.util.function.Consumer)", "public com.yahoo.search.query.rewrite.RewritesConfig$Builder fsaDict(java.util.List)", "public final boolean dispatchGetConfig(com.yahoo.config.ConfigInstance$Producer)", "public final java.lang.String getDefMd5()", @@ -7201,13 +7205,13 @@ ], "methods": [ "public void <init>()", - "public final com.google.common.util.concurrent.ListenableFuture render(java.io.OutputStream, com.yahoo.search.Result, com.yahoo.processing.execution.Execution, com.yahoo.processing.Request)", + "public final java.util.concurrent.CompletableFuture renderResponse(java.io.OutputStream, com.yahoo.search.Result, com.yahoo.processing.execution.Execution, com.yahoo.processing.Request)", "protected abstract void render(java.io.Writer, com.yahoo.search.Result)", "public java.lang.String getCharacterEncoding(com.yahoo.search.Result)", "public java.lang.String getDefaultSummaryClass()", "public final java.lang.String getRequestedEncoding(com.yahoo.search.Query)", "public com.yahoo.search.rendering.Renderer clone()", - "public bridge synthetic com.google.common.util.concurrent.ListenableFuture render(java.io.OutputStream, 
com.yahoo.processing.Response, com.yahoo.processing.execution.Execution, com.yahoo.processing.Request)", + "public bridge synthetic java.util.concurrent.CompletableFuture renderResponse(java.io.OutputStream, com.yahoo.processing.Response, com.yahoo.processing.execution.Execution, com.yahoo.processing.Request)", "public bridge synthetic com.yahoo.processing.rendering.Renderer clone()", "public bridge synthetic com.yahoo.component.AbstractComponent clone()", "public bridge synthetic java.lang.Object clone()" @@ -7703,6 +7707,7 @@ "public java.util.Set getFilled()", "public com.yahoo.processing.response.IncomingData incoming()", "public com.google.common.util.concurrent.ListenableFuture complete()", + "public java.util.concurrent.CompletableFuture completeFuture()", "public void addDataListener(java.lang.Runnable)", "public void close()", "public bridge synthetic com.yahoo.search.result.Hit clone()", @@ -8423,6 +8428,7 @@ "public void <init>()", "public void <init>(com.yahoo.search.statistics.TimingSearcherConfig)", "public com.yahoo.search.statistics.TimingSearcherConfig$Builder timer(com.yahoo.search.statistics.TimingSearcherConfig$Timer$Builder)", + "public com.yahoo.search.statistics.TimingSearcherConfig$Builder timer(java.util.function.Consumer)", "public com.yahoo.search.statistics.TimingSearcherConfig$Builder timer(java.util.List)", "public final boolean dispatchGetConfig(com.yahoo.config.ConfigInstance$Producer)", "public final java.lang.String getDefMd5()", diff --git a/container-search/src/main/java/com/yahoo/search/handler/HttpSearchResponse.java b/container-search/src/main/java/com/yahoo/search/handler/HttpSearchResponse.java index 5c897245e64..64e7403fa1a 100644 --- a/container-search/src/main/java/com/yahoo/search/handler/HttpSearchResponse.java +++ b/container-search/src/main/java/com/yahoo/search/handler/HttpSearchResponse.java @@ -3,6 +3,7 @@ package com.yahoo.search.handler; import com.google.common.util.concurrent.ListenableFuture; import 
com.yahoo.collections.ListMap; +import com.yahoo.concurrent.CompletableFutures; import com.yahoo.container.handler.Coverage; import com.yahoo.container.handler.Timing; import com.yahoo.container.jdisc.ExtendedResponse; @@ -25,6 +26,7 @@ import java.io.OutputStream; import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.concurrent.CompletableFuture; /** * Wrap the result of a query as an HTTP response. @@ -75,20 +77,36 @@ public class HttpSearchResponse extends ExtendedResponse { } } + /** @deprecated Use {@link #asyncRender(OutputStream)} instead */ + @Deprecated(forRemoval = true, since = "7") public ListenableFuture<Boolean> waitableRender(OutputStream stream) throws IOException { return waitableRender(result, query, rendererCopy, stream); } + /** @deprecated Use {@link #asyncRender(Result, Query, Renderer, OutputStream)} instead */ + @Deprecated(forRemoval = true, since = "7") + @SuppressWarnings("removal") public static ListenableFuture<Boolean> waitableRender(Result result, Query query, Renderer<Result> renderer, OutputStream stream) throws IOException { + return CompletableFutures.toGuavaListenableFuture(asyncRender(result, query, renderer, stream)); + } + + public CompletableFuture<Boolean> asyncRender(OutputStream stream) { + return asyncRender(result, query, rendererCopy, stream); + } + + public static CompletableFuture<Boolean> asyncRender(Result result, + Query query, + Renderer<Result> renderer, + OutputStream stream) { SearchResponse.trimHits(result); SearchResponse.removeEmptySummaryFeatureFields(result); - return renderer.render(stream, result, query.getModel().getExecution(), query); - + return renderer.renderResponse(stream, result, query.getModel().getExecution(), query); } + @Override public void render(OutputStream output, ContentChannel networkChannel, CompletionHandler handler) throws IOException { if (rendererCopy instanceof AsynchronousSectionedRenderer) { @@ -98,9 +116,9 @@ public class 
HttpSearchResponse extends ExtendedResponse { try { try { long nanoStart = System.nanoTime(); - ListenableFuture<Boolean> promise = waitableRender(output); + CompletableFuture<Boolean> promise = asyncRender(output); if (metric != null) { - promise.addListener(new RendererLatencyReporter(nanoStart), Runnable::run); + promise.whenComplete((__, ___) -> new RendererLatencyReporter(nanoStart).run()); } } finally { if (!(rendererCopy instanceof AsynchronousSectionedRenderer)) { diff --git a/container-search/src/main/java/com/yahoo/search/rendering/Renderer.java b/container-search/src/main/java/com/yahoo/search/rendering/Renderer.java index b8a7f0d1978..6ff8f003f7e 100644 --- a/container-search/src/main/java/com/yahoo/search/rendering/Renderer.java +++ b/container-search/src/main/java/com/yahoo/search/rendering/Renderer.java @@ -1,19 +1,18 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.search.rendering; -import com.yahoo.search.Query; -import com.yahoo.search.Result; -import com.google.common.util.concurrent.ListenableFuture; -import com.google.common.util.concurrent.SettableFuture; import com.yahoo.io.ByteWriter; import com.yahoo.processing.Request; import com.yahoo.processing.execution.Execution; +import com.yahoo.search.Query; +import com.yahoo.search.Result; import java.io.IOException; import java.io.OutputStream; import java.io.Writer; import java.nio.charset.Charset; import java.nio.charset.CharsetEncoder; +import java.util.concurrent.CompletableFuture; /** * Renders a search result to a writer synchronously @@ -37,7 +36,7 @@ abstract public class Renderer extends com.yahoo.processing.rendering.Renderer<R * @return a future which is always completed to true */ @Override - public final ListenableFuture<Boolean> render(OutputStream stream, Result response, Execution execution, Request request) { + public final CompletableFuture<Boolean> renderResponse(OutputStream stream, Result response, 
Execution execution, Request request) { Writer writer = null; try { writer = createWriter(stream, response); @@ -50,8 +49,8 @@ abstract public class Renderer extends com.yahoo.processing.rendering.Renderer<R if (writer != null) try { writer.close(); } catch (IOException e2) {}; } - SettableFuture<Boolean> completed = SettableFuture.create(); - completed.set(true); + CompletableFuture<Boolean> completed = new CompletableFuture<>(); + completed.complete(true); return completed; } diff --git a/container-search/src/main/java/com/yahoo/search/result/HitGroup.java b/container-search/src/main/java/com/yahoo/search/result/HitGroup.java index 1ae3f4e60cc..6d09bf66175 100644 --- a/container-search/src/main/java/com/yahoo/search/result/HitGroup.java +++ b/container-search/src/main/java/com/yahoo/search/result/HitGroup.java @@ -5,6 +5,7 @@ import com.google.common.base.Predicate; import com.google.common.collect.Iterables; import com.google.common.util.concurrent.ListenableFuture; import com.yahoo.collections.ListenableArrayList; +import com.yahoo.concurrent.CompletableFutures; import com.yahoo.net.URI; import com.yahoo.prelude.fastsearch.SortDataHitSorter; import com.yahoo.processing.response.ArrayDataList; @@ -19,6 +20,7 @@ import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Set; +import java.util.concurrent.CompletableFuture; import java.util.stream.Collectors; /** @@ -84,7 +86,7 @@ public class HitGroup extends Hit implements DataList<Hit>, Cloneable, Iterable< */ private DefaultErrorHit errorHit = null; - private final ListenableFuture<DataList<Hit>> completedFuture; + private final CompletableFuture<DataList<Hit>> completedFuture; private final IncomingData<Hit> incomingHits; @@ -965,7 +967,13 @@ public class HitGroup extends Hit implements DataList<Hit>, Cloneable, Iterable< public IncomingData<Hit> incoming() { return incomingHits; } @Override - public ListenableFuture<DataList<Hit>> complete() { return completedFuture; } + 
@SuppressWarnings("removal") + @Deprecated(forRemoval = true, since = "7") + public ListenableFuture<DataList<Hit>> complete() { + return CompletableFutures.toGuavaListenableFuture(completedFuture); + } + + @Override public CompletableFuture<DataList<Hit>> completeFuture() { return completedFuture; } @Override public void addDataListener(Runnable runnable) { diff --git a/container-search/src/test/java/com/yahoo/prelude/semantics/test/rulebases/expansion.sr b/container-search/src/test/java/com/yahoo/prelude/semantics/test/rulebases/expansion.sr index 32f8e86b59f..9a147887207 100644 --- a/container-search/src/test/java/com/yahoo/prelude/semantics/test/rulebases/expansion.sr +++ b/container-search/src/test/java/com/yahoo/prelude/semantics/test/rulebases/expansion.sr @@ -5,4 +5,6 @@ equiv1 +> =equiv2 =equiv3; testfield:[test] -> =testfield:e1 =testfield:e2 =testfield:e3; +synonymfield:[test] -> =[test]; + [test] :- foo, bar, baz; diff --git a/container-search/src/test/java/com/yahoo/search/pagetemplates/engine/test/ExecutionAbstractTestCase.java b/container-search/src/test/java/com/yahoo/search/pagetemplates/engine/test/ExecutionAbstractTestCase.java index 0819cbd72b4..b39c170c6a3 100644 --- a/container-search/src/test/java/com/yahoo/search/pagetemplates/engine/test/ExecutionAbstractTestCase.java +++ b/container-search/src/test/java/com/yahoo/search/pagetemplates/engine/test/ExecutionAbstractTestCase.java @@ -53,7 +53,7 @@ public class ExecutionAbstractTestCase { assertRendered(result,resultFileName,false); } - @SuppressWarnings("deprecation") + @SuppressWarnings({"deprecation", "removal"}) protected void assertRendered(Result result, String resultFileName, boolean print) { try { PageTemplatesXmlRenderer renderer = new PageTemplatesXmlRenderer(); diff --git a/container-search/src/test/java/com/yahoo/search/rendering/AsyncGroupPopulationTestCase.java b/container-search/src/test/java/com/yahoo/search/rendering/AsyncGroupPopulationTestCase.java index 
359aed85d30..7db29568d5b 100644 --- a/container-search/src/test/java/com/yahoo/search/rendering/AsyncGroupPopulationTestCase.java +++ b/container-search/src/test/java/com/yahoo/search/rendering/AsyncGroupPopulationTestCase.java @@ -1,23 +1,7 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.search.rendering; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; - -import java.io.ByteArrayOutputStream; -import java.io.IOException; -import java.util.Map; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.Executor; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; - -import org.junit.Test; - -import com.fasterxml.jackson.core.JsonParseException; -import com.fasterxml.jackson.databind.JsonMappingException; import com.fasterxml.jackson.databind.ObjectMapper; -import com.google.common.util.concurrent.ListenableFuture; import com.yahoo.concurrent.Receiver; import com.yahoo.processing.response.Data; import com.yahoo.processing.response.DataList; @@ -29,6 +13,20 @@ import com.yahoo.search.result.HitGroup; import com.yahoo.search.result.Relevance; import com.yahoo.search.searchchain.Execution; import com.yahoo.text.Utf8; +import org.junit.Test; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.util.Map; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Executor; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.function.BiConsumer; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; /** * Test adding hits to a hit group during rendering. 
@@ -36,18 +34,20 @@ import com.yahoo.text.Utf8; * @author <a href="mailto:steinar@yahoo-inc.com">Steinar Knutsen</a> */ public class AsyncGroupPopulationTestCase { - private static class WrappedFuture<F> implements ListenableFuture<F> { + private static class WrappedFuture<F> extends CompletableFuture<F> { Receiver<Boolean> isListening = new Receiver<>(); - private ListenableFuture<F> wrapped; + private final CompletableFuture<F> wrapped; - WrappedFuture(ListenableFuture<F> wrapped) { + WrappedFuture(CompletableFuture<F> wrapped) { this.wrapped = wrapped; } - public void addListener(Runnable listener, Executor executor) { - wrapped.addListener(listener, executor); + @Override + public CompletableFuture<F> whenCompleteAsync(BiConsumer<? super F, ? super Throwable> action, Executor executor) { + wrapped.whenCompleteAsync(action); isListening.put(Boolean.TRUE); + return this; } public boolean cancel(boolean mayInterruptIfRunning) { @@ -73,14 +73,14 @@ public class AsyncGroupPopulationTestCase { } private static class ObservableIncoming<DATATYPE extends Data> extends DefaultIncomingData<DATATYPE> { - WrappedFuture<DataList<DATATYPE>> waitForIt = null; + volatile WrappedFuture<DataList<DATATYPE>> waitForIt = null; private final Object lock = new Object(); @Override - public ListenableFuture<DataList<DATATYPE>> completed() { + public CompletableFuture<DataList<DATATYPE>> completedFuture() { synchronized (lock) { if (waitForIt == null) { - waitForIt = new WrappedFuture<>(super.completed()); + waitForIt = new WrappedFuture<>(super.completedFuture()); } } return waitForIt; @@ -99,7 +99,7 @@ public class AsyncGroupPopulationTestCase { @Test public final void test() throws InterruptedException, ExecutionException, - JsonParseException, JsonMappingException, IOException { + IOException { String rawExpected = "{" + " \"root\": {" + " \"children\": [" @@ -125,10 +125,10 @@ public class AsyncGroupPopulationTestCase { JsonRenderer renderer = new JsonRenderer(); Result result = new 
Result(new Query(), h); renderer.init(); - ListenableFuture<Boolean> f = renderer.render(out, result, + CompletableFuture<Boolean> f = renderer.renderResponse(out, result, new Execution(Execution.Context.createContextStub()), result.getQuery()); - WrappedFuture<DataList<Hit>> x = (WrappedFuture<DataList<Hit>>) h.incoming().completed(); + WrappedFuture<DataList<Hit>> x = (WrappedFuture<DataList<Hit>>) h.incoming().completedFuture(); x.isListening.get(86_400_000); h.incoming().add(new Hit("yahoo2")); h.incoming().markComplete(); diff --git a/container-search/src/test/java/com/yahoo/search/rendering/JsonRendererTestCase.java b/container-search/src/test/java/com/yahoo/search/rendering/JsonRendererTestCase.java index 7395b4802a0..f3a71af0b9e 100644 --- a/container-search/src/test/java/com/yahoo/search/rendering/JsonRendererTestCase.java +++ b/container-search/src/test/java/com/yahoo/search/rendering/JsonRendererTestCase.java @@ -364,6 +364,7 @@ public class JsonRendererTestCase { } @Test + @SuppressWarnings("removal") public void testEmptyTracing() throws IOException, InterruptedException, ExecutionException { String expected = "{" + " \"root\": {" @@ -391,7 +392,7 @@ public class JsonRendererTestCase { assertEqualJson(expected, summary); } - @SuppressWarnings("unchecked") + @SuppressWarnings({"unchecked", "removal"}) @Test public void testTracingWithEmptySubtree() throws IOException, InterruptedException, ExecutionException { String expected = "{" @@ -1372,6 +1373,7 @@ public class JsonRendererTestCase { return render(execution, r); } + @SuppressWarnings("removal") private String render(Execution execution, Result r) throws InterruptedException, ExecutionException { ByteArrayOutputStream bs = new ByteArrayOutputStream(); ListenableFuture<Boolean> f = renderer.render(bs, r, execution, null); diff --git a/container-search/src/test/java/com/yahoo/search/rendering/SyncDefaultRendererTestCase.java 
b/container-search/src/test/java/com/yahoo/search/rendering/SyncDefaultRendererTestCase.java index ae1eade12d3..99911276f50 100644 --- a/container-search/src/test/java/com/yahoo/search/rendering/SyncDefaultRendererTestCase.java +++ b/container-search/src/test/java/com/yahoo/search/rendering/SyncDefaultRendererTestCase.java @@ -1,17 +1,6 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.search.rendering; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; - -import java.io.ByteArrayOutputStream; -import java.io.IOException; -import java.util.concurrent.ExecutionException; - -import org.junit.After; -import org.junit.Before; -import org.junit.Test; - import com.google.common.util.concurrent.ListenableFuture; import com.yahoo.component.chain.Chain; import com.yahoo.prelude.fastsearch.FastHit; @@ -26,6 +15,15 @@ import com.yahoo.search.statistics.ElapsedTimeTestCase.CreativeTimeSource; import com.yahoo.search.statistics.ElapsedTimeTestCase.UselessSearcher; import com.yahoo.search.statistics.TimeTracker; import com.yahoo.text.Utf8; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import java.io.ByteArrayOutputStream; +import java.util.concurrent.ExecutionException; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; /** * Check the legacy sync default renderer doesn't spontaneously combust. 
@@ -56,7 +54,7 @@ public class SyncDefaultRendererTestCase { assertEquals("text/xml", d.getMimeType()); } - @SuppressWarnings("deprecation") + @SuppressWarnings({"deprecation", "removal"}) @Test public void testRenderWriterResult() throws InterruptedException, ExecutionException { Query q = new Query("/?query=a&tracelevel=5"); diff --git a/container-search/src/test/java/com/yahoo/search/rendering/XMLRendererTestCase.java b/container-search/src/test/java/com/yahoo/search/rendering/XMLRendererTestCase.java index 0fad449763f..b3534d580d8 100644 --- a/container-search/src/test/java/com/yahoo/search/rendering/XMLRendererTestCase.java +++ b/container-search/src/test/java/com/yahoo/search/rendering/XMLRendererTestCase.java @@ -1,39 +1,36 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.search.rendering; -import static org.junit.Assert.*; - -import java.io.ByteArrayOutputStream; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; - +import com.google.common.util.concurrent.ListenableFuture; import com.yahoo.component.ComponentId; +import com.yahoo.component.chain.Chain; import com.yahoo.container.QrSearchersConfig; import com.yahoo.prelude.Index; import com.yahoo.prelude.IndexFacts; import com.yahoo.prelude.IndexModel; import com.yahoo.prelude.SearchDefinition; -import com.yahoo.prelude.searcher.JuniperSearcher; -import com.yahoo.search.result.Hit; -import com.yahoo.search.result.Relevance; -import com.yahoo.search.searchchain.Execution; -import com.yahoo.search.searchchain.testutil.DocumentSourceSearcher; -import org.junit.Test; - -import com.google.common.util.concurrent.ListenableFuture; -import com.yahoo.component.chain.Chain; import com.yahoo.prelude.fastsearch.FastHit; +import com.yahoo.prelude.searcher.JuniperSearcher; import com.yahoo.search.Query; import com.yahoo.search.Result; import com.yahoo.search.Searcher; import com.yahoo.search.result.Coverage; import 
com.yahoo.search.result.ErrorMessage; +import com.yahoo.search.result.Hit; import com.yahoo.search.result.HitGroup; +import com.yahoo.search.result.Relevance; +import com.yahoo.search.searchchain.Execution; +import com.yahoo.search.searchchain.testutil.DocumentSourceSearcher; import com.yahoo.search.statistics.ElapsedTimeTestCase; -import com.yahoo.search.statistics.TimeTracker; import com.yahoo.search.statistics.ElapsedTimeTestCase.CreativeTimeSource; +import com.yahoo.search.statistics.TimeTracker; import com.yahoo.text.Utf8; +import org.junit.Test; + +import java.io.ByteArrayOutputStream; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; /** * Test the XML renderer @@ -158,6 +155,7 @@ public class XMLRendererTestCase { assertTrue(summary.contains("<meta type=\"context\">")); } + @SuppressWarnings("removal") private String render(Result result) throws Exception { XmlRenderer renderer = new XmlRenderer(); renderer.init(); diff --git a/container-search/src/test/java/com/yahoo/search/searchchain/test/FutureDataTestCase.java b/container-search/src/test/java/com/yahoo/search/searchchain/test/FutureDataTestCase.java index 9c36971f688..2426b18f018 100644 --- a/container-search/src/test/java/com/yahoo/search/searchchain/test/FutureDataTestCase.java +++ b/container-search/src/test/java/com/yahoo/search/searchchain/test/FutureDataTestCase.java @@ -2,7 +2,8 @@ package com.yahoo.search.searchchain.test; import com.yahoo.component.ComponentId; -import com.yahoo.processing.response.*; +import com.yahoo.component.chain.Chain; +import com.yahoo.processing.response.IncomingData; import com.yahoo.search.Query; import com.yahoo.search.Result; import com.yahoo.search.Searcher; @@ -11,18 +12,18 @@ import com.yahoo.search.federation.sourceref.SearchChainResolver; import com.yahoo.search.result.Hit; import com.yahoo.search.result.HitGroup; import com.yahoo.search.searchchain.Execution; - import com.yahoo.search.searchchain.SearchChainRegistry; 
import com.yahoo.search.searchchain.model.federation.FederationOptions; import org.junit.Test; -import static org.junit.Assert.*; import java.util.Collections; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; -import com.yahoo.component.chain.Chain; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; /** * Tests using the async capabilities of the Processing parent framework of searchers. @@ -31,6 +32,7 @@ import com.yahoo.component.chain.Chain; */ public class FutureDataTestCase { + @SuppressWarnings("removal") @Test public void testAsyncFederation() throws InterruptedException, ExecutionException { // Setup environment @@ -77,6 +79,7 @@ public class FutureDataTestCase { assertEquals("async:1", asyncGroup.get(1).getId().toString()); } + @SuppressWarnings("removal") @Test public void testFutureData() throws InterruptedException, ExecutionException, TimeoutException { // Set up diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/RoutingController.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/RoutingController.java index c58bb0e5fab..943d6ac7b18 100644 --- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/RoutingController.java +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/RoutingController.java @@ -74,7 +74,6 @@ public class RoutingController { private final RoutingPolicies routingPolicies; private final RotationRepository rotationRepository; private final BooleanFlag hideSharedRoutingEndpoint; - private final BooleanFlag changeRoutingStatusOfAllUpstreams; public RoutingController(Controller controller, RotationsConfig rotationsConfig) { this.controller = Objects.requireNonNull(controller, "controller must be non-null"); @@ -83,7 +82,6 @@ public class RoutingController { controller.applications(), 
controller.curator()); this.hideSharedRoutingEndpoint = Flags.HIDE_SHARED_ROUTING_ENDPOINT.bindTo(controller.flagSource()); - this.changeRoutingStatusOfAllUpstreams = Flags.CHANGE_ROUTING_STATUS_OF_ALL_UPSTREAMS.bindTo(controller.flagSource()); } /** Create a routing context for given deployment */ @@ -92,8 +90,7 @@ public class RoutingController { return new SharedDeploymentRoutingContext(deployment, this, controller.serviceRegistry().configServer(), - controller.clock(), - changeRoutingStatusOfAllUpstreams.value()); + controller.clock()); } return new ExclusiveDeploymentRoutingContext(deployment, this); } diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/context/DeploymentRoutingContext.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/context/DeploymentRoutingContext.java index 3124d836e54..e5eb1382ccf 100644 --- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/context/DeploymentRoutingContext.java +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/context/DeploymentRoutingContext.java @@ -72,21 +72,15 @@ public abstract class DeploymentRoutingContext implements RoutingContext { private final Clock clock; private final ConfigServer configServer; - private final boolean changeAllUpstreams; - public SharedDeploymentRoutingContext(DeploymentId deployment, RoutingController controller, ConfigServer configServer, Clock clock, boolean changeAllUpstreams) { + public SharedDeploymentRoutingContext(DeploymentId deployment, RoutingController controller, ConfigServer configServer, Clock clock) { super(deployment, RoutingMethod.shared, controller); this.clock = Objects.requireNonNull(clock); this.configServer = Objects.requireNonNull(configServer); - this.changeAllUpstreams = changeAllUpstreams; } @Override public void setRoutingStatus(RoutingStatus.Value value, RoutingStatus.Agent agent) { - if (!changeAllUpstreams) { - setLegacyRoutingStatus(value, 
agent); - return; - } EndpointStatus newStatus = new EndpointStatus(value == RoutingStatus.Value.in ? EndpointStatus.Status.in : EndpointStatus.Status.out, @@ -101,10 +95,6 @@ public abstract class DeploymentRoutingContext implements RoutingContext { @Override public RoutingStatus routingStatus() { - if (!changeAllUpstreams) { - return legacyRoutingStatus(); - } - // In a given deployment, all upstreams (clusters) share the same status, so we can query using any // upstream name String upstreamName = upstreamNames().get(0); @@ -136,40 +126,6 @@ public abstract class DeploymentRoutingContext implements RoutingContext { return upstreamNames; } - private void setLegacyRoutingStatus(RoutingStatus.Value value, RoutingStatus.Agent agent) { - EndpointStatus newStatus = new EndpointStatus(value == RoutingStatus.Value.in - ? EndpointStatus.Status.in - : EndpointStatus.Status.out, - agent.name(), - clock.instant()); - primaryEndpoint().ifPresent(endpoint -> { - try { - configServer.setGlobalRotationStatus(deployment, List.of(endpoint.upstreamName(deployment)), newStatus); - } catch (Exception e) { - throw new RuntimeException("Failed to set rotation status of " + endpoint + " in " + deployment, e); - } - }); - } - - private RoutingStatus legacyRoutingStatus() { - Optional<EndpointStatus> status = primaryEndpoint().map(endpoint -> { - var upstreamName = endpoint.upstreamName(deployment); - return configServer.getGlobalRotationStatus(deployment, upstreamName); - }); - if (status.isEmpty()) return RoutingStatus.DEFAULT; - RoutingStatus.Agent agent; - try { - agent = RoutingStatus.Agent.valueOf(status.get().agent().toLowerCase()); - } catch (IllegalArgumentException e) { - agent = RoutingStatus.Agent.unknown; - } - return new RoutingStatus(status.get().status() == EndpointStatus.Status.in - ? 
RoutingStatus.Value.in - : RoutingStatus.Value.out, - agent, - status.get().changedAt()); - } - private Optional<Endpoint> primaryEndpoint() { return controller.readDeclaredEndpointsOf(deployment.applicationId()) .requiresRotation() diff --git a/default_build_settings.cmake b/default_build_settings.cmake index b0dfed2bfd5..599aca098ec 100644 --- a/default_build_settings.cmake +++ b/default_build_settings.cmake @@ -32,16 +32,22 @@ function(setup_vespa_default_build_settings_centos_8) message("-- Setting up default build settings for centos 8") set(DEFAULT_EXTRA_INCLUDE_DIRECTORY "${VESPA_DEPS}/include" PARENT_SCOPE) if (VESPA_OS_DISTRO_NAME STREQUAL "CentOS Stream") - set(DEFAULT_VESPA_LLVM_VERSION "12" PARENT_SCOPE) + set(DEFAULT_VESPA_LLVM_VERSION "13" PARENT_SCOPE) else() set(DEFAULT_VESPA_LLVM_VERSION "12" PARENT_SCOPE) endif() endfunction() -function(setup_vespa_default_build_settings_rocky_8_4) - message("-- Setting up default build settings for rocky 8.4") +function(setup_vespa_default_build_settings_rocky_8_5) + message("-- Setting up default build settings for rocky 8.5") set(DEFAULT_EXTRA_INCLUDE_DIRECTORY "${VESPA_DEPS}/include" PARENT_SCOPE) - set(DEFAULT_VESPA_LLVM_VERSION "11" PARENT_SCOPE) + set(DEFAULT_VESPA_LLVM_VERSION "12" PARENT_SCOPE) +endfunction() + +function(setup_vespa_default_build_settings_almalinux_8_5) + message("-- Setting up default build settings for almalinux 8.5") + set(DEFAULT_EXTRA_INCLUDE_DIRECTORY "${VESPA_DEPS}/include" PARENT_SCOPE) + set(DEFAULT_VESPA_LLVM_VERSION "12" PARENT_SCOPE) endfunction() function(setup_vespa_default_build_settings_darwin) @@ -192,8 +198,10 @@ function(vespa_use_default_build_settings) setup_vespa_default_build_settings_centos_7() elseif(VESPA_OS_DISTRO_COMBINED STREQUAL "centos 8") setup_vespa_default_build_settings_centos_8() - elseif(VESPA_OS_DISTRO_COMBINED STREQUAL "rocky 8.4") - setup_vespa_default_build_settings_rocky_8_4() + elseif(VESPA_OS_DISTRO_COMBINED STREQUAL "rocky 8.5") + 
setup_vespa_default_build_settings_rocky_8_5() + elseif(VESPA_OS_DISTRO_COMBINED STREQUAL "almalinux 8.5") + setup_vespa_default_build_settings_almalinux_8_5() elseif(VESPA_OS_DISTRO STREQUAL "darwin") setup_vespa_default_build_settings_darwin() elseif(VESPA_OS_DISTRO_COMBINED STREQUAL "fedora 32") diff --git a/dist/vespa.spec b/dist/vespa.spec index 3c96c6b0ce1..f18c802d5fc 100644 --- a/dist/vespa.spec +++ b/dist/vespa.spec @@ -62,10 +62,18 @@ BuildRequires: vespa-pybind11-devel BuildRequires: python3-devel %endif %if 0%{?el8} +%global _centos_stream %(grep -qs '^NAME="CentOS Stream"' /etc/os-release && echo 1 || echo 0) +%if 0%{?_centos_stream} +BuildRequires: gcc-toolset-11-gcc-c++ +BuildRequires: gcc-toolset-11-binutils +BuildRequires: gcc-toolset-11-libatomic-devel +%define _devtoolset_enable /opt/rh/gcc-toolset-11/enable +%else BuildRequires: gcc-toolset-10-gcc-c++ BuildRequires: gcc-toolset-10-binutils BuildRequires: gcc-toolset-10-libatomic-devel %define _devtoolset_enable /opt/rh/gcc-toolset-10/enable +%endif BuildRequires: maven BuildRequires: pybind11-devel BuildRequires: python3-pytest @@ -102,9 +110,8 @@ BuildRequires: cmake >= 3.11.4-3 BuildRequires: libarchive %endif %define _command_cmake cmake -%global _centos_stream %(grep -qs '^NAME="CentOS Stream"' /etc/os-release && echo 1 || echo 0) %if 0%{?_centos_stream} -BuildRequires: (llvm-devel >= 12.0.0 and llvm-devel < 13) +BuildRequires: (llvm-devel >= 13.0.0 and llvm-devel < 14) %else BuildRequires: (llvm-devel >= 12.0.0 and llvm-devel < 13) %endif @@ -255,7 +262,7 @@ Requires: vespa-gtest = 1.11.0 %if 0%{?el8} %if 0%{?centos} || 0%{?rocky} %if 0%{?_centos_stream} -%define _vespa_llvm_version 12 +%define _vespa_llvm_version 13 %else %define _vespa_llvm_version 12 %endif @@ -379,7 +386,7 @@ Requires: openssl-libs %if 0%{?el8} %if 0%{?centos} || 0%{?rocky} %if 0%{?_centos_stream} -Requires: (llvm-libs >= 12.0.0 and llvm-libs < 13) +Requires: (llvm-libs >= 13.0.0 and llvm-libs < 14) %else Requires: 
(llvm-libs >= 12.0.0 and llvm-libs < 13) %endif diff --git a/docprocs/src/test/java/com/yahoo/docprocs/indexing/ScriptManagerTestCase.java b/docprocs/src/test/java/com/yahoo/docprocs/indexing/ScriptManagerTestCase.java index ed996f56078..a35dd0da4f3 100644 --- a/docprocs/src/test/java/com/yahoo/docprocs/indexing/ScriptManagerTestCase.java +++ b/docprocs/src/test/java/com/yahoo/docprocs/indexing/ScriptManagerTestCase.java @@ -20,8 +20,7 @@ public class ScriptManagerTestCase { @Test public void requireThatScriptsAreAppliedToSubType() throws ParseException { - DocumentTypeManager typeMgr = new DocumentTypeManager(); - typeMgr.configure("file:src/test/cfg/documentmanager_inherit.cfg"); + var typeMgr = DocumentTypeManager.fromFile("src/test/cfg/documentmanager_inherit.cfg"); DocumentType docType = typeMgr.getDocumentType("newssummary"); assertNotNull(docType); @@ -36,8 +35,7 @@ public class ScriptManagerTestCase { @Test public void requireThatScriptsAreAppliedToSuperType() throws ParseException { - DocumentTypeManager typeMgr = new DocumentTypeManager(); - typeMgr.configure("file:src/test/cfg/documentmanager_inherit.cfg"); + var typeMgr = DocumentTypeManager.fromFile("src/test/cfg/documentmanager_inherit.cfg"); DocumentType docType = typeMgr.getDocumentType("newsarticle"); assertNotNull(docType); @@ -51,16 +49,14 @@ public class ScriptManagerTestCase { @Test public void requireThatEmptyConfigurationDoesNotThrow() { - DocumentTypeManager typeMgr = new DocumentTypeManager(); - typeMgr.configure("file:src/test/cfg/documentmanager_inherit.cfg"); + var typeMgr = DocumentTypeManager.fromFile("src/test/cfg/documentmanager_inherit.cfg"); ScriptManager scriptMgr = new ScriptManager(typeMgr, new IlscriptsConfig(new IlscriptsConfig.Builder()), null, Embedder.throwsOnUse); assertNull(scriptMgr.getScript(new DocumentType("unknown"))); } @Test public void requireThatUnknownDocumentTypeReturnsNull() { - DocumentTypeManager typeMgr = new DocumentTypeManager(); - 
typeMgr.configure("file:src/test/cfg/documentmanager_inherit.cfg"); + var typeMgr = DocumentTypeManager.fromFile("src/test/cfg/documentmanager_inherit.cfg"); ScriptManager scriptMgr = new ScriptManager(typeMgr, new IlscriptsConfig(new IlscriptsConfig.Builder()), null, Embedder.throwsOnUse); for (Iterator<DocumentType> it = typeMgr.documentTypeIterator(); it.hasNext(); ) { assertNull(scriptMgr.getScript(it.next())); diff --git a/document/abi-spec.json b/document/abi-spec.json index 9112d84169f..d5ad686cd1f 100644 --- a/document/abi-spec.json +++ b/document/abi-spec.json @@ -501,6 +501,7 @@ "public void <init>(com.yahoo.document.config.DocumentmanagerConfig)", "public void assign(com.yahoo.document.DocumentTypeManager)", "public com.yahoo.document.DocumentTypeManager configure(java.lang.String)", + "public static com.yahoo.document.DocumentTypeManager fromFile(java.lang.String)", "public boolean hasDataType(java.lang.String)", "public boolean hasDataType(int)", "public com.yahoo.document.DataType getDataType(java.lang.String)", diff --git a/document/src/main/java/com/yahoo/document/DocumentTypeManager.java b/document/src/main/java/com/yahoo/document/DocumentTypeManager.java index 80ceac457b9..ff6a7194e7d 100644 --- a/document/src/main/java/com/yahoo/document/DocumentTypeManager.java +++ b/document/src/main/java/com/yahoo/document/DocumentTypeManager.java @@ -65,11 +65,24 @@ public class DocumentTypeManager { annotationTypeRegistry = other.annotationTypeRegistry; } + /** + * For testing, use fromFile factory method instead + * @deprecated //TODO Will be package-private or removed on Vespa 8 + */ + @Deprecated public DocumentTypeManager configure(String configId) { subscriber = DocumentTypeManagerConfigurer.configure(this, configId); return this; } + /** Only for unit tests */ + public static DocumentTypeManager fromFile(String fileName) { + var manager = new DocumentTypeManager(); + var sub = DocumentTypeManagerConfigurer.configure(manager, "file:" + fileName); + 
sub.close(); + return manager; + } + private void registerDefaultDataTypes() { DocumentType superDocType = DataType.DOCUMENT; dataTypes.put(superDocType.getId(), superDocType); @@ -104,6 +117,10 @@ public class DocumentTypeManager { return false; } + /** + * @deprecated //TODO Will be package-private or removed on Vespa 8 + */ + @Deprecated public boolean hasDataType(int code) { if (code == DataType.tensorDataTypeCode) return true; // built-in dynamic: Always present return dataTypes.containsKey(code); @@ -140,6 +157,10 @@ public class DocumentTypeManager { return foundTypes.get(0); } + /** + * @deprecated //TODO Will be package-private or removed on Vespa 8 + */ + @Deprecated public DataType getDataType(int code) { return getDataType(code, ""); } /** @@ -148,7 +169,10 @@ public class DocumentTypeManager { * @param code the code of the data type to return, which must be either built in or present in this manager * @param detailedType detailed type information, or the empty string if none * @return the appropriate DataType instance + * + * @deprecated //TODO Will be package-private or removed on Vespa 8 */ + @Deprecated public DataType getDataType(int code, String detailedType) { if (code == DataType.tensorDataTypeCode) // built-in dynamic return new TensorDataType(TensorType.fromSpec(detailedType)); @@ -165,7 +189,11 @@ public class DocumentTypeManager { } } + /** + * @deprecated //TODO Will be package-private or removed on Vespa 8 + */ @SuppressWarnings("deprecation") + @Deprecated DataType getDataTypeAndReturnTemporary(int code, String detailedType) { if (hasDataType(code)) { return getDataType(code, detailedType); @@ -277,6 +305,7 @@ public class DocumentTypeManager { DocumentDeserializer data = DocumentDeserializerFactory.create6(this, buf); return new Document(data); } + public Document createDocument(DocumentDeserializer data) { return new Document(data); } @@ -305,7 +334,10 @@ public class DocumentTypeManager { /** * Clears the DocumentTypeManager. 
After this operation, * only the default document type and data types are available. + * + * @deprecated //TODO Will be package-private or removed on Vespa 8 */ + @Deprecated public void clear() { documentTypes.clear(); dataTypes.clear(); @@ -316,6 +348,10 @@ public class DocumentTypeManager { return annotationTypeRegistry; } + /** + * @deprecated //TODO Will be package-private or removed on Vespa 8 + */ + @Deprecated public void shutdown() { if (subscriber!=null) subscriber.close(); } diff --git a/document/src/main/java/com/yahoo/document/DocumentTypeManagerConfigurer.java b/document/src/main/java/com/yahoo/document/DocumentTypeManagerConfigurer.java index 1f9e494aa29..e43ff26272a 100644 --- a/document/src/main/java/com/yahoo/document/DocumentTypeManagerConfigurer.java +++ b/document/src/main/java/com/yahoo/document/DocumentTypeManagerConfigurer.java @@ -11,15 +11,19 @@ import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; import java.util.Map; +import java.util.HashSet; +import java.util.Set; import java.util.logging.Logger; import java.util.stream.Collectors; +import java.util.function.Supplier; +import com.yahoo.tensor.TensorType; /** * Configures the Vespa document manager from a config id. 
* * @author Einar M R Rosenvinge */ -public class DocumentTypeManagerConfigurer implements ConfigSubscriber.SingleSubscriber<DocumentmanagerConfig>{ +public class DocumentTypeManagerConfigurer implements ConfigSubscriber.SingleSubscriber<DocumentmanagerConfig> { private final static Logger log = Logger.getLogger(DocumentTypeManagerConfigurer.class.getName()); @@ -65,20 +69,22 @@ public class DocumentTypeManagerConfigurer implements ConfigSubscriber.SingleSub return; } new Apply(config, manager); + if (config.datatype().size() == 0 && config.annotationtype().size() == 0) { + new ApplyNewDoctypeConfig(config, manager); + } } private static class Apply { + public Apply(DocumentmanagerConfig config, DocumentTypeManager manager) { this.manager = manager; - this.usev8geopositions = (config == null) ? false : config.usev8geopositions(); - if (config != null) { - apply(config); - } + this.usev8geopositions = config.usev8geopositions(); + apply(config); } - private Map<Integer, DataType> typesById = new HashMap<>(); - private Map<String, DataType> typesByName = new HashMap<>(); - private Map<Integer, DocumentmanagerConfig.Datatype> configMap = new HashMap<>(); + private final Map<Integer, DataType> typesById = new HashMap<>(); + private final Map<String, DataType> typesByName = new HashMap<>(); + private final Map<Integer, DocumentmanagerConfig.Datatype> configMap = new HashMap<>(); private void inProgress(DataType type) { var old = typesById.put(type.getId(), type); @@ -109,15 +115,12 @@ public class DocumentTypeManagerConfigurer implements ConfigSubscriber.SingleSub .collect(Collectors.toUnmodifiableSet()); DocumentType type = new DocumentType(doc.name(), header, importedFields); if (id != type.getId()) { + typesById.put(id, type); // really old stuff, should rewrite tests using this: int alt = (doc.name()+"."+doc.version()).hashCode(); - if (id == alt) { - typesById.put(id, type); - } else { - throw new IllegalArgumentException("Document type "+doc.name()+ - " wanted id 
"+id+" but got "+ - type.getId()+", alternative id was: "+alt); - } + log.warning("Document type "+doc.name()+ + " wanted id "+id+" but got "+ + type.getId()+", alternative id was: "+alt); } inProgress(type); configMap.remove(id); @@ -156,7 +159,7 @@ public class DocumentTypeManagerConfigurer implements ConfigSubscriber.SingleSub return type; } - + @SuppressWarnings("deprecation") private DataType getOrCreateType(int id) { if (typesById.containsKey(id)) { return typesById.get(id); @@ -189,6 +192,7 @@ public class DocumentTypeManagerConfigurer implements ConfigSubscriber.SingleSub } } + @SuppressWarnings("deprecation") private void fillStructs(DocumentmanagerConfig config) { for (var thisDataType : config.datatype()) { for (var struct : thisDataType.structtype()) { @@ -289,6 +293,7 @@ public class DocumentTypeManagerConfigurer implements ConfigSubscriber.SingleSub } } + @SuppressWarnings("deprecation") private void addAnnotationTypePayloads(DocumentmanagerConfig config) { for (DocumentmanagerConfig.Annotationtype annType : config.annotationtype()) { AnnotationType annotationType = manager.getAnnotationTypeRegistry().getType(annType.id()); @@ -314,9 +319,256 @@ public class DocumentTypeManagerConfigurer implements ConfigSubscriber.SingleSub private final DocumentTypeManager manager; } + + private static class ApplyNewDoctypeConfig { + + public ApplyNewDoctypeConfig(DocumentmanagerConfig config, DocumentTypeManager manager) { + this.manager = manager; + this.usev8geopositions = config.usev8geopositions(); + apply(config); + } + + Map<Integer, DataType> typesByIdx = new HashMap<>(); + + DataType addNewType(int id, DataType type) { + if (type == null) { + throw new IllegalArgumentException("Type to add for idx "+id+" cannot be null"); + } + var old = typesByIdx.put(id, type); + if (old != null) { + throw new IllegalArgumentException("Type "+type+" for idx "+id+" conflict: "+old+" present"); + } + return type; + } + + Map<Integer, Supplier<DataType>> factoryByIdx = new 
HashMap<>(); + + ArrayList<Integer> proxyRefs = new ArrayList<>(); + + private DataType getOrCreateType(int id) { + if (typesByIdx.containsKey(id)) { + return typesByIdx.get(id); + } + var factory = factoryByIdx.remove(id); + if (factory != null) { + DataType type = factory.get(); + return addNewType(id, type); + } + throw new IllegalArgumentException("No type or factory found for idx: "+id); + } + + void createComplexTypes() { + var toCreate = new ArrayList<>(factoryByIdx.keySet()); + for (var dataTypeId : toCreate) { + var type = getOrCreateType(dataTypeId); + assert(type != null); + } + } + + class PerDocTypeData { + + DocumentmanagerConfig.Doctype docTypeConfig; + + DocumentType docType = null; + + PerDocTypeData(DocumentmanagerConfig.Doctype config) { + this.docTypeConfig = config; + } + + void createSimpleTypes() { + for (var typeconf : docTypeConfig.primitivetype()) { + DataType type = manager.getDataType(typeconf.name()); + if (! (type instanceof PrimitiveDataType)) { + throw new IllegalArgumentException("Needed primitive type for idx "+typeconf.idx()+" but got: "+type); + } + addNewType(typeconf.idx(), type); + } + for (var typeconf : docTypeConfig.tensortype()) { + var type = new TensorDataType(TensorType.fromSpec(typeconf.detailedtype())); + addNewType(typeconf.idx(), type); + } + } + + void createFactories() { + for (var typeconf : docTypeConfig.arraytype()) { + factoryByIdx.put(typeconf.idx(), () -> new ArrayDataType(getOrCreateType(typeconf.elementtype()))); + } + for (var typeconf : docTypeConfig.maptype()) { + factoryByIdx.put(typeconf.idx(), () -> new MapDataType(getOrCreateType(typeconf.keytype()), + getOrCreateType(typeconf.valuetype()))); + } + for (var typeconf : docTypeConfig.wsettype()) { + factoryByIdx.put(typeconf.idx(), () -> new WeightedSetDataType(getOrCreateType(typeconf.elementtype()), + typeconf.createifnonexistent(), + typeconf.removeifzero())); + } + for (var typeconf : docTypeConfig.documentref()) { + 
factoryByIdx.put(typeconf.idx(), () -> ReferenceDataType.createWithInferredId(inProgressById.get(typeconf.targettype()).docType)); + } + for (var typeconf : docTypeConfig.annotationref()) { + factoryByIdx.put(typeconf.idx(), () -> new AnnotationReferenceDataType + (annTypeFromIdx(typeconf.annotationtype()))); + } + } + + void createEmptyStructs() { + String docName = docTypeConfig.name(); + for (var typeconf : docTypeConfig.structtype()) { + addNewType(typeconf.idx(), new StructDataType(typeconf.name())); + } + } + + void initializeDocType() { + Set<String> importedFields = new HashSet<>(); + for (var imported : docTypeConfig.importedfield()) { + importedFields.add(imported.name()); + } + int contentIdx = docTypeConfig.contentstruct(); + DataType contentStruct = typesByIdx.get(contentIdx); + if (! (contentStruct instanceof StructDataType)) { + throw new IllegalArgumentException("Content struct for document type "+docTypeConfig.name()+ + " should be a struct, but was: "+contentStruct); + } + if (docTypeConfig.name().equals(DataType.DOCUMENT.getName())) { + this.docType = DataType.DOCUMENT; + } else { + this.docType = new DocumentType(docTypeConfig.name(), (StructDataType)contentStruct, importedFields); + } + addNewType(docTypeConfig.idx(), docType); + } + + void createEmptyAnnotationTypes() { + for (var typeconf : docTypeConfig.annotationtype()) { + AnnotationType annType = manager.getAnnotationTypeRegistry().getType(typeconf.name()); + if (typeconf.internalid() != -1) { + if (annType == null) { + annType = new AnnotationType(typeconf.name(), typeconf.internalid()); + } else { + if (annType.getId() != typeconf.internalid()) { + throw new IllegalArgumentException("Wrong internalid for annotation type "+annType+ + " (wanted "+typeconf.internalid()+", got "+annType.getId()+")"); + } + } + } else if (annType == null) { + annType = new AnnotationType(typeconf.name()); + } + manager.getAnnotationTypeRegistry().register(annType); + // because AnnotationType is not a 
DataType, make a proxy + var proxy = new AnnotationReferenceDataType(annType); + proxyRefs.add(typeconf.idx()); + addNewType(typeconf.idx(), proxy); + } + } + + AnnotationType annTypeFromIdx(int idx) { + var proxy = (AnnotationReferenceDataType) typesByIdx.get(idx); + if (proxy == null) { + throw new IllegalArgumentException("Needed AnnotationType for idx "+idx+", found: "+typesByIdx.get(idx)); + } + return proxy.getAnnotationType(); + } + + void fillAnnotationTypes() { + for (var typeConf : docTypeConfig.annotationtype()) { + var annType = annTypeFromIdx(typeConf.idx()); + int pIdx = typeConf.datatype(); + if (pIdx != -1) { + DataType payload = getOrCreateType(pIdx); + annType.setDataType(payload); + } + for (var inherit : typeConf.inherits()) { + var inheritedType = annTypeFromIdx(inherit.idx()); + if (! annType.inherits(inheritedType)) { + annType.inherit(inheritedType); + } + } + } + } + void fillStructs() { + for (var structCfg : docTypeConfig.structtype()) { + int idx = structCfg.idx(); + StructDataType type = (StructDataType) typesByIdx.get(idx); + for (var parent : structCfg.inherits()) { + var parentStruct = (StructDataType) typesByIdx.get(parent.type()); + type.inherit(parentStruct); + } + for (var fieldCfg : structCfg.field()) { + if (fieldCfg.type() == idx) { + log.fine("Self-referencing struct "+structCfg.name()+" field: "+fieldCfg); + } + DataType fieldType = getOrCreateType(fieldCfg.type()); + type.addField(new Field(fieldCfg.name(), fieldCfg.internalid(), fieldType)); + } + } + } + void fillDocument() { + for (var inherit : docTypeConfig.inherits()) { + var data = inProgressById.get(inherit.idx()); + if (data == null) { + throw new IllegalArgumentException("Missing doctype for inherit idx: "+inherit.idx()); + } else { + docType.inherit(data.docType); + } + } + Map<String, Collection<String>> fieldSets = new HashMap<>(); + for (var entry : docTypeConfig.fieldsets().entrySet()) { + fieldSets.put(entry.getKey(), entry.getValue().fields()); + } + 
Set<String> importedFields = new HashSet<>(); + for (var imported : docTypeConfig.importedfield()) { + importedFields.add(imported.name()); + } + docType.addFieldSets(fieldSets); + } + } + + private final Map<String, PerDocTypeData> inProgressByName = new HashMap<>(); + private final Map<Integer, PerDocTypeData> inProgressById = new HashMap<>(); + + private void apply(DocumentmanagerConfig config) { + for (var docType : config.doctype()) { + int idx = docType.idx(); + String name = docType.name(); + var data = new PerDocTypeData(docType); + var old = inProgressById.put(idx, data); + if (old != null) { + throw new IllegalArgumentException("Multiple document types with id: "+idx); + } + old = inProgressByName.put(name, data); + if (old != null) { + throw new IllegalArgumentException("Multiple document types with name: "+name); + } + } + for (var docType : config.doctype()) { + var docTypeData = inProgressById.get(docType.idx()); + docTypeData.createEmptyStructs(); + docTypeData.initializeDocType(); + docTypeData.createEmptyAnnotationTypes(); + docTypeData.createFactories(); + docTypeData.createSimpleTypes(); + } + createComplexTypes(); + for (var docType : config.doctype()) { + var docTypeData = inProgressById.get(docType.idx()); + docTypeData.fillStructs(); + docTypeData.fillDocument(); + docTypeData.fillAnnotationTypes(); + } + for (int idx : proxyRefs) { + typesByIdx.remove(idx); + } + for (DataType type : typesByIdx.values()) { + manager.register(type); + } + } + + private final boolean usev8geopositions; + private final DocumentTypeManager manager; + } + public static DocumentTypeManager configureNewManager(DocumentmanagerConfig config) { DocumentTypeManager manager = new DocumentTypeManager(); - new Apply(config, manager); + configureNewManager(config, manager); return manager; } diff --git a/document/src/main/java/com/yahoo/document/annotation/Annotation.java b/document/src/main/java/com/yahoo/document/annotation/Annotation.java index a5f70c2b9e3..2ee2d0baaa7 
100644 --- a/document/src/main/java/com/yahoo/document/annotation/Annotation.java +++ b/document/src/main/java/com/yahoo/document/annotation/Annotation.java @@ -223,6 +223,7 @@ public class Annotation implements Comparable<Annotation> { public String toString() { String retval = "annotation of type " + type; retval += ((value == null) ? " (no value)" : " (with value)"); + retval += ((spanNode == null) ? " (no span)" : (" with span "+spanNode)); return retval; } diff --git a/document/src/main/java/com/yahoo/document/serialization/VespaDocumentDeserializer6.java b/document/src/main/java/com/yahoo/document/serialization/VespaDocumentDeserializer6.java index 58cc3c22199..9115a000e20 100644 --- a/document/src/main/java/com/yahoo/document/serialization/VespaDocumentDeserializer6.java +++ b/document/src/main/java/com/yahoo/document/serialization/VespaDocumentDeserializer6.java @@ -714,6 +714,7 @@ public class VespaDocumentDeserializer6 extends BufferSerializer implements Docu byte features = buf.get(); int length = buf.getInt1_2_4Bytes(); + int skipToPos = buf.position() + length; if ((features & (byte) 1) == (byte) 1) { //we have a span node @@ -728,15 +729,19 @@ public class VespaDocumentDeserializer6 extends BufferSerializer implements Docu if ((features & (byte) 2) == (byte) 2) { //we have a value: int dataTypeId = buf.getInt(); - - //if this data type ID the same as the one in our config? - if (dataTypeId != type.getDataType().getId()) { - //not the same, but we will handle it gracefully, and just skip past the data: - buf.position(buf.position() + length - 4); - } else { + try { FieldValue value = type.getDataType().createFieldValue(); value.deserialize(this); annotation.setFieldValue(value); + // could get buffer underflow or DeserializationException + } catch (RuntimeException rte) { + if (dataTypeId == type.getDataType().getId()) { + throw new DeserializationException("Could not deserialize annotation payload", rte); + } + // XXX: does this make sense? 
The annotation without its payload may be a problem. + // handle it gracefully, and just skip past the data + } finally { + buf.position(skipToPos); } } } diff --git a/document/src/test/document/documentmanager.cfg b/document/src/test/document/documentmanager.cfg index e4c581304ce..6ceda63e606 100644 --- a/document/src/test/document/documentmanager.cfg +++ b/document/src/test/document/documentmanager.cfg @@ -1,105 +1,96 @@ -datatype[11] -datatype[0].id -1365874599 -datatype[0].arraytype[0] -datatype[0].weightedsettype[0] -datatype[0].structtype[1] -datatype[0].structtype[0].name foobar.header -datatype[0].structtype[0].version 9 -datatype[0].structtype[0].field[2] -datatype[0].structtype[0].field[0].name foobarfield1 -datatype[0].structtype[0].field[0].id[0] -datatype[0].structtype[0].field[0].datatype 4 -datatype[0].structtype[0].field[1].name foobarfield0 -datatype[0].structtype[0].field[1].id[0] -datatype[0].structtype[0].field[1].datatype 2 -datatype[0].documenttype[0] -datatype[1].id 278604398 -datatype[1].arraytype[0] -datatype[1].weightedsettype[0] -datatype[1].structtype[1] -datatype[1].structtype[0].name foobar.body -datatype[1].structtype[0].version 9 -datatype[1].documenttype[0] -datatype[2].id 378030104 -datatype[2].arraytype[0] -datatype[2].weightedsettype[0] -datatype[2].structtype[0] -datatype[2].documenttype[1] -datatype[2].documenttype[0].name foobar -datatype[2].documenttype[0].version 9 -datatype[2].documenttype[0].inherits[0] -datatype[2].documenttype[0].headerstruct -1365874599 -datatype[2].documenttype[0].bodystruct 278604398 -datatype[3].id 673066331 -datatype[3].arraytype[0] -datatype[3].weightedsettype[0] -datatype[3].structtype[1] -datatype[3].structtype[0].name banana.header -datatype[3].structtype[0].version 234 -datatype[3].structtype[0].field[1] -datatype[3].structtype[0].field[0].name bananafield0 -datatype[3].structtype[0].field[0].id[0] -datatype[3].structtype[0].field[0].datatype 16 -datatype[3].documenttype[0] -datatype[4].id 
-176986064 -datatype[4].arraytype[0] -datatype[4].weightedsettype[0] -datatype[4].structtype[1] -datatype[4].structtype[0].name banana.body -datatype[4].structtype[0].version 234 -datatype[4].documenttype[0] -datatype[5].id 556449802 -datatype[5].arraytype[0] -datatype[5].weightedsettype[0] -datatype[5].structtype[0] -datatype[5].documenttype[1] -datatype[5].documenttype[0].name banana -datatype[5].documenttype[0].version 234 -datatype[5].documenttype[0].inherits[1] -datatype[5].documenttype[0].inherits[0].name foobar -datatype[5].documenttype[0].inherits[0].version 9 -datatype[5].documenttype[0].headerstruct 673066331 -datatype[5].documenttype[0].bodystruct -176986064 -datatype[6].id -858669928 -datatype[6].arraytype[0] -datatype[6].weightedsettype[0] -datatype[6].structtype[1] -datatype[6].structtype[0].name customtypes.header -datatype[6].structtype[0].version 3 -datatype[6].structtype[0].field[2] -datatype[6].structtype[0].field[0].name arrayfloat -datatype[6].structtype[0].field[0].id[0] -datatype[6].structtype[0].field[0].datatype 99 -datatype[6].structtype[0].field[1].name arrayarrayfloat -datatype[6].structtype[0].field[1].id[0] -datatype[6].structtype[0].field[1].datatype 4003 -datatype[6].documenttype[0] -datatype[7].id 99 -datatype[7].arraytype[1] -datatype[7].arraytype[0].datatype 1 -datatype[7].weightedsettype[0] -datatype[7].structtype[0] -datatype[7].documenttype[0] -datatype[8].id 4003 -datatype[8].arraytype[1] -datatype[8].arraytype[0].datatype 99 -datatype[8].weightedsettype[0] -datatype[8].structtype[0] -datatype[8].documenttype[0] -datatype[9].id 2142817261 -datatype[9].arraytype[0] -datatype[9].weightedsettype[0] -datatype[9].structtype[1] -datatype[9].structtype[0].name customtypes.body -datatype[9].structtype[0].version 3 -datatype[9].documenttype[0] -datatype[10].id -1500313747 -datatype[10].arraytype[0] -datatype[10].weightedsettype[0] -datatype[10].structtype[0] -datatype[10].documenttype[1] -datatype[10].documenttype[0].name customtypes 
-datatype[10].documenttype[0].version 3 -datatype[10].documenttype[0].inherits[0] -datatype[10].documenttype[0].headerstruct -858669928 -datatype[10].documenttype[0].bodystruct 2142817261 +doctype[4] +doctype[0].name "document" +doctype[0].idx 1000 +doctype[0].contentstruct 1001 +doctype[0].primitivetype[0].idx 1002 +doctype[0].primitivetype[0].name "int" +doctype[0].primitivetype[1].idx 1003 +doctype[0].primitivetype[1].name "double" +doctype[0].primitivetype[2].idx 1004 +doctype[0].primitivetype[2].name "string" +doctype[0].annotationtype[0].idx 1005 +doctype[0].annotationtype[0].name "proximity_break" +doctype[0].annotationtype[0].internalid 8 +doctype[0].annotationtype[0].datatype 1003 +doctype[0].annotationtype[1].idx 1006 +doctype[0].annotationtype[1].name "normalized" +doctype[0].annotationtype[1].internalid 4 +doctype[0].annotationtype[1].datatype 1004 +doctype[0].annotationtype[2].idx 1007 +doctype[0].annotationtype[2].name "reading" +doctype[0].annotationtype[2].internalid 5 +doctype[0].annotationtype[2].datatype 1004 +doctype[0].annotationtype[3].idx 1008 +doctype[0].annotationtype[3].name "term" +doctype[0].annotationtype[3].internalid 1 +doctype[0].annotationtype[3].datatype 1004 +doctype[0].annotationtype[4].idx 1009 +doctype[0].annotationtype[4].name "transformed" +doctype[0].annotationtype[4].internalid 7 +doctype[0].annotationtype[4].datatype 1004 +doctype[0].annotationtype[5].idx 1010 +doctype[0].annotationtype[5].name "canonical" +doctype[0].annotationtype[5].internalid 3 +doctype[0].annotationtype[5].datatype 1004 +doctype[0].annotationtype[6].idx 1011 +doctype[0].annotationtype[6].name "token_type" +doctype[0].annotationtype[6].internalid 2 +doctype[0].annotationtype[6].datatype 1002 +doctype[0].annotationtype[7].idx 1012 +doctype[0].annotationtype[7].name "special_token" +doctype[0].annotationtype[7].internalid 9 +doctype[0].annotationtype[8].idx 1013 +doctype[0].annotationtype[8].name "stem" +doctype[0].annotationtype[8].internalid 6 
+doctype[0].annotationtype[8].datatype 1004 +doctype[0].structtype[0].idx 1001 +doctype[0].structtype[0].name document.header +doctype[1].name "foobar" +doctype[1].idx 1014 +doctype[1].inherits[0].idx 1000 +doctype[1].contentstruct 1015 +doctype[1].primitivetype[0].idx 1016 +doctype[1].primitivetype[0].name "long" +doctype[1].structtype[0].idx 1015 +doctype[1].structtype[0].name foobar.header +doctype[1].structtype[0].field[0].name "foobarfield1" +doctype[1].structtype[0].field[0].internalid 1707020592 +doctype[1].structtype[0].field[0].type 1016 +doctype[1].structtype[0].field[1].name "foobarfield0" +doctype[1].structtype[0].field[1].internalid 1055920092 +doctype[1].structtype[0].field[1].type 1004 +doctype[2].name "banana" +doctype[2].idx 1017 +doctype[2].inherits[0].idx 1014 +doctype[2].contentstruct 1018 +doctype[2].primitivetype[0].idx 1019 +doctype[2].primitivetype[0].name "byte" +doctype[2].structtype[0].idx 1018 +doctype[2].structtype[0].name banana.header +doctype[2].structtype[0].field[0].name "foobarfield1" +doctype[2].structtype[0].field[0].internalid 1707020592 +doctype[2].structtype[0].field[0].type 1016 +doctype[2].structtype[0].field[1].name "foobarfield0" +doctype[2].structtype[0].field[1].internalid 1055920092 +doctype[2].structtype[0].field[1].type 1004 +doctype[2].structtype[0].field[2].name "bananafield0" +doctype[2].structtype[0].field[2].internalid 1294599520 +doctype[2].structtype[0].field[2].type 1019 +doctype[3].name "customtypes" +doctype[3].idx 1020 +doctype[3].inherits[0].idx 1000 +doctype[3].contentstruct 1021 +doctype[3].primitivetype[0].idx 1023 +doctype[3].primitivetype[0].name "float" +doctype[3].arraytype[0].idx 1022 +doctype[3].arraytype[0].elementtype 1024 +doctype[3].arraytype[1].idx 1024 +doctype[3].arraytype[1].elementtype 1023 +doctype[3].structtype[0].idx 1021 +doctype[3].structtype[0].name customtypes.header +doctype[3].structtype[0].field[0].name "arrayfloat" +doctype[3].structtype[0].field[0].internalid 1493411963 
+doctype[3].structtype[0].field[0].type 1024 +doctype[3].structtype[0].field[1].name "arrayarrayfloat" +doctype[3].structtype[0].field[1].internalid 890649191 +doctype[3].structtype[0].field[1].type 1022 diff --git a/document/src/test/java/com/yahoo/document/DocumentTestCase.java b/document/src/test/java/com/yahoo/document/DocumentTestCase.java index 3d7eb49f1f9..47605264d44 100644 --- a/document/src/test/java/com/yahoo/document/DocumentTestCase.java +++ b/document/src/test/java/com/yahoo/document/DocumentTestCase.java @@ -100,6 +100,7 @@ public class DocumentTestCase extends DocumentTestCaseBase { return dcMan; } + @SuppressWarnings("deprecation") public void setUpSertestDocType() { docMan = new DocumentTypeManager(); @@ -877,6 +878,7 @@ public class DocumentTestCase extends DocumentTestCaseBase { doc.setFieldValue("something", testlist); } + @SuppressWarnings("deprecation") @Test public void testCompressionConfiguredIsIgnored() { @@ -1094,6 +1096,7 @@ public class DocumentTestCase extends DocumentTestCaseBase { assertEquals(doc, doc2); } + @SuppressWarnings("deprecation") @Test public void testUnknownFieldsDeserialization() { DocumentTypeManager docTypeManasjer = new DocumentTypeManager(); diff --git a/document/src/test/java/com/yahoo/document/DocumentTypeManagerTestCase.java b/document/src/test/java/com/yahoo/document/DocumentTypeManagerTestCase.java index 4040f3455da..0aa5aec4b85 100644 --- a/document/src/test/java/com/yahoo/document/DocumentTypeManagerTestCase.java +++ b/document/src/test/java/com/yahoo/document/DocumentTypeManagerTestCase.java @@ -59,6 +59,7 @@ public class DocumentTypeManagerTestCase { assertSame(DataType.DOUBLE, doubleType); } + @SuppressWarnings("deprecation") @Test public void testRecursiveRegister() { StructDataType struct = new StructDataType("mystruct"); @@ -87,6 +88,7 @@ public class DocumentTypeManagerTestCase { assertEquals(docType2, manager.getDocumentType(new DataTypeName("myotherdoc"))); } + @SuppressWarnings("deprecation") @Test 
public void testMultipleDocuments() { DocumentType docType1 = new DocumentType("foo0"); @@ -120,6 +122,7 @@ public class DocumentTypeManagerTestCase { assertEquals(manager.getDocumentTypes().get(new DataTypeName("foo1")), docType2); } + @SuppressWarnings("deprecation") @Test public void testReverseMapOrder() { DocumentTypeManager manager = createConfiguredManager("file:src/test/document/documentmanager.map.cfg"); @@ -190,7 +193,7 @@ public class DocumentTypeManagerTestCase { Field arrayfloat = type.getField("arrayfloat"); ArrayDataType dataType = (ArrayDataType) arrayfloat.getDataType(); - assertTrue(dataType.getCode() == 99); + // assertTrue(dataType.getCode() == 99); assertTrue(dataType.getValueClass().equals(Array.class)); assertTrue(dataType.getNestedType().getCode() == 1); assertTrue(dataType.getNestedType().getValueClass().equals(FloatFieldValue.class)); @@ -198,9 +201,9 @@ public class DocumentTypeManagerTestCase { Field arrayarrayfloat = type.getField("arrayarrayfloat"); ArrayDataType subType = (ArrayDataType) arrayarrayfloat.getDataType(); - assertTrue(subType.getCode() == 4003); + // assertTrue(subType.getCode() == 4003); assertTrue(subType.getValueClass().equals(Array.class)); - assertTrue(subType.getNestedType().getCode() == 99); + // assertTrue(subType.getNestedType().getCode() == 99); assertTrue(subType.getNestedType().getValueClass().equals(Array.class)); ArrayDataType subSubType = (ArrayDataType) subType.getNestedType(); assertTrue(subSubType.getNestedType().getCode() == 1); @@ -215,7 +218,7 @@ public class DocumentTypeManagerTestCase { DocumentType customtypes = manager.getDocumentType(new DataTypeName("customtypes")); assertNull(banana.getField("newfield")); - assertEquals(new Field("arrayfloat", 9489, new ArrayDataType(DataType.FLOAT, 99)), customtypes.getField("arrayfloat")); + assertEquals(new Field("arrayfloat", 9489, new ArrayDataType(DataType.FLOAT)), customtypes.getField("arrayfloat")); var sub = 
DocumentTypeManagerConfigurer.configure(manager, "file:src/test/document/documentmanager.updated.cfg"); sub.close(); @@ -501,6 +504,7 @@ search annotationsimplicitstruct { assertReferenceTypePresentInManager(manager, 12345678, "referenced_type"); } + @SuppressWarnings("deprecation") private static void assertReferenceTypePresentInManager(DocumentTypeManager manager, int refTypeId, String refTargetTypeName) { DataType type = manager.getDataType(refTypeId); diff --git a/document/src/test/vespaxmlparser/alltypes.cfg b/document/src/test/vespaxmlparser/alltypes.cfg deleted file mode 100644 index 5d89611d24b..00000000000 --- a/document/src/test/vespaxmlparser/alltypes.cfg +++ /dev/null @@ -1,101 +0,0 @@ -datatype[5] -datatype[0].id -240642363 -datatype[0].arraytype[0] -datatype[0].weightedsettype[0] -datatype[0].structtype[1] -datatype[0].structtype[0].name alltypes.header -datatype[0].structtype[0].version 0 -datatype[0].structtype[0].field[0] -datatype[0].documenttype[0] -datatype[1].id 1000002 -datatype[1].arraytype[1] -datatype[1].arraytype[0].datatype 2 -datatype[1].weightedsettype[0] -datatype[1].structtype[0] -datatype[1].documenttype[0] -datatype[2].id 2000001 -datatype[2].arraytype[0] -datatype[2].weightedsettype[1] -datatype[2].weightedsettype[0].datatype 2 -datatype[2].weightedsettype[0].createifnonexistant false -datatype[2].weightedsettype[0].removeifzero false -datatype[2].structtype[0] -datatype[2].documenttype[0] -datatype[3].id 163574298 -datatype[3].arraytype[0] -datatype[3].weightedsettype[0] -datatype[3].structtype[1] -datatype[3].structtype[0].name alltypes.body -datatype[3].structtype[0].version 0 -datatype[3].structtype[0].field[20] -datatype[3].structtype[0].field[0].name stringval -datatype[3].structtype[0].field[0].id[0] -datatype[3].structtype[0].field[0].datatype 2 -datatype[3].structtype[0].field[1].name intval1 -datatype[3].structtype[0].field[1].id[0] -datatype[3].structtype[0].field[1].datatype 0 -datatype[3].structtype[0].field[2].name 
intval2 -datatype[3].structtype[0].field[2].id[0] -datatype[3].structtype[0].field[2].datatype 0 -datatype[3].structtype[0].field[3].name intval3 -datatype[3].structtype[0].field[3].id[0] -datatype[3].structtype[0].field[3].datatype 0 -datatype[3].structtype[0].field[4].name longval1 -datatype[3].structtype[0].field[4].id[0] -datatype[3].structtype[0].field[4].datatype 4 -datatype[3].structtype[0].field[5].name longval2 -datatype[3].structtype[0].field[5].id[0] -datatype[3].structtype[0].field[5].datatype 4 -datatype[3].structtype[0].field[6].name longval3 -datatype[3].structtype[0].field[6].id[0] -datatype[3].structtype[0].field[6].datatype 4 -datatype[3].structtype[0].field[7].name byteval1 -datatype[3].structtype[0].field[7].id[0] -datatype[3].structtype[0].field[7].datatype 16 -datatype[3].structtype[0].field[8].name byteval2 -datatype[3].structtype[0].field[8].id[0] -datatype[3].structtype[0].field[8].datatype 16 -datatype[3].structtype[0].field[9].name byteval3 -datatype[3].structtype[0].field[9].id[0] -datatype[3].structtype[0].field[9].datatype 16 -datatype[3].structtype[0].field[10].name floatval -datatype[3].structtype[0].field[10].id[0] -datatype[3].structtype[0].field[10].datatype 1 -datatype[3].structtype[0].field[11].name doubleval -datatype[3].structtype[0].field[11].id[0] -datatype[3].structtype[0].field[11].datatype 5 -datatype[3].structtype[0].field[12].name rawval1 -datatype[3].structtype[0].field[12].id[0] -datatype[3].structtype[0].field[12].datatype 3 -datatype[3].structtype[0].field[13].name rawval2 -datatype[3].structtype[0].field[13].id[0] -datatype[3].structtype[0].field[13].datatype 3 -datatype[3].structtype[0].field[14].name urival -datatype[3].structtype[0].field[14].id[0] -datatype[3].structtype[0].field[14].datatype 10 -datatype[3].structtype[0].field[15].name contentval1 -datatype[3].structtype[0].field[15].id[0] -datatype[3].structtype[0].field[15].datatype 12 -datatype[3].structtype[0].field[16].name contentval2 
-datatype[3].structtype[0].field[16].id[0] -datatype[3].structtype[0].field[16].datatype 12 -datatype[3].structtype[0].field[17].name arrayofstringval -datatype[3].structtype[0].field[17].id[0] -datatype[3].structtype[0].field[17].datatype 1000002 -datatype[3].structtype[0].field[18].name weightedsetofstringval -datatype[3].structtype[0].field[18].id[0] -datatype[3].structtype[0].field[18].datatype 2000001 -datatype[3].structtype[0].field[19].name tagval -datatype[3].structtype[0].field[19].id[0] -datatype[3].structtype[0].field[19].datatype 18 -datatype[3].documenttype[0] -datatype[4].id -1126644934 -datatype[4].arraytype[0] -datatype[4].weightedsettype[0] -datatype[4].structtype[0] -datatype[4].documenttype[1] -datatype[4].documenttype[0].name alltypes -datatype[4].documenttype[0].version 0 -datatype[4].documenttype[0].inherits[0] -datatype[4].documenttype[0].headerstruct -240642363 -datatype[4].documenttype[0].bodystruct 163574298 diff --git a/document/src/test/vespaxmlparser/documentmanager.cfg b/document/src/test/vespaxmlparser/documentmanager.cfg deleted file mode 100644 index 6662f5caab5..00000000000 --- a/document/src/test/vespaxmlparser/documentmanager.cfg +++ /dev/null @@ -1,109 +0,0 @@ -datatype[10] -datatype[0].id 1002 -datatype[0].arraytype[1] -datatype[0].arraytype[0].datatype 2 -datatype[0].weightedsettype[0] -datatype[0].structtype[0] -datatype[0].documenttype[0] -datatype[1].id 1000 -datatype[1].arraytype[1] -datatype[1].arraytype[0].datatype 0 -datatype[1].weightedsettype[0] -datatype[1].structtype[0] -datatype[1].documenttype[0] -datatype[2].id 1004 -datatype[2].arraytype[1] -datatype[2].arraytype[0].datatype 4 -datatype[2].weightedsettype[0] -datatype[2].structtype[0] -datatype[2].documenttype[0] -datatype[3].id 1016 -datatype[3].arraytype[1] -datatype[3].arraytype[0].datatype 16 -datatype[3].weightedsettype[0] -datatype[3].structtype[0] -datatype[3].documenttype[0] -datatype[4].id 1001 -datatype[4].arraytype[1] 
-datatype[4].arraytype[0].datatype 1 -datatype[4].weightedsettype[0] -datatype[4].structtype[0] -datatype[4].documenttype[0] -datatype[5].id 2001 -datatype[5].arraytype[0] -datatype[5].weightedsettype[1] -datatype[5].weightedsettype[0].datatype 0 -datatype[5].weightedsettype[0].createifnonexistant false -datatype[5].weightedsettype[0].removeifzero false -datatype[5].structtype[0] -datatype[5].documenttype[0] -datatype[6].id 2002 -datatype[6].arraytype[0] -datatype[6].weightedsettype[1] -datatype[6].weightedsettype[0].datatype 2 -datatype[6].weightedsettype[0].createifnonexistant false -datatype[6].weightedsettype[0].removeifzero false -datatype[6].structtype[0] -datatype[6].documenttype[0] -datatype[7].id -628990518 -datatype[7].arraytype[0] -datatype[7].weightedsettype[0] -datatype[7].structtype[1] -datatype[7].structtype[0].name news.header -datatype[7].structtype[0].version 0 -datatype[7].structtype[0].field[12] -datatype[7].structtype[0].field[0].name url -datatype[7].structtype[0].field[0].id[0] -datatype[7].structtype[0].field[0].datatype 10 -datatype[7].structtype[0].field[1].name title -datatype[7].structtype[0].field[1].id[0] -datatype[7].structtype[0].field[1].datatype 2 -datatype[7].structtype[0].field[2].name last_downloaded -datatype[7].structtype[0].field[2].id[0] -datatype[7].structtype[0].field[2].datatype 0 -datatype[7].structtype[0].field[3].name value_long -datatype[7].structtype[0].field[3].id[0] -datatype[7].structtype[0].field[3].datatype 4 -datatype[7].structtype[0].field[4].name value_content -datatype[7].structtype[0].field[4].id[0] -datatype[7].structtype[0].field[4].datatype 12 -datatype[7].structtype[0].field[5].name stringarr -datatype[7].structtype[0].field[5].id[0] -datatype[7].structtype[0].field[5].datatype 1002 -datatype[7].structtype[0].field[6].name intarr -datatype[7].structtype[0].field[6].id[0] -datatype[7].structtype[0].field[6].datatype 1000 -datatype[7].structtype[0].field[7].name longarr 
-datatype[7].structtype[0].field[7].id[0] -datatype[7].structtype[0].field[7].datatype 1004 -datatype[7].structtype[0].field[8].name bytearr -datatype[7].structtype[0].field[8].id[0] -datatype[7].structtype[0].field[8].datatype 1016 -datatype[7].structtype[0].field[9].name floatarr -datatype[7].structtype[0].field[9].id[0] -datatype[7].structtype[0].field[9].datatype 1001 -datatype[7].structtype[0].field[10].name weightedsetint -datatype[7].structtype[0].field[10].id[0] -datatype[7].structtype[0].field[10].datatype 2001 -datatype[7].structtype[0].field[11].name weightedsetstring -datatype[7].structtype[0].field[11].id[0] -datatype[7].structtype[0].field[11].datatype 2002 -datatype[7].documenttype[0] -datatype[8].id 538588767 -datatype[8].arraytype[0] -datatype[8].weightedsettype[0] -datatype[8].structtype[1] -datatype[8].structtype[0].name news.body -datatype[8].structtype[0].version 0 -datatype[8].structtype[0].field[0] -datatype[8].documenttype[0] -datatype[9].id -1048827947 -datatype[9].arraytype[0] -datatype[9].weightedsettype[0] -datatype[9].structtype[0] -datatype[9].documenttype[1] -datatype[9].documenttype[0].name news -datatype[9].documenttype[0].version 0 -datatype[9].documenttype[0].inherits[0] -datatype[9].documenttype[0].headerstruct -628990518 -datatype[9].documenttype[0].bodystruct 538588767 diff --git a/document/src/tests/data/defaultdocument.cfg b/document/src/tests/data/defaultdocument.cfg deleted file mode 100644 index 9780f43def6..00000000000 --- a/document/src/tests/data/defaultdocument.cfg +++ /dev/null @@ -1,94 +0,0 @@ -enablecompression false -datatype[6] -datatype[0].id 1000 -datatype[0].arraytype[1] -datatype[0].arraytype[0].datatype 0 -datatype[0].weightedsettype[0] -datatype[0].structtype[0] -datatype[0].documenttype[0] -datatype[1].id 1003 -datatype[1].arraytype[1] -datatype[1].arraytype[0].datatype 3 -datatype[1].weightedsettype[0] -datatype[1].structtype[0] -datatype[1].documenttype[0] -datatype[2].id 2002 -datatype[2].arraytype[0] 
-datatype[2].weightedsettype[1] -datatype[2].weightedsettype[0].datatype 2 -datatype[2].weightedsettype[0].createifnonexistant false -datatype[2].weightedsettype[0].removeifzero false -datatype[2].structtype[0] -datatype[2].documenttype[0] -datatype[3].id 5000 -datatype[3].arraytype[0] -datatype[3].weightedsettype[0] -datatype[3].structtype[1] -datatype[3].structtype[0].name testdoc.header -datatype[3].structtype[0].version 0 -datatype[3].structtype[0].field[3] -datatype[3].structtype[0].field[0].name intattr -datatype[3].structtype[0].field[0].id[0] -datatype[3].structtype[0].field[0].datatype 0 -datatype[3].structtype[0].field[1].name doubleattr -datatype[3].structtype[0].field[1].id[0] -datatype[3].structtype[0].field[1].datatype 5 -datatype[3].structtype[0].field[2].name floatattr -datatype[3].structtype[0].field[2].id[0] -datatype[3].structtype[0].field[2].datatype 1 -datatype[3].documenttype[0] -datatype[4].id 5001 -datatype[4].arraytype[0] -datatype[4].weightedsettype[0] -datatype[4].structtype[1] -datatype[4].structtype[0].name testdoc.body -datatype[4].structtype[0].version 0 -datatype[4].structtype[0].field[11] -datatype[4].structtype[0].field[0].name stringattr -datatype[4].structtype[0].field[0].id[0] -datatype[4].structtype[0].field[0].datatype 2 -datatype[4].structtype[0].field[1].name stringattr2 -datatype[4].structtype[0].field[1].id[0] -datatype[4].structtype[0].field[1].datatype 2 -datatype[4].structtype[0].field[2].name longattr -datatype[4].structtype[0].field[2].id[0] -datatype[4].structtype[0].field[2].datatype 4 -datatype[4].structtype[0].field[3].name byteattr -datatype[4].structtype[0].field[3].id[0] -datatype[4].structtype[0].field[3].datatype 16 -datatype[4].structtype[0].field[4].name rawattr -datatype[4].structtype[0].field[4].id[0] -datatype[4].structtype[0].field[4].datatype 3 -datatype[4].structtype[0].field[5].name minattr -datatype[4].structtype[0].field[5].id[0] -datatype[4].structtype[0].field[5].datatype 0 
-datatype[4].structtype[0].field[6].name minattr2 -datatype[4].structtype[0].field[6].id[0] -datatype[4].structtype[0].field[6].datatype 0 -datatype[4].structtype[0].field[7].name arrayattr -datatype[4].structtype[0].field[7].id[0] -datatype[4].structtype[0].field[7].datatype 1000 -datatype[4].structtype[0].field[8].name rawarrayattr -datatype[4].structtype[0].field[8].id[0] -datatype[4].structtype[0].field[8].datatype 1003 -datatype[4].structtype[0].field[9].name stringweightedsetattr -datatype[4].structtype[0].field[9].id[0] -datatype[4].structtype[0].field[9].datatype 2002 -datatype[4].structtype[0].field[10].name uri -datatype[4].structtype[0].field[10].id[0] -datatype[4].structtype[0].field[10].datatype 2 -datatype[4].structtype[0].field[11].name docfield -datatype[4].structtype[0].field[11].id[0] -datatype[4].structtype[0].field[11].datatype 8 -datatype[4].documenttype[0] -datatype[5].id 5002 -datatype[5].arraytype[0] -datatype[5].weightedsettype[0] -datatype[5].structtype[0] -datatype[5].documenttype[1] -datatype[5].documenttype[0].name testdoc -datatype[5].documenttype[0].version 0 -datatype[5].documenttype[0].inherits[0] -datatype[5].documenttype[0].headerstruct 5000 -datatype[5].documenttype[0].bodystruct 5001 - diff --git a/document/src/vespa/document/config/documentmanager.def b/document/src/vespa/document/config/documentmanager.def index b9e7cc0f0d1..ec19ba8d802 100644 --- a/document/src/vespa/document/config/documentmanager.def +++ b/document/src/vespa/document/config/documentmanager.def @@ -107,3 +107,159 @@ annotationtype[].id int annotationtype[].name string annotationtype[].datatype int default=-1 annotationtype[].inherits[].id int + + +# Here starts a new model for how datatypes are configured, where +# everything is per document-type, and each documenttype contains the +# datatypes it defines. Will be used (only?) if the arrays above +# (datatype[] and annotationtype[]) are empty. 
+ + +# Note: we will include the built-in "document" document +# type that all other doctypes inherit from also, in order +# to get all the primitive and built-in types declared +# with an idx we can refer to. + +## Name of the document type. Must be unique. +doctype[].name string + +# Note: indexes are only meaningful as internal references in this +# config; they will typically be sequential (1,2,3,...) in the order +# that they are generated (but nothing should depend on that). + +## Index of this type (as a datatype which can be referred to). +doctype[].idx int + +# Could also use name here? +## Specify document types to inherit +doctype[].inherits[].idx int + +## Index of struct defining document fields +doctype[].contentstruct int + +## Field sets available for this document type +doctype[].fieldsets{}.fields[] string + +## Imported fields (specified outside the document block in the schema) +doctype[].importedfield[].name string + +# Everything below here is configuration of data types defined by +# this document type. + +# Primitive types must be present as built-in static members. 
+ +## Index of primitive type +doctype[].primitivetype[].idx int + +## The name of this primitive type +doctype[].primitivetype[].name string + +# Arrays are the simplest collection type: + +## Index of this array type +doctype[].arraytype[].idx int + +## Index of the element type this array type contains +doctype[].arraytype[].elementtype int + + +# Maps are another collection type: + +## Index of this map type +doctype[].maptype[].idx int + +## Index of the key type used by this map type +doctype[].maptype[].keytype int + +## Index of the key type used by this map type +doctype[].maptype[].valuetype int + + +# Weighted sets are more complicated; +# they can be considered as an collection +# of unique elements where each element has +# an associated weight: + +## Index of this weighted set type +doctype[].wsettype[].idx int + +## Index of the element types contained in this weighted set type +doctype[].wsettype[].elementtype int + +## Should an update to a nonexistent element cause it to be created +doctype[].wsettype[].createifnonexistent bool default=false + +## Should an element in a weighted set be removed if an update changes the weight to 0 +doctype[].wsettype[].removeifzero bool default=false + + +# Tensors have their own type system + +## Index of this tensor type +doctype[].tensortype[].idx int + +## Description of the type of the actual tensors contained +doctype[].tensortype[].detailedtype string + + +# Document references refer to parent documents that a document can +# import fields from: + +## Index of this reference data type: +doctype[].documentref[].idx int + +# Could also use name? +## Index of the document type this reference type refers to: +doctype[].documentref[].targettype int + + +# Annotation types are another world, but are modeled here +# as if they were also datatypes contained inside document types: + +## Index of an annotation type. +doctype[].annotationtype[].idx int + +## Name of the annotation type. 
+doctype[].annotationtype[].name string + +# Could we somehow avoid this? +## Internal id of this annotation type +doctype[].annotationtype[].internalid int default=-1 + +## Index of contained datatype of the annotation type, if any +doctype[].annotationtype[].datatype int default=-1 + +## Index of annotation type that this type inherits. +doctype[].annotationtype[].inherits[].idx int + + +# Annotation references are field values referring to +# an annotation of a certain annotation type. + +## Index of this annotation reference type +doctype[].annotationref[].idx int + +## Index of the annotation type this annotation reference type refers to +doctype[].annotationref[].annotationtype int + + +# A struct is just a named collections of fields: + +## Index of this struct type +doctype[].structtype[].idx int + +## Name of the struct type. Must be unique within documenttype. +doctype[].structtype[].name string + +## Index of another struct type to inherit +doctype[].structtype[].inherits[].type int + +## Name of a struct field. Must be unique within the struct type. +doctype[].structtype[].field[].name string + +## The "field id" - used in serialized format! 
+doctype[].structtype[].field[].internalid int + +## Index of the type of this field +doctype[].structtype[].field[].type int + diff --git a/documentapi/abi-spec.json b/documentapi/abi-spec.json index 78a58f24a65..88ec090d324 100644 --- a/documentapi/abi-spec.json +++ b/documentapi/abi-spec.json @@ -1912,6 +1912,7 @@ "public void <init>(com.yahoo.documentapi.messagebus.protocol.DocumentProtocolPoliciesConfig)", "public com.yahoo.documentapi.messagebus.protocol.DocumentProtocolPoliciesConfig$Builder cluster(java.lang.String, com.yahoo.documentapi.messagebus.protocol.DocumentProtocolPoliciesConfig$Cluster$Builder)", "public com.yahoo.documentapi.messagebus.protocol.DocumentProtocolPoliciesConfig$Builder cluster(java.util.Map)", + "public com.yahoo.documentapi.messagebus.protocol.DocumentProtocolPoliciesConfig$Builder cluster(java.lang.String, java.util.function.Consumer)", "public final boolean dispatchGetConfig(com.yahoo.config.ConfigInstance$Producer)", "public final java.lang.String getDefMd5()", "public final java.lang.String getDefName()", @@ -1937,6 +1938,7 @@ "public void <init>(com.yahoo.documentapi.messagebus.protocol.DocumentProtocolPoliciesConfig$Cluster)", "public com.yahoo.documentapi.messagebus.protocol.DocumentProtocolPoliciesConfig$Cluster$Builder defaultRoute(java.lang.String)", "public com.yahoo.documentapi.messagebus.protocol.DocumentProtocolPoliciesConfig$Cluster$Builder route(com.yahoo.documentapi.messagebus.protocol.DocumentProtocolPoliciesConfig$Cluster$Route$Builder)", + "public com.yahoo.documentapi.messagebus.protocol.DocumentProtocolPoliciesConfig$Cluster$Builder route(java.util.function.Consumer)", "public com.yahoo.documentapi.messagebus.protocol.DocumentProtocolPoliciesConfig$Cluster$Builder route(java.util.List)", "public com.yahoo.documentapi.messagebus.protocol.DocumentProtocolPoliciesConfig$Cluster$Builder selector(java.lang.String)", "public com.yahoo.documentapi.messagebus.protocol.DocumentProtocolPoliciesConfig$Cluster build()" @@ 
-2133,6 +2135,7 @@ "public void <init>()", "public void <init>(com.yahoo.documentapi.messagebus.protocol.DocumentrouteselectorpolicyConfig)", "public com.yahoo.documentapi.messagebus.protocol.DocumentrouteselectorpolicyConfig$Builder route(com.yahoo.documentapi.messagebus.protocol.DocumentrouteselectorpolicyConfig$Route$Builder)", + "public com.yahoo.documentapi.messagebus.protocol.DocumentrouteselectorpolicyConfig$Builder route(java.util.function.Consumer)", "public com.yahoo.documentapi.messagebus.protocol.DocumentrouteselectorpolicyConfig$Builder route(java.util.List)", "public final boolean dispatchGetConfig(com.yahoo.config.ConfigInstance$Producer)", "public final java.lang.String getDefMd5()", diff --git a/eval/CMakeLists.txt b/eval/CMakeLists.txt index 99c7e9c68b8..2e0af3acfa7 100644 --- a/eval/CMakeLists.txt +++ b/eval/CMakeLists.txt @@ -70,6 +70,7 @@ vespa_define_module( src/tests/instruction/index_lookup_table src/tests/instruction/inplace_map_function src/tests/instruction/join_with_number + src/tests/instruction/l2_distance src/tests/instruction/mixed_inner_product_function src/tests/instruction/mixed_simple_join_function src/tests/instruction/pow_as_map_optimizer diff --git a/eval/src/tests/instruction/l2_distance/CMakeLists.txt b/eval/src/tests/instruction/l2_distance/CMakeLists.txt new file mode 100644 index 00000000000..1e0fc69a3f9 --- /dev/null +++ b/eval/src/tests/instruction/l2_distance/CMakeLists.txt @@ -0,0 +1,10 @@ +# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+ +vespa_add_executable(eval_l2_distance_test_app TEST + SOURCES + l2_distance_test.cpp + DEPENDS + vespaeval + GTest::GTest +) +vespa_add_test(NAME eval_l2_distance_test_app COMMAND eval_l2_distance_test_app) diff --git a/eval/src/tests/instruction/l2_distance/l2_distance_test.cpp b/eval/src/tests/instruction/l2_distance/l2_distance_test.cpp new file mode 100644 index 00000000000..2cba9dfb18e --- /dev/null +++ b/eval/src/tests/instruction/l2_distance/l2_distance_test.cpp @@ -0,0 +1,96 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. + +#include <vespa/eval/eval/fast_value.h> +#include <vespa/eval/eval/tensor_function.h> +#include <vespa/eval/eval/test/eval_fixture.h> +#include <vespa/eval/eval/test/gen_spec.h> +#include <vespa/eval/instruction/l2_distance.h> +#include <vespa/vespalib/util/stash.h> +#include <vespa/vespalib/util/stringfmt.h> + +#include <vespa/vespalib/util/require.h> +#include <vespa/vespalib/gtest/gtest.h> + +using namespace vespalib; +using namespace vespalib::eval; +using namespace vespalib::eval::test; + +const ValueBuilderFactory &prod_factory = FastValueBuilderFactory::get(); + +//----------------------------------------------------------------------------- + +void verify(const TensorSpec &a, const TensorSpec &b, const vespalib::string &expr, bool optimized = true) { + EvalFixture::ParamRepo param_repo; + param_repo.add("a", a).add("b", b); + EvalFixture fast_fixture(prod_factory, expr, param_repo, true); + EXPECT_EQ(fast_fixture.result(), EvalFixture::ref(expr, param_repo)); + EXPECT_EQ(fast_fixture.find_all<L2Distance>().size(), optimized ? 
1 : 0); +} + +void verify_cell_types(GenSpec a, GenSpec b, const vespalib::string &expr, bool optimized = true) { + for (CellType act : CellTypeUtils::list_types()) { + for (CellType bct : CellTypeUtils::list_types()) { + if (optimized && (act == bct) && (act != CellType::BFLOAT16)) { + verify(a.cpy().cells(act), b.cpy().cells(bct), expr, true); + } else { + verify(a.cpy().cells(act), b.cpy().cells(bct), expr, false); + } + } + } +} + +//----------------------------------------------------------------------------- + +GenSpec gen(const vespalib::string &desc, int bias) { + return GenSpec::from_desc(desc).cells(CellType::FLOAT).seq(N(bias)); +} + +//----------------------------------------------------------------------------- + +vespalib::string sq_l2 = "reduce((a-b)^2,sum)"; +vespalib::string alt_sq_l2 = "reduce(map((a-b),f(x)(x*x)),sum)"; + +//----------------------------------------------------------------------------- + +TEST(L2DistanceTest, squared_l2_distance_can_be_optimized) { + verify_cell_types(gen("x5", 3), gen("x5", 7), sq_l2); + verify_cell_types(gen("x5", 3), gen("x5", 7), alt_sq_l2); +} + +TEST(L2DistanceTest, trivial_dimensions_are_ignored) { + verify(gen("x5y1", 3), gen("x5", 7), sq_l2); + verify(gen("x5", 3), gen("x5y1", 7), sq_l2); +} + +TEST(L2DistanceTest, multiple_dimensions_can_be_used) { + verify(gen("x5y3", 3), gen("x5y3", 7), sq_l2); +} + +//----------------------------------------------------------------------------- + +TEST(L2DistanceTest, inputs_must_be_dense) { + verify(gen("x5_1", 3), gen("x5_1", 7), sq_l2, false); + verify(gen("x5_1y3", 3), gen("x5_1y3", 7), sq_l2, false); + verify(gen("x5", 3), GenSpec(7), sq_l2, false); + verify(GenSpec(3), gen("x5", 7), sq_l2, false); +} + +TEST(L2DistanceTest, result_must_be_double) { + verify(gen("x5y1", 3), gen("x5y1", 7), "reduce((a-b)^2,sum,x)", false); + verify(gen("x5y1_1", 3), gen("x5y1_1", 7), "reduce((a-b)^2,sum,x)", false); +} + +TEST(L2DistanceTest, dimensions_must_match) { + 
verify(gen("x5y3", 3), gen("x5", 7), sq_l2, false); + verify(gen("x5", 3), gen("x5y3", 7), sq_l2, false); +} + +TEST(L2DistanceTest, similar_expressions_are_not_optimized) { + verify(gen("x5", 3), gen("x5", 7), "reduce((a-b)^2,prod)", false); + verify(gen("x5", 3), gen("x5", 7), "reduce((a-b)^3,sum)", false); + verify(gen("x5", 3), gen("x5", 7), "reduce((a+b)^2,sum)", false); +} + +//----------------------------------------------------------------------------- + +GTEST_MAIN_RUN_ALL_TESTS() diff --git a/eval/src/vespa/eval/eval/optimize_tensor_function.cpp b/eval/src/vespa/eval/eval/optimize_tensor_function.cpp index 09814cc0b06..e1520d4deb2 100644 --- a/eval/src/vespa/eval/eval/optimize_tensor_function.cpp +++ b/eval/src/vespa/eval/eval/optimize_tensor_function.cpp @@ -30,6 +30,7 @@ #include <vespa/eval/instruction/dense_tensor_create_function.h> #include <vespa/eval/instruction/dense_tensor_peek_function.h> #include <vespa/eval/instruction/dense_hamming_distance.h> +#include <vespa/eval/instruction/l2_distance.h> #include <vespa/log/log.h> LOG_SETUP(".eval.eval.optimize_tensor_function"); @@ -56,11 +57,16 @@ const TensorFunction &optimize_for_factory(const ValueBuilderFactory &, const Te Child root(expr); run_optimize_pass(root, [&stash](const Child &child) { + child.set(PowAsMapOptimizer::optimize(child.get(), stash)); + }); + run_optimize_pass(root, [&stash](const Child &child) + { child.set(SumMaxDotProductFunction::optimize(child.get(), stash)); }); run_optimize_pass(root, [&stash](const Child &child) { child.set(BestSimilarityFunction::optimize(child.get(), stash)); + child.set(L2Distance::optimize(child.get(), stash)); }); run_optimize_pass(root, [&stash](const Child &child) { @@ -83,7 +89,6 @@ const TensorFunction &optimize_for_factory(const ValueBuilderFactory &, const Te child.set(DenseLambdaPeekOptimizer::optimize(child.get(), stash)); child.set(UnpackBitsFunction::optimize(child.get(), stash)); child.set(FastRenameOptimizer::optimize(child.get(), 
stash)); - child.set(PowAsMapOptimizer::optimize(child.get(), stash)); child.set(InplaceMapFunction::optimize(child.get(), stash)); child.set(MixedSimpleJoinFunction::optimize(child.get(), stash)); child.set(JoinWithNumberFunction::optimize(child.get(), stash)); diff --git a/eval/src/vespa/eval/eval/typed_cells.h b/eval/src/vespa/eval/eval/typed_cells.h index 872488527c2..b8640698d13 100644 --- a/eval/src/vespa/eval/eval/typed_cells.h +++ b/eval/src/vespa/eval/eval/typed_cells.h @@ -20,8 +20,8 @@ struct TypedCells { explicit TypedCells(ConstArrayRef<BFloat16> cells) : data(cells.begin()), type(CellType::BFLOAT16), size(cells.size()) {} explicit TypedCells(ConstArrayRef<Int8Float> cells) : data(cells.begin()), type(CellType::INT8), size(cells.size()) {} - TypedCells() : data(nullptr), type(CellType::DOUBLE), size(0) {} - TypedCells(const void *dp, CellType ct, size_t sz) : data(dp), type(ct), size(sz) {} + TypedCells() noexcept : data(nullptr), type(CellType::DOUBLE), size(0) {} + TypedCells(const void *dp, CellType ct, size_t sz) noexcept : data(dp), type(ct), size(sz) {} template <typename T> bool check_type() const { return vespalib::eval::check_cell_type<T>(type); } diff --git a/eval/src/vespa/eval/instruction/CMakeLists.txt b/eval/src/vespa/eval/instruction/CMakeLists.txt index a462ece4734..56184c113d4 100644 --- a/eval/src/vespa/eval/instruction/CMakeLists.txt +++ b/eval/src/vespa/eval/instruction/CMakeLists.txt @@ -30,6 +30,7 @@ vespa_add_library(eval_instruction OBJECT index_lookup_table.cpp inplace_map_function.cpp join_with_number_function.cpp + l2_distance.cpp mixed_inner_product_function.cpp mixed_simple_join_function.cpp pow_as_map_optimizer.cpp diff --git a/eval/src/vespa/eval/instruction/l2_distance.cpp b/eval/src/vespa/eval/instruction/l2_distance.cpp new file mode 100644 index 00000000000..3f1e7632431 --- /dev/null +++ b/eval/src/vespa/eval/instruction/l2_distance.cpp @@ -0,0 +1,96 @@ +// Copyright Yahoo. 
Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. + +#include "l2_distance.h" +#include <vespa/eval/eval/operation.h> +#include <vespa/eval/eval/value.h> +#include <vespa/vespalib/hwaccelrated/iaccelrated.h> +#include <vespa/vespalib/util/require.h> + +#include <vespa/log/log.h> +LOG_SETUP(".eval.instruction.l2_distance"); + +namespace vespalib::eval { + +using namespace tensor_function; + +namespace { + +static const auto &hw = hwaccelrated::IAccelrated::getAccelerator(); + +template <typename T> +double sq_l2(const Value &lhs, const Value &rhs, size_t len) { + return hw.squaredEuclideanDistance((const T *)lhs.cells().data, (const T *)rhs.cells().data, len); +} + +template <> +double sq_l2<Int8Float>(const Value &lhs, const Value &rhs, size_t len) { + return sq_l2<int8_t>(lhs, rhs, len); +} + +template <typename CT> +void my_squared_l2_distance_op(InterpretedFunction::State &state, uint64_t vector_size) { + double result = sq_l2<CT>(state.peek(1), state.peek(0), vector_size); + state.pop_pop_push(state.stash.create<DoubleValue>(result)); +} + +struct SelectOp { + template <typename CT> + static InterpretedFunction::op_function invoke() { + constexpr bool is_bfloat16 = std::is_same_v<CT, BFloat16>; + if constexpr (!is_bfloat16) { + return my_squared_l2_distance_op<CT>; + } else { + abort(); + } + } +}; + +bool compatible_cell_types(CellType lhs, CellType rhs) { + return ((lhs == rhs) && ((lhs == CellType::INT8) || + (lhs == CellType::FLOAT) || + (lhs == CellType::DOUBLE))); +} + +bool compatible_types(const ValueType &lhs, const ValueType &rhs) { + return (compatible_cell_types(lhs.cell_type(), rhs.cell_type()) && + lhs.is_dense() && rhs.is_dense() && + (lhs.nontrivial_indexed_dimensions() == rhs.nontrivial_indexed_dimensions())); +} + +} // namespace <unnamed> + +L2Distance::L2Distance(const TensorFunction &lhs_in, const TensorFunction &rhs_in) + : tensor_function::Op2(ValueType::double_type(), lhs_in, rhs_in) +{ +} + 
+InterpretedFunction::Instruction +L2Distance::compile_self(const ValueBuilderFactory &, Stash &) const +{ + auto lhs_t = lhs().result_type(); + auto rhs_t = rhs().result_type(); + REQUIRE_EQ(lhs_t.cell_type(), rhs_t.cell_type()); + REQUIRE_EQ(lhs_t.dense_subspace_size(), rhs_t.dense_subspace_size()); + auto op = typify_invoke<1, TypifyCellType, SelectOp>(lhs_t.cell_type()); + return InterpretedFunction::Instruction(op, lhs_t.dense_subspace_size()); +} + +const TensorFunction & +L2Distance::optimize(const TensorFunction &expr, Stash &stash) +{ + auto reduce = as<Reduce>(expr); + if (reduce && (reduce->aggr() == Aggr::SUM) && expr.result_type().is_double()) { + auto map = as<Map>(reduce->child()); + if (map && (map->function() == operation::Square::f)) { + auto join = as<Join>(map->child()); + if (join && (join->function() == operation::Sub::f)) { + if (compatible_types(join->lhs().result_type(), join->rhs().result_type())) { + return stash.create<L2Distance>(join->lhs(), join->rhs()); + } + } + } + } + return expr; +} + +} // namespace diff --git a/eval/src/vespa/eval/instruction/l2_distance.h b/eval/src/vespa/eval/instruction/l2_distance.h new file mode 100644 index 00000000000..95b11b6c229 --- /dev/null +++ b/eval/src/vespa/eval/instruction/l2_distance.h @@ -0,0 +1,21 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. + +#pragma once + +#include <vespa/eval/eval/tensor_function.h> + +namespace vespalib::eval { + +/** + * Tensor function for a squared euclidean distance producing a scalar result. 
+ **/ +class L2Distance : public tensor_function::Op2 +{ +public: + L2Distance(const TensorFunction &lhs_in, const TensorFunction &rhs_in); + InterpretedFunction::Instruction compile_self(const ValueBuilderFactory &factory, Stash &stash) const override; + bool result_is_mutable() const override { return true; } + static const TensorFunction &optimize(const TensorFunction &expr, Stash &stash); +}; + +} // namespace diff --git a/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileReceiver.java b/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileReceiver.java index 89a77599909..a2c5fee1e51 100644 --- a/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileReceiver.java +++ b/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileReceiver.java @@ -131,9 +131,7 @@ public class FileReceiver { throw new RuntimeException("Failed writing file: ", e); } finally { try { - if (inprogressFile.exists()) { - Files.delete(inprogressFile.toPath()); - } + Files.deleteIfExists(inprogressFile.toPath()); } catch (IOException e) { log.log(Level.SEVERE, "Failed deleting " + inprogressFile.getAbsolutePath() + ": " + e.getMessage(), e); } @@ -191,13 +189,14 @@ public class FileReceiver { } catch (FileAlreadyExistsException e) { // Don't fail if it already exists (we might get the file from several config servers when retrying, servers are down etc. // so it might be written already). Delete temp file/dir in that case, to avoid filling the disk. 
- log.log(Level.FINE, () -> "Failed moving file '" + tempFile.getAbsolutePath() + "' to '" + destination.getAbsolutePath() + - "', '" + destination.getAbsolutePath() + "' already exists"); - deleteFileOrDirectory(tempFile); + log.log(Level.FINE, () -> "Failed moving file '" + tempFile.getAbsolutePath() + "' to '" + + destination.getAbsolutePath() + "', it already exists"); } catch (IOException e) { String message = "Failed moving file '" + tempFile.getAbsolutePath() + "' to '" + destination.getAbsolutePath() + "'"; log.log(Level.SEVERE, message, e); throw new RuntimeException(message, e); + } finally { + deleteFileOrDirectory(tempFile); } } diff --git a/flags/src/main/java/com/yahoo/vespa/flags/Flags.java b/flags/src/main/java/com/yahoo/vespa/flags/Flags.java index 0dc06270031..0b33bf8a0b8 100644 --- a/flags/src/main/java/com/yahoo/vespa/flags/Flags.java +++ b/flags/src/main/java/com/yahoo/vespa/flags/Flags.java @@ -380,17 +380,18 @@ public class Flags { ZONE_ID, APPLICATION_ID); public static final UnboundBooleanFlag USE_LEGACY_LB_SERVICES = defineFeatureFlag( - "use-legacy-lb-services", true, + "use-legacy-lb-services", false, List.of("tokle"), "2021-11-22", "2021-12-31", "Whether to generate routing table based on legacy lb-services config", "Takes effect on container reboot", ZONE_ID, HOSTNAME); - public static final UnboundBooleanFlag CHANGE_ROUTING_STATUS_OF_ALL_UPSTREAMS = defineFeatureFlag( - "change-routing-status-of-all-upstreams", false, - List.of("mpolden"), "2021-12-02", "2021-12-15", - "Whether the controller should send all known upstreams to configserver when changing routing status of a deployment", - "Takes effect on the next change in routing status"); + public static final UnboundBooleanFlag USE_V8_DOC_MANAGER_CFG = defineFeatureFlag( + "use-v8-doc-manager-cfg", false, + List.of("arnej", "baldersheim"), "2021-12-09", "2022-12-31", + "Use new (preparing for Vespa 8) section in documentmanager.def", + "Takes effect at redeployment", + ZONE_ID, 
APPLICATION_ID); /** WARNING: public for testing: All flags should be defined in {@link Flags}. */ public static UnboundBooleanFlag defineFeatureFlag(String flagId, boolean defaultValue, List<String> owners, diff --git a/jdisc_core/abi-spec.json b/jdisc_core/abi-spec.json index 497fdfad501..d1b676b330f 100644 --- a/jdisc_core/abi-spec.json +++ b/jdisc_core/abi-spec.json @@ -531,7 +531,7 @@ "com.yahoo.jdisc.handler.FastContentOutputStream": { "superClass": "com.yahoo.jdisc.handler.AbstractContentOutputStream", "interfaces": [ - "com.google.common.util.concurrent.ListenableFuture" + "java.util.concurrent.Future" ], "attributes": [ "public" @@ -553,9 +553,8 @@ "fields": [] }, "com.yahoo.jdisc.handler.FastContentWriter": { - "superClass": "java.lang.Object", + "superClass": "java.util.concurrent.CompletableFuture", "interfaces": [ - "com.google.common.util.concurrent.ListenableFuture", "java.lang.AutoCloseable" ], "attributes": [ @@ -570,17 +569,12 @@ "public void close()", "public void addListener(java.lang.Runnable, java.util.concurrent.Executor)", "public boolean cancel(boolean)", - "public boolean isCancelled()", - "public boolean isDone()", - "public java.lang.Boolean get()", - "public java.lang.Boolean get(long, java.util.concurrent.TimeUnit)", - "public bridge synthetic java.lang.Object get(long, java.util.concurrent.TimeUnit)", - "public bridge synthetic java.lang.Object get()" + "public boolean isCancelled()" ], "fields": [] }, "com.yahoo.jdisc.handler.FutureCompletion": { - "superClass": "com.google.common.util.concurrent.AbstractFuture", + "superClass": "java.util.concurrent.CompletableFuture", "interfaces": [ "com.yahoo.jdisc.handler.CompletionHandler" ], @@ -593,35 +587,13 @@ "public void completed()", "public void failed(java.lang.Throwable)", "public final boolean cancel(boolean)", - "public final boolean isCancelled()" - ], - "fields": [] - }, - "com.yahoo.jdisc.handler.FutureConjunction": { - "superClass": "java.lang.Object", - "interfaces": [ - 
"com.google.common.util.concurrent.ListenableFuture" - ], - "attributes": [ - "public", - "final" - ], - "methods": [ - "public void <init>()", - "public void addOperand(com.google.common.util.concurrent.ListenableFuture)", - "public void addListener(java.lang.Runnable, java.util.concurrent.Executor)", - "public final boolean cancel(boolean)", "public final boolean isCancelled()", - "public final boolean isDone()", - "public final java.lang.Boolean get()", - "public final java.lang.Boolean get(long, java.util.concurrent.TimeUnit)", - "public bridge synthetic java.lang.Object get(long, java.util.concurrent.TimeUnit)", - "public bridge synthetic java.lang.Object get()" + "public void addListener(java.lang.Runnable, java.util.concurrent.Executor)" ], "fields": [] }, "com.yahoo.jdisc.handler.FutureResponse": { - "superClass": "com.google.common.util.concurrent.AbstractFuture", + "superClass": "java.util.concurrent.CompletableFuture", "interfaces": [ "com.yahoo.jdisc.handler.ResponseHandler" ], @@ -632,6 +604,7 @@ "methods": [ "public void <init>()", "public void <init>(com.yahoo.jdisc.handler.ContentChannel)", + "public void addListener(java.lang.Runnable, java.util.concurrent.Executor)", "public void <init>(com.yahoo.jdisc.handler.ResponseHandler)", "public com.yahoo.jdisc.handler.ContentChannel handleResponse(com.yahoo.jdisc.Response)", "public final boolean cancel(boolean)", @@ -705,7 +678,7 @@ "com.yahoo.jdisc.handler.RequestDispatch": { "superClass": "java.lang.Object", "interfaces": [ - "com.google.common.util.concurrent.ListenableFuture", + "java.util.concurrent.Future", "com.yahoo.jdisc.handler.ResponseHandler" ], "attributes": [ @@ -718,7 +691,7 @@ "protected java.lang.Iterable requestContent()", "public final com.yahoo.jdisc.handler.ContentChannel connect()", "public final com.yahoo.jdisc.handler.FastContentWriter connectFastWriter()", - "public final com.google.common.util.concurrent.ListenableFuture dispatch()", + "public final 
java.util.concurrent.CompletableFuture dispatch()", "public void addListener(java.lang.Runnable, java.util.concurrent.Executor)", "public final boolean cancel(boolean)", "public final boolean isCancelled()", @@ -748,8 +721,10 @@ "fields": [] }, "com.yahoo.jdisc.handler.ResponseDispatch": { - "superClass": "com.google.common.util.concurrent.ForwardingListenableFuture", - "interfaces": [], + "superClass": "java.lang.Object", + "interfaces": [ + "java.util.concurrent.Future" + ], "attributes": [ "public", "abstract" @@ -760,16 +735,18 @@ "protected java.lang.Iterable responseContent()", "public final com.yahoo.jdisc.handler.ContentChannel connect(com.yahoo.jdisc.handler.ResponseHandler)", "public final com.yahoo.jdisc.handler.FastContentWriter connectFastWriter(com.yahoo.jdisc.handler.ResponseHandler)", - "public final com.google.common.util.concurrent.ListenableFuture dispatch(com.yahoo.jdisc.handler.ResponseHandler)", - "protected final com.google.common.util.concurrent.ListenableFuture delegate()", + "public final java.util.concurrent.CompletableFuture dispatch(com.yahoo.jdisc.handler.ResponseHandler)", "public final boolean cancel(boolean)", "public final boolean isCancelled()", + "public boolean isDone()", + "public java.lang.Boolean get()", + "public java.lang.Boolean get(long, java.util.concurrent.TimeUnit)", "public static varargs com.yahoo.jdisc.handler.ResponseDispatch newInstance(int, java.nio.ByteBuffer[])", "public static com.yahoo.jdisc.handler.ResponseDispatch newInstance(int, java.lang.Iterable)", "public static varargs com.yahoo.jdisc.handler.ResponseDispatch newInstance(com.yahoo.jdisc.Response, java.nio.ByteBuffer[])", "public static com.yahoo.jdisc.handler.ResponseDispatch newInstance(com.yahoo.jdisc.Response, java.lang.Iterable)", - "protected bridge synthetic java.util.concurrent.Future delegate()", - "protected bridge synthetic java.lang.Object delegate()" + "public bridge synthetic java.lang.Object get(long, java.util.concurrent.TimeUnit)", + 
"public bridge synthetic java.lang.Object get()" ], "fields": [] }, @@ -934,332 +911,5 @@ "public abstract void close()" ], "fields": [] - }, - "com.yahoo.jdisc.test.MockMetric": { - "superClass": "java.lang.Object", - "interfaces": [ - "com.yahoo.jdisc.Metric" - ], - "attributes": [ - "public" - ], - "methods": [ - "public void <init>()", - "public void set(java.lang.String, java.lang.Number, com.yahoo.jdisc.Metric$Context)", - "public void add(java.lang.String, java.lang.Number, com.yahoo.jdisc.Metric$Context)", - "public com.yahoo.jdisc.Metric$Context createContext(java.util.Map)", - "public java.util.Map metrics()", - "public java.lang.String toString()" - ], - "fields": [] - }, - "com.yahoo.jdisc.test.NonWorkingClientProvider": { - "superClass": "com.yahoo.jdisc.NoopSharedResource", - "interfaces": [ - "com.yahoo.jdisc.service.ClientProvider" - ], - "attributes": [ - "public", - "final" - ], - "methods": [ - "public void <init>()", - "public void start()", - "public com.yahoo.jdisc.handler.ContentChannel handleRequest(com.yahoo.jdisc.Request, com.yahoo.jdisc.handler.ResponseHandler)", - "public void handleTimeout(com.yahoo.jdisc.Request, com.yahoo.jdisc.handler.ResponseHandler)" - ], - "fields": [] - }, - "com.yahoo.jdisc.test.NonWorkingCompletionHandler": { - "superClass": "java.lang.Object", - "interfaces": [ - "com.yahoo.jdisc.handler.CompletionHandler" - ], - "attributes": [ - "public", - "final" - ], - "methods": [ - "public void <init>()", - "public void completed()", - "public void failed(java.lang.Throwable)" - ], - "fields": [] - }, - "com.yahoo.jdisc.test.NonWorkingContentChannel": { - "superClass": "java.lang.Object", - "interfaces": [ - "com.yahoo.jdisc.handler.ContentChannel" - ], - "attributes": [ - "public", - "final" - ], - "methods": [ - "public void <init>()", - "public void write(java.nio.ByteBuffer, com.yahoo.jdisc.handler.CompletionHandler)", - "public void close(com.yahoo.jdisc.handler.CompletionHandler)" - ], - "fields": [] - }, - 
"com.yahoo.jdisc.test.NonWorkingOsgiFramework": { - "superClass": "java.lang.Object", - "interfaces": [ - "com.yahoo.jdisc.application.OsgiFramework" - ], - "attributes": [ - "public" - ], - "methods": [ - "public void <init>()", - "public java.util.List installBundle(java.lang.String)", - "public void startBundles(java.util.List, boolean)", - "public void refreshPackages()", - "public org.osgi.framework.BundleContext bundleContext()", - "public java.util.List bundles()", - "public java.util.List getBundles(org.osgi.framework.Bundle)", - "public void allowDuplicateBundles(java.util.Collection)", - "public void start()", - "public void stop()" - ], - "fields": [] - }, - "com.yahoo.jdisc.test.NonWorkingRequest": { - "superClass": "java.lang.Object", - "interfaces": [], - "attributes": [ - "public", - "final" - ], - "methods": [ - "public static varargs com.yahoo.jdisc.Request newInstance(java.lang.String, com.google.inject.Module[])" - ], - "fields": [] - }, - "com.yahoo.jdisc.test.NonWorkingRequestHandler": { - "superClass": "com.yahoo.jdisc.NoopSharedResource", - "interfaces": [ - "com.yahoo.jdisc.handler.RequestHandler" - ], - "attributes": [ - "public", - "final" - ], - "methods": [ - "public void <init>()", - "public com.yahoo.jdisc.handler.ContentChannel handleRequest(com.yahoo.jdisc.Request, com.yahoo.jdisc.handler.ResponseHandler)", - "public void handleTimeout(com.yahoo.jdisc.Request, com.yahoo.jdisc.handler.ResponseHandler)" - ], - "fields": [] - }, - "com.yahoo.jdisc.test.NonWorkingResponseHandler": { - "superClass": "java.lang.Object", - "interfaces": [ - "com.yahoo.jdisc.handler.ResponseHandler" - ], - "attributes": [ - "public" - ], - "methods": [ - "public void <init>()", - "public com.yahoo.jdisc.handler.ContentChannel handleResponse(com.yahoo.jdisc.Response)" - ], - "fields": [] - }, - "com.yahoo.jdisc.test.NonWorkingServerProvider": { - "superClass": "com.yahoo.jdisc.NoopSharedResource", - "interfaces": [ - "com.yahoo.jdisc.service.ServerProvider" - 
], - "attributes": [ - "public", - "final" - ], - "methods": [ - "public void <init>()", - "public void start()", - "public void close()" - ], - "fields": [] - }, - "com.yahoo.jdisc.test.ServerProviderConformanceTest$Adapter": { - "superClass": "java.lang.Object", - "interfaces": [], - "attributes": [ - "public", - "interface", - "abstract" - ], - "methods": [ - "public abstract com.google.inject.Module newConfigModule()", - "public abstract java.lang.Class getServerProviderClass()", - "public abstract java.lang.Object newClient(com.yahoo.jdisc.service.ServerProvider)", - "public abstract java.lang.Object executeRequest(java.lang.Object, boolean)", - "public abstract java.lang.Iterable newResponseContent()", - "public abstract void validateResponse(java.lang.Object)" - ], - "fields": [] - }, - "com.yahoo.jdisc.test.ServerProviderConformanceTest$ConformanceException": { - "superClass": "java.lang.RuntimeException", - "interfaces": [], - "attributes": [ - "public" - ], - "methods": [ - "public void <init>()", - "public void <init>(com.yahoo.jdisc.test.ServerProviderConformanceTest$Event)", - "public java.lang.String getMessage()" - ], - "fields": [] - }, - "com.yahoo.jdisc.test.ServerProviderConformanceTest": { - "superClass": "java.lang.Object", - "interfaces": [], - "attributes": [ - "public", - "abstract" - ], - "methods": [ - "public void <init>()", - "public abstract void testContainerNotReadyException()", - "public abstract void testBindingSetNotFoundException()", - "public abstract void testNoBindingSetSelectedException()", - "public abstract void testBindingNotFoundException()", - "public abstract void testRequestHandlerWithSyncCloseResponse()", - "public abstract void testRequestHandlerWithSyncWriteResponse()", - "public abstract void testRequestHandlerWithSyncHandleResponse()", - "public abstract void testRequestHandlerWithAsyncHandleResponse()", - "public abstract void testRequestException()", - "public abstract void 
testRequestExceptionWithSyncCloseResponse()", - "public abstract void testRequestExceptionWithSyncWriteResponse()", - "public abstract void testRequestNondeterministicExceptionWithSyncHandleResponse()", - "public abstract void testRequestExceptionBeforeResponseWriteWithSyncHandleResponse()", - "public abstract void testRequestExceptionAfterResponseWriteWithSyncHandleResponse()", - "public abstract void testRequestNondeterministicExceptionWithAsyncHandleResponse()", - "public abstract void testRequestExceptionBeforeResponseWriteWithAsyncHandleResponse()", - "public abstract void testRequestExceptionAfterResponseCloseNoContentWithAsyncHandleResponse()", - "public abstract void testRequestExceptionAfterResponseWriteWithAsyncHandleResponse()", - "public abstract void testRequestContentWriteWithSyncCompletion()", - "public abstract void testRequestContentWriteWithAsyncCompletion()", - "public abstract void testRequestContentWriteWithNondeterministicSyncFailure()", - "public abstract void testRequestContentWriteWithSyncFailureBeforeResponseWrite()", - "public abstract void testRequestContentWriteWithSyncFailureAfterResponseWrite()", - "public abstract void testRequestContentWriteWithNondeterministicAsyncFailure()", - "public abstract void testRequestContentWriteWithAsyncFailureBeforeResponseWrite()", - "public abstract void testRequestContentWriteWithAsyncFailureAfterResponseWrite()", - "public abstract void testRequestContentWriteWithAsyncFailureAfterResponseCloseNoContent()", - "public abstract void testRequestContentWriteNondeterministicException()", - "public abstract void testRequestContentWriteExceptionBeforeResponseWrite()", - "public abstract void testRequestContentWriteExceptionAfterResponseWrite()", - "public abstract void testRequestContentWriteExceptionAfterResponseCloseNoContent()", - "public abstract void testRequestContentWriteNondeterministicExceptionWithSyncCompletion()", - "public abstract void 
testRequestContentWriteExceptionBeforeResponseWriteWithSyncCompletion()", - "public abstract void testRequestContentWriteExceptionAfterResponseWriteWithSyncCompletion()", - "public abstract void testRequestContentWriteExceptionAfterResponseCloseNoContentWithSyncCompletion()", - "public abstract void testRequestContentWriteNondeterministicExceptionWithAsyncCompletion()", - "public abstract void testRequestContentWriteExceptionBeforeResponseWriteWithAsyncCompletion()", - "public abstract void testRequestContentWriteExceptionAfterResponseWriteWithAsyncCompletion()", - "public abstract void testRequestContentWriteExceptionAfterResponseCloseNoContentWithAsyncCompletion()", - "public abstract void testRequestContentWriteExceptionWithNondeterministicSyncFailure()", - "public abstract void testRequestContentWriteExceptionWithSyncFailureBeforeResponseWrite()", - "public abstract void testRequestContentWriteExceptionWithSyncFailureAfterResponseWrite()", - "public abstract void testRequestContentWriteExceptionWithSyncFailureAfterResponseCloseNoContent()", - "public abstract void testRequestContentWriteExceptionWithNondeterministicAsyncFailure()", - "public abstract void testRequestContentWriteExceptionWithAsyncFailureBeforeResponseWrite()", - "public abstract void testRequestContentWriteExceptionWithAsyncFailureAfterResponseWrite()", - "public abstract void testRequestContentWriteExceptionWithAsyncFailureAfterResponseCloseNoContent()", - "public abstract void testRequestContentCloseWithSyncCompletion()", - "public abstract void testRequestContentCloseWithAsyncCompletion()", - "public abstract void testRequestContentCloseWithNondeterministicSyncFailure()", - "public abstract void testRequestContentCloseWithSyncFailureBeforeResponseWrite()", - "public abstract void testRequestContentCloseWithSyncFailureAfterResponseWrite()", - "public abstract void testRequestContentCloseWithSyncFailureAfterResponseCloseNoContent()", - "public abstract void 
testRequestContentCloseWithNondeterministicAsyncFailure()", - "public abstract void testRequestContentCloseWithAsyncFailureBeforeResponseWrite()", - "public abstract void testRequestContentCloseWithAsyncFailureAfterResponseWrite()", - "public abstract void testRequestContentCloseWithAsyncFailureAfterResponseCloseNoContent()", - "public abstract void testRequestContentCloseNondeterministicException()", - "public abstract void testRequestContentCloseExceptionBeforeResponseWrite()", - "public abstract void testRequestContentCloseExceptionAfterResponseWrite()", - "public abstract void testRequestContentCloseExceptionAfterResponseCloseNoContent()", - "public abstract void testRequestContentCloseNondeterministicExceptionWithSyncCompletion()", - "public abstract void testRequestContentCloseExceptionBeforeResponseWriteWithSyncCompletion()", - "public abstract void testRequestContentCloseExceptionAfterResponseWriteWithSyncCompletion()", - "public abstract void testRequestContentCloseExceptionAfterResponseCloseNoContentWithSyncCompletion()", - "public abstract void testRequestContentCloseNondeterministicExceptionWithAsyncCompletion()", - "public abstract void testRequestContentCloseExceptionBeforeResponseWriteWithAsyncCompletion()", - "public abstract void testRequestContentCloseExceptionAfterResponseWriteWithAsyncCompletion()", - "public abstract void testRequestContentCloseExceptionAfterResponseCloseNoContentWithAsyncCompletion()", - "public abstract void testRequestContentCloseNondeterministicExceptionWithSyncFailure()", - "public abstract void testRequestContentCloseExceptionBeforeResponseWriteWithSyncFailure()", - "public abstract void testRequestContentCloseExceptionAfterResponseWriteWithSyncFailure()", - "public abstract void testRequestContentCloseExceptionAfterResponseCloseNoContentWithSyncFailure()", - "public abstract void testRequestContentCloseNondeterministicExceptionWithAsyncFailure()", - "public abstract void 
testRequestContentCloseExceptionBeforeResponseWriteWithAsyncFailure()", - "public abstract void testRequestContentCloseExceptionAfterResponseWriteWithAsyncFailure()", - "public abstract void testRequestContentCloseExceptionAfterResponseCloseNoContentWithAsyncFailure()", - "public abstract void testResponseWriteCompletionException()", - "public abstract void testResponseCloseCompletionException()", - "public abstract void testResponseCloseCompletionExceptionNoContent()", - "protected varargs void runTest(com.yahoo.jdisc.test.ServerProviderConformanceTest$Adapter, com.google.inject.Module[])" - ], - "fields": [] - }, - "com.yahoo.jdisc.test.TestDriver": { - "superClass": "java.lang.Object", - "interfaces": [ - "com.yahoo.jdisc.application.ContainerActivator", - "com.yahoo.jdisc.service.CurrentContainer" - ], - "attributes": [ - "public" - ], - "methods": [ - "public com.yahoo.jdisc.application.ContainerBuilder newContainerBuilder()", - "public com.yahoo.jdisc.application.DeactivatedContainer activateContainer(com.yahoo.jdisc.application.ContainerBuilder)", - "public com.yahoo.jdisc.Container newReference(java.net.URI)", - "public com.yahoo.jdisc.core.BootstrapLoader bootstrapLoader()", - "public com.yahoo.jdisc.application.Application application()", - "public com.yahoo.jdisc.application.OsgiFramework osgiFramework()", - "public com.yahoo.jdisc.handler.ContentChannel connectRequest(java.lang.String, com.yahoo.jdisc.handler.ResponseHandler)", - "public java.util.concurrent.Future dispatchRequest(java.lang.String, com.yahoo.jdisc.handler.ResponseHandler)", - "public void scheduleClose()", - "public boolean awaitClose(long, java.util.concurrent.TimeUnit)", - "public boolean close()", - "public com.yahoo.jdisc.handler.RequestDispatch newRequestDispatch(java.lang.String, com.yahoo.jdisc.handler.ResponseHandler)", - "public static varargs com.yahoo.jdisc.test.TestDriver newInjectedApplicationInstance(java.lang.Class, com.google.inject.Module[])", - "public static varargs 
com.yahoo.jdisc.test.TestDriver newInjectedApplicationInstanceWithoutOsgi(java.lang.Class, com.google.inject.Module[])", - "public static varargs com.yahoo.jdisc.test.TestDriver newInjectedApplicationInstance(com.yahoo.jdisc.application.Application, com.google.inject.Module[])", - "public static varargs com.yahoo.jdisc.test.TestDriver newInjectedApplicationInstanceWithoutOsgi(com.yahoo.jdisc.application.Application, com.google.inject.Module[])", - "public static varargs com.yahoo.jdisc.test.TestDriver newSimpleApplicationInstance(com.google.inject.Module[])", - "public static varargs com.yahoo.jdisc.test.TestDriver newSimpleApplicationInstanceWithoutOsgi(com.google.inject.Module[])", - "public static varargs com.yahoo.jdisc.test.TestDriver newApplicationBundleInstance(java.lang.String, boolean, com.google.inject.Module[])", - "public static varargs com.yahoo.jdisc.test.TestDriver newInstance(com.yahoo.jdisc.application.OsgiFramework, java.lang.String, boolean, com.google.inject.Module[])", - "public static com.yahoo.jdisc.core.FelixFramework newOsgiFramework()", - "public static com.yahoo.jdisc.application.OsgiFramework newNonWorkingOsgiFramework()" - ], - "fields": [] - }, - "com.yahoo.jdisc.test.TestTimer": { - "superClass": "java.lang.Object", - "interfaces": [ - "com.yahoo.jdisc.Timer" - ], - "attributes": [ - "public" - ], - "methods": [ - "public void <init>()", - "public void <init>(java.time.Instant)", - "public void setMillis(long)", - "public void advanceMillis(long)", - "public void advanceSeconds(long)", - "public void advanceMinutes(long)", - "public void advance(java.time.Duration)", - "public java.time.Instant currentTime()", - "public long currentTimeMillis()" - ], - "fields": [] } }
\ No newline at end of file diff --git a/jdisc_core/src/main/java/com/yahoo/jdisc/handler/FastContentOutputStream.java b/jdisc_core/src/main/java/com/yahoo/jdisc/handler/FastContentOutputStream.java index 54e50df5a25..e001db2ab81 100644 --- a/jdisc_core/src/main/java/com/yahoo/jdisc/handler/FastContentOutputStream.java +++ b/jdisc_core/src/main/java/com/yahoo/jdisc/handler/FastContentOutputStream.java @@ -1,12 +1,11 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.jdisc.handler; -import com.google.common.util.concurrent.ListenableFuture; - import java.nio.ByteBuffer; import java.util.Objects; import java.util.concurrent.ExecutionException; import java.util.concurrent.Executor; +import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; @@ -20,7 +19,7 @@ import java.util.concurrent.TimeoutException; * * @author Simon Thoresen Hult */ -public class FastContentOutputStream extends AbstractContentOutputStream implements ListenableFuture<Boolean> { +public class FastContentOutputStream extends AbstractContentOutputStream implements Future<Boolean> { private final FastContentWriter out; @@ -78,7 +77,6 @@ public class FastContentOutputStream extends AbstractContentOutputStream impleme return out.get(timeout, unit); } - @Override public void addListener(Runnable listener, Executor executor) { out.addListener(listener, executor); } diff --git a/jdisc_core/src/main/java/com/yahoo/jdisc/handler/FastContentWriter.java b/jdisc_core/src/main/java/com/yahoo/jdisc/handler/FastContentWriter.java index 596ae07f1d5..7c278c67d59 100644 --- a/jdisc_core/src/main/java/com/yahoo/jdisc/handler/FastContentWriter.java +++ b/jdisc_core/src/main/java/com/yahoo/jdisc/handler/FastContentWriter.java @@ -1,16 +1,11 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.jdisc.handler; -import com.google.common.util.concurrent.ListenableFuture; -import com.google.common.util.concurrent.SettableFuture; - import java.nio.ByteBuffer; import java.nio.charset.StandardCharsets; import java.util.Objects; -import java.util.concurrent.ExecutionException; +import java.util.concurrent.CompletableFuture; import java.util.concurrent.Executor; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; @@ -25,13 +20,12 @@ import java.util.concurrent.atomic.AtomicInteger; * * @author Simon Thoresen Hult */ -public class FastContentWriter implements ListenableFuture<Boolean>, AutoCloseable { +public class FastContentWriter extends CompletableFuture<Boolean> implements AutoCloseable { private final AtomicBoolean closed = new AtomicBoolean(false); private final AtomicInteger numPendingCompletions = new AtomicInteger(); private final CompletionHandler completionHandler = new SimpleCompletionHandler(); private final ContentChannel out; - private final SettableFuture<Boolean> future = SettableFuture.create(); /** * <p>Creates a new FastContentWriter that encapsulates a given {@link ContentChannel}.</p> @@ -87,7 +81,7 @@ public class FastContentWriter implements ListenableFuture<Boolean>, AutoCloseab try { out.write(buf, completionHandler); } catch (Throwable t) { - future.setException(t); + completeExceptionally(t); throw t; } } @@ -103,14 +97,13 @@ public class FastContentWriter implements ListenableFuture<Boolean>, AutoCloseab try { out.close(completionHandler); } catch (Throwable t) { - future.setException(t); + completeExceptionally(t); throw t; } } - @Override public void addListener(Runnable listener, Executor executor) { - future.addListener(listener, executor); + whenCompleteAsync((__, ___) -> listener.run(), executor); } @Override @@ -123,34 +116,19 @@ public class FastContentWriter implements 
ListenableFuture<Boolean>, AutoCloseab return false; } - @Override - public boolean isDone() { - return future.isDone(); - } - - @Override - public Boolean get() throws InterruptedException, ExecutionException { - return future.get(); - } - - @Override - public Boolean get(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException { - return future.get(timeout, unit); - } - private class SimpleCompletionHandler implements CompletionHandler { @Override public void completed() { numPendingCompletions.decrementAndGet(); if (closed.get() && numPendingCompletions.get() == 0) { - future.set(true); + complete(true); } } @Override public void failed(Throwable t) { - future.setException(t); + completeExceptionally(t); } } } diff --git a/jdisc_core/src/main/java/com/yahoo/jdisc/handler/FutureCompletion.java b/jdisc_core/src/main/java/com/yahoo/jdisc/handler/FutureCompletion.java index ab989b89b1f..a188be6145f 100644 --- a/jdisc_core/src/main/java/com/yahoo/jdisc/handler/FutureCompletion.java +++ b/jdisc_core/src/main/java/com/yahoo/jdisc/handler/FutureCompletion.java @@ -1,7 +1,8 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.jdisc.handler; -import com.google.common.util.concurrent.AbstractFuture; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.Executor; /** * <p>This class provides an implementation of {@link CompletionHandler} that allows you to wait for either {@link @@ -13,16 +14,16 @@ import com.google.common.util.concurrent.AbstractFuture; * * @author Simon Thoresen Hult */ -public final class FutureCompletion extends AbstractFuture<Boolean> implements CompletionHandler { +public final class FutureCompletion extends CompletableFuture<Boolean> implements CompletionHandler { @Override public void completed() { - set(true); + complete(true); } @Override public void failed(Throwable t) { - setException(t); + completeExceptionally(t); } @Override @@ -34,4 +35,6 @@ public final class FutureCompletion extends AbstractFuture<Boolean> implements C public final boolean isCancelled() { return false; } + + public void addListener(Runnable r, Executor e) { whenCompleteAsync((__, ___) -> r.run(), e); } } diff --git a/jdisc_core/src/main/java/com/yahoo/jdisc/handler/FutureConjunction.java b/jdisc_core/src/main/java/com/yahoo/jdisc/handler/FutureConjunction.java index c2e32f3ea56..ba304d9e2de 100644 --- a/jdisc_core/src/main/java/com/yahoo/jdisc/handler/FutureConjunction.java +++ b/jdisc_core/src/main/java/com/yahoo/jdisc/handler/FutureConjunction.java @@ -1,43 +1,50 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.jdisc.handler; -import com.google.common.util.concurrent.Futures; -import com.google.common.util.concurrent.JdkFutureAdapters; -import com.google.common.util.concurrent.ListenableFuture; +import com.yahoo.concurrent.CompletableFutures; import java.util.LinkedList; import java.util.List; -import java.util.concurrent.*; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Executor; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; /** * <p>This class implements a Future<Boolean> that is conjunction of zero or more other Future<Boolean>s, * i.e. it evaluates to <code>true</code> if, and only if, all its operands evaluate to <code>true</code>. To use this class, - * simply create an instance of it and add operands to it using the {@link #addOperand(ListenableFuture)} method.</p> - * TODO: consider rewriting usage of FutureConjunction to use CompletableFuture instead. + * simply create an instance of it and add operands to it using the {@link #addOperand(CompletableFuture)} method.</p> * * @author Simon Thoresen Hult */ -public final class FutureConjunction implements ListenableFuture<Boolean> { +final class FutureConjunction implements Future<Boolean> { - private final List<ListenableFuture<Boolean>> operands = new LinkedList<>(); + private final List<CompletableFuture<Boolean>> operands = new LinkedList<>(); /** - * <p>Adds a ListenableFuture<Boolean> to this conjunction. This can be called at any time, even after having called + * <p>Adds a {@link CompletableFuture} to this conjunction. This can be called at any time, even after having called * {@link #get()} previously.</p> * * @param operand The operand to add to this conjunction. 
*/ - public void addOperand(ListenableFuture<Boolean> operand) { + public void addOperand(CompletableFuture<Boolean> operand) { operands.add(operand); } - @Override public void addListener(Runnable listener, Executor executor) { - Futures.allAsList(operands).addListener(listener, executor); + CompletableFutures.allOf(operands) + .whenCompleteAsync((__, ___) -> listener.run(), executor); + } + + CompletableFuture<Boolean> completableFuture() { + return CompletableFutures.allOf(operands) + .thenApply(ops -> ops.stream().allMatch(bool -> bool)); } @Override - public final boolean cancel(boolean mayInterruptIfRunning) { + public boolean cancel(boolean mayInterruptIfRunning) { boolean ret = true; for (Future<Boolean> op : operands) { if (!op.cancel(mayInterruptIfRunning)) { @@ -48,7 +55,7 @@ public final class FutureConjunction implements ListenableFuture<Boolean> { } @Override - public final boolean isCancelled() { + public boolean isCancelled() { for (Future<Boolean> op : operands) { if (!op.isCancelled()) { return false; @@ -58,7 +65,7 @@ public final class FutureConjunction implements ListenableFuture<Boolean> { } @Override - public final boolean isDone() { + public boolean isDone() { for (Future<Boolean> op : operands) { if (!op.isDone()) { return false; @@ -68,8 +75,8 @@ public final class FutureConjunction implements ListenableFuture<Boolean> { } @Override - public final Boolean get() throws InterruptedException, ExecutionException { - Boolean ret = Boolean.TRUE; + public Boolean get() throws InterruptedException, ExecutionException { + boolean ret = Boolean.TRUE; for (Future<Boolean> op : operands) { if (!op.get()) { ret = Boolean.FALSE; @@ -79,9 +86,9 @@ public final class FutureConjunction implements ListenableFuture<Boolean> { } @Override - public final Boolean get(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, + public Boolean get(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException { - 
Boolean ret = Boolean.TRUE; + boolean ret = Boolean.TRUE; long nanos = unit.toNanos(timeout); long lastTime = System.nanoTime(); for (Future<Boolean> op : operands) { diff --git a/jdisc_core/src/main/java/com/yahoo/jdisc/handler/FutureResponse.java b/jdisc_core/src/main/java/com/yahoo/jdisc/handler/FutureResponse.java index b8073865667..2284c563f50 100644 --- a/jdisc_core/src/main/java/com/yahoo/jdisc/handler/FutureResponse.java +++ b/jdisc_core/src/main/java/com/yahoo/jdisc/handler/FutureResponse.java @@ -1,16 +1,18 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.jdisc.handler; -import com.google.common.util.concurrent.AbstractFuture; import com.yahoo.jdisc.Response; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.Executor; + /** * This class provides an implementation of {@link ResponseHandler} that allows you to wait for a {@link Response} to * be returned. * * @author Simon Thoresen Hult */ -public final class FutureResponse extends AbstractFuture<Response> implements ResponseHandler { +public final class FutureResponse extends CompletableFuture<Response> implements ResponseHandler { private final ResponseHandler handler; @@ -38,6 +40,8 @@ public final class FutureResponse extends AbstractFuture<Response> implements Re }); } + public void addListener(Runnable r, Executor e) { whenCompleteAsync((__, ___) -> r.run(), e); } + /** * <p>Constructs a new FutureResponse that calls the given {@link ResponseHandler} when {@link * #handleResponse(Response)} is invoked.</p> @@ -50,7 +54,7 @@ public final class FutureResponse extends AbstractFuture<Response> implements Re @Override public ContentChannel handleResponse(Response response) { - set(response); + complete(response); return handler.handleResponse(response); } diff --git a/jdisc_core/src/main/java/com/yahoo/jdisc/handler/RequestDispatch.java 
b/jdisc_core/src/main/java/com/yahoo/jdisc/handler/RequestDispatch.java index c85aa6375af..c1457290904 100644 --- a/jdisc_core/src/main/java/com/yahoo/jdisc/handler/RequestDispatch.java +++ b/jdisc_core/src/main/java/com/yahoo/jdisc/handler/RequestDispatch.java @@ -1,19 +1,20 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.jdisc.handler; -import com.google.common.util.concurrent.Futures; -import com.google.common.util.concurrent.ListenableFuture; +import com.yahoo.jdisc.References; import com.yahoo.jdisc.Request; import com.yahoo.jdisc.ResourceReference; import com.yahoo.jdisc.Response; import com.yahoo.jdisc.SharedResource; -import com.yahoo.jdisc.References; import java.nio.ByteBuffer; import java.util.Collections; -import java.util.concurrent.*; -import java.util.ArrayList; -import java.util.List; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Executor; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; /** * <p>This class provides a convenient way of safely dispatching a {@link Request}. Using this class you do not have to @@ -46,7 +47,7 @@ import java.util.List; * * @author Simon Thoresen Hult */ -public abstract class RequestDispatch implements ListenableFuture<Response>, ResponseHandler { +public abstract class RequestDispatch implements Future<Response>, ResponseHandler { private final FutureConjunction completions = new FutureConjunction(); private final FutureResponse futureResponse = new FutureResponse(this); @@ -106,22 +107,26 @@ public abstract class RequestDispatch implements ListenableFuture<Response>, Res * * @return A Future that can be waited for. 
*/ - public final ListenableFuture<Response> dispatch() { + public final CompletableFuture<Response> dispatch() { try (FastContentWriter writer = new FastContentWriter(connect())) { for (ByteBuffer buf : requestContent()) { writer.write(buf); } completions.addOperand(writer); } - return this; + return CompletableFuture.allOf(completions.completableFuture(), futureResponse) + .thenApply(__ -> { + try { + return futureResponse.get(); + } catch (InterruptedException | ExecutionException e) { + throw new IllegalStateException(e); // Should not happen since both futures are complete + } + }); } - @Override public void addListener(Runnable listener, Executor executor) { - List<ListenableFuture<?>> combined = new ArrayList<>(2); - combined.add(completions); - combined.add(futureResponse); - Futures.allAsList(combined).addListener(listener, executor); + CompletableFuture.allOf(completions.completableFuture(), futureResponse) + .whenCompleteAsync((__, ___) -> listener.run(), executor); } @Override diff --git a/jdisc_core/src/main/java/com/yahoo/jdisc/handler/ResponseDispatch.java b/jdisc_core/src/main/java/com/yahoo/jdisc/handler/ResponseDispatch.java index 377c8ecf4a9..9387171c1ac 100644 --- a/jdisc_core/src/main/java/com/yahoo/jdisc/handler/ResponseDispatch.java +++ b/jdisc_core/src/main/java/com/yahoo/jdisc/handler/ResponseDispatch.java @@ -1,15 +1,17 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.jdisc.handler; -import com.google.common.util.concurrent.ForwardingListenableFuture; -import com.google.common.util.concurrent.ListenableFuture; import com.yahoo.jdisc.Response; import com.yahoo.jdisc.SharedResource; import java.nio.ByteBuffer; import java.util.Arrays; import java.util.Collections; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; /** * <p>This class provides a convenient way of safely dispatching a {@link Response}. It is similar in use to {@link @@ -34,7 +36,7 @@ import java.util.concurrent.Future; * * @author Simon Thoresen Hult */ -public abstract class ResponseDispatch extends ForwardingListenableFuture<Boolean> { +public abstract class ResponseDispatch implements Future<Boolean> { private final FutureConjunction completions = new FutureConjunction(); @@ -90,19 +92,14 @@ public abstract class ResponseDispatch extends ForwardingListenableFuture<Boolea * @param responseHandler The ResponseHandler to dispatch to. * @return A Future that can be waited for. 
*/ - public final ListenableFuture<Boolean> dispatch(ResponseHandler responseHandler) { + public final CompletableFuture<Boolean> dispatch(ResponseHandler responseHandler) { try (FastContentWriter writer = new FastContentWriter(connect(responseHandler))) { for (ByteBuffer buf : responseContent()) { writer.write(buf); } completions.addOperand(writer); } - return this; - } - - @Override - protected final ListenableFuture<Boolean> delegate() { - return completions; + return completions.completableFuture(); } @Override @@ -115,6 +112,15 @@ public abstract class ResponseDispatch extends ForwardingListenableFuture<Boolea return false; } + @Override public boolean isDone() { return completions.isDone(); } + + @Override public Boolean get() throws InterruptedException, ExecutionException { return completions.get(); } + + @Override + public Boolean get(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException { + return completions.get(timeout, unit); + } + /** * <p>Factory method for creating a ResponseDispatch with a {@link Response} that has the given status code, and * ByteBuffer content.</p> diff --git a/jdisc_core/src/main/java/com/yahoo/jdisc/test/package-info.java b/jdisc_core/src/main/java/com/yahoo/jdisc/test/package-info.java index 199a12216ad..bfb4088aa99 100644 --- a/jdisc_core/src/main/java/com/yahoo/jdisc/test/package-info.java +++ b/jdisc_core/src/main/java/com/yahoo/jdisc/test/package-info.java @@ -4,5 +4,4 @@ * * @see com.yahoo.jdisc.test.TestDriver */ -@com.yahoo.api.annotations.PublicApi package com.yahoo.jdisc.test; diff --git a/jdisc_core/src/test/java/com/yahoo/jdisc/handler/FastContentWriterTestCase.java b/jdisc_core/src/test/java/com/yahoo/jdisc/handler/FastContentWriterTestCase.java index aa6c4ce3b1b..45bc230896f 100644 --- a/jdisc_core/src/test/java/com/yahoo/jdisc/handler/FastContentWriterTestCase.java +++ b/jdisc_core/src/test/java/com/yahoo/jdisc/handler/FastContentWriterTestCase.java @@ -1,7 +1,6 @@ // 
Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.jdisc.handler; -import com.google.common.util.concurrent.MoreExecutors; import org.junit.Test; import org.mockito.Mockito; @@ -188,7 +187,7 @@ public class FastContentWriterTestCase { ReadableContentChannel buf = new ReadableContentChannel(); FastContentWriter out = new FastContentWriter(buf); RunnableLatch listener = new RunnableLatch(); - out.addListener(listener, MoreExecutors.directExecutor()); + out.addListener(listener, Runnable::run); out.write(new byte[] { 6, 9 }); assertFalse(listener.await(100, TimeUnit.MILLISECONDS)); diff --git a/jdisc_core/src/test/java/com/yahoo/jdisc/handler/FutureCompletionTestCase.java b/jdisc_core/src/test/java/com/yahoo/jdisc/handler/FutureCompletionTestCase.java index 6c3803f4f56..ef63b200b5f 100644 --- a/jdisc_core/src/test/java/com/yahoo/jdisc/handler/FutureCompletionTestCase.java +++ b/jdisc_core/src/test/java/com/yahoo/jdisc/handler/FutureCompletionTestCase.java @@ -1,7 +1,6 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.jdisc.handler; -import com.google.common.util.concurrent.MoreExecutors; import org.junit.Test; import java.util.concurrent.ExecutionException; @@ -91,14 +90,14 @@ public class FutureCompletionTestCase { public void requireThatCompletionCanBeListenedTo() throws InterruptedException { FutureCompletion completion = new FutureCompletion(); RunnableLatch listener = new RunnableLatch(); - completion.addListener(listener, MoreExecutors.directExecutor()); + completion.addListener(listener, Runnable::run); assertFalse(listener.await(100, TimeUnit.MILLISECONDS)); completion.completed(); assertTrue(listener.await(600, TimeUnit.SECONDS)); completion = new FutureCompletion(); listener = new RunnableLatch(); - completion.addListener(listener, MoreExecutors.directExecutor()); + completion.addListener(listener, Runnable::run); assertFalse(listener.await(100, TimeUnit.MILLISECONDS)); completion.failed(new Throwable()); assertTrue(listener.await(600, TimeUnit.SECONDS)); diff --git a/jdisc_core/src/test/java/com/yahoo/jdisc/handler/FutureConjunctionTestCase.java b/jdisc_core/src/test/java/com/yahoo/jdisc/handler/FutureConjunctionTestCase.java index 346b06e0f23..1aa78a16dfc 100644 --- a/jdisc_core/src/test/java/com/yahoo/jdisc/handler/FutureConjunctionTestCase.java +++ b/jdisc_core/src/test/java/com/yahoo/jdisc/handler/FutureConjunctionTestCase.java @@ -1,41 +1,37 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.jdisc.handler; -import com.google.common.util.concurrent.AbstractFuture; -import com.google.common.util.concurrent.ListenableFuture; -import com.google.common.util.concurrent.ListeningExecutorService; -import com.google.common.util.concurrent.MoreExecutors; import org.junit.Test; -import java.util.concurrent.Callable; +import java.util.concurrent.CompletableFuture; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; -import static org.junit.Assert.fail; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; /** * @author Simon Thoresen Hult */ public class FutureConjunctionTestCase { - private final ListeningExecutorService executor = MoreExecutors.listeningDecorator(Executors.newCachedThreadPool()); + private final ExecutorService executor = Executors.newCachedThreadPool(); @Test public void requireThatAllFuturesAreWaitedFor() throws Exception { final CountDownLatch latch = new CountDownLatch(1); FutureConjunction future = new FutureConjunction(); - future.addOperand(executor.submit(new Callable<Boolean>() { - - @Override - public Boolean call() throws Exception { - return latch.await(600, TimeUnit.SECONDS); - } - })); + CompletableFuture<Boolean> cf = new CompletableFuture<>(); + cf.completeAsync(() -> { + try { return latch.await(600, TimeUnit.SECONDS); } + catch (InterruptedException e) { return false; } + }, executor); + future.addOperand(cf); try { future.get(100, TimeUnit.MILLISECONDS); fail(); @@ -118,7 +114,7 @@ public class FutureConjunctionTestCase { public void requireThatConjunctionCanBeListenedTo() throws InterruptedException { FutureConjunction conjunction = new 
FutureConjunction(); RunnableLatch listener = new RunnableLatch(); - conjunction.addListener(listener, MoreExecutors.directExecutor()); + conjunction.addListener(listener, Runnable::run); assertTrue(listener.await(600, TimeUnit.SECONDS)); conjunction = new FutureConjunction(); @@ -127,7 +123,7 @@ public class FutureConjunctionTestCase { FutureBoolean bar = new FutureBoolean(); conjunction.addOperand(bar); listener = new RunnableLatch(); - conjunction.addListener(listener, MoreExecutors.directExecutor()); + conjunction.addListener(listener, Runnable::run); assertFalse(listener.await(100, TimeUnit.MILLISECONDS)); foo.set(true); assertFalse(listener.await(100, TimeUnit.MILLISECONDS)); @@ -140,7 +136,7 @@ public class FutureConjunctionTestCase { bar = new FutureBoolean(); conjunction.addOperand(bar); listener = new RunnableLatch(); - conjunction.addListener(listener, MoreExecutors.directExecutor()); + conjunction.addListener(listener, Runnable::run); assertFalse(listener.await(100, TimeUnit.MILLISECONDS)); bar.set(true); assertFalse(listener.await(100, TimeUnit.MILLISECONDS)); @@ -190,14 +186,14 @@ public class FutureConjunctionTestCase { return foo.isCancelled(); } - private static class FutureBoolean extends AbstractFuture<Boolean> { + private static class FutureBoolean extends CompletableFuture<Boolean> { public boolean set(Boolean val) { - return super.set(val); + return super.complete(val); } } - private static class MyFuture extends AbstractFuture<Boolean> { + private static class MyFuture extends CompletableFuture<Boolean> { final boolean value; final boolean isDone; @@ -236,19 +232,19 @@ public class FutureConjunctionTestCase { return value; } - static ListenableFuture<Boolean> newInstance(boolean value) { + static CompletableFuture<Boolean> newInstance(boolean value) { return new MyFuture(value, false, false, false); } - static ListenableFuture<Boolean> newIsDone(boolean isDone) { + static CompletableFuture<Boolean> newIsDone(boolean isDone) { return new 
MyFuture(false, isDone, false, false); } - static ListenableFuture<Boolean> newCanCancel(boolean canCancel) { + static CompletableFuture<Boolean> newCanCancel(boolean canCancel) { return new MyFuture(false, false, canCancel, false); } - static ListenableFuture<Boolean> newIsCancelled(boolean isCancelled) { + static CompletableFuture<Boolean> newIsCancelled(boolean isCancelled) { return new MyFuture(false, false, false, isCancelled); } } diff --git a/jdisc_core/src/test/java/com/yahoo/jdisc/handler/FutureResponseTestCase.java b/jdisc_core/src/test/java/com/yahoo/jdisc/handler/FutureResponseTestCase.java index 440698257a4..398f288e307 100644 --- a/jdisc_core/src/test/java/com/yahoo/jdisc/handler/FutureResponseTestCase.java +++ b/jdisc_core/src/test/java/com/yahoo/jdisc/handler/FutureResponseTestCase.java @@ -1,7 +1,6 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.jdisc.handler; -import com.google.common.util.concurrent.MoreExecutors; import com.yahoo.jdisc.Response; import com.yahoo.jdisc.test.NonWorkingContentChannel; import org.junit.Test; @@ -73,7 +72,7 @@ public class FutureResponseTestCase { public void requireThatResponseCanBeListenedTo() throws InterruptedException { FutureResponse response = new FutureResponse(); RunnableLatch listener = new RunnableLatch(); - response.addListener(listener, MoreExecutors.directExecutor()); + response.addListener(listener, Runnable::run); assertFalse(listener.await(100, TimeUnit.MILLISECONDS)); response.handleResponse(new Response(Response.Status.OK)); assertTrue(listener.await(600, TimeUnit.SECONDS)); diff --git a/jdisc_core/src/test/java/com/yahoo/jdisc/handler/RequestDispatchTestCase.java b/jdisc_core/src/test/java/com/yahoo/jdisc/handler/RequestDispatchTestCase.java index 3b49d1f349e..6ec78f01733 100644 --- a/jdisc_core/src/test/java/com/yahoo/jdisc/handler/RequestDispatchTestCase.java +++ 
b/jdisc_core/src/test/java/com/yahoo/jdisc/handler/RequestDispatchTestCase.java @@ -1,7 +1,6 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.jdisc.handler; -import com.google.common.util.concurrent.MoreExecutors; import com.yahoo.jdisc.Request; import com.yahoo.jdisc.Response; import com.yahoo.jdisc.application.ContainerBuilder; @@ -17,12 +16,12 @@ import java.util.Arrays; import java.util.List; import java.util.concurrent.TimeUnit; -import static org.junit.Assert.fail; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNull; import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; /** * @author Simon Thoresen Hult @@ -218,7 +217,7 @@ public class RequestDispatchTestCase { protected Request newRequest() { return new Request(driver, URI.create("http://localhost/")); } - }.dispatch().addListener(listener, MoreExecutors.directExecutor()); + }.dispatch().whenComplete((__, ___) -> listener.run()); assertFalse(listener.await(100, TimeUnit.MILLISECONDS)); ContentChannel responseContent = ResponseDispatch.newInstance(Response.Status.OK) .connect(requestHandler.responseHandler); diff --git a/jdisc_core/src/test/java/com/yahoo/jdisc/handler/ResponseDispatchTestCase.java b/jdisc_core/src/test/java/com/yahoo/jdisc/handler/ResponseDispatchTestCase.java index f9a5c22837f..4006ab072cb 100644 --- a/jdisc_core/src/test/java/com/yahoo/jdisc/handler/ResponseDispatchTestCase.java +++ b/jdisc_core/src/test/java/com/yahoo/jdisc/handler/ResponseDispatchTestCase.java @@ -1,7 +1,6 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.jdisc.handler; -import com.google.common.util.concurrent.MoreExecutors; import com.yahoo.jdisc.Response; import org.junit.Test; @@ -14,13 +13,13 @@ import java.util.List; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; -import static org.junit.Assert.fail; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; /** * @author Simon Thoresen Hult @@ -179,7 +178,7 @@ public class ResponseDispatchTestCase { ReadableContentChannel responseContent = new ReadableContentChannel(); ResponseDispatch.newInstance(6, ByteBuffer.allocate(9)) .dispatch(new MyResponseHandler(responseContent)) - .addListener(listener, MoreExecutors.directExecutor()); + .whenComplete((__, ___) -> listener.run()); assertFalse(listener.await(100, TimeUnit.MILLISECONDS)); assertNotNull(responseContent.read()); assertFalse(listener.await(100, TimeUnit.MILLISECONDS)); diff --git a/jdisc_core/src/test/java/com/yahoo/jdisc/handler/ThreadedRequestHandlerTestCase.java b/jdisc_core/src/test/java/com/yahoo/jdisc/handler/ThreadedRequestHandlerTestCase.java index 71f207bbbff..f639877b87b 100644 --- a/jdisc_core/src/test/java/com/yahoo/jdisc/handler/ThreadedRequestHandlerTestCase.java +++ b/jdisc_core/src/test/java/com/yahoo/jdisc/handler/ThreadedRequestHandlerTestCase.java @@ -1,7 +1,6 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.jdisc.handler; -import com.google.common.util.concurrent.ListenableFuture; import com.yahoo.jdisc.Request; import com.yahoo.jdisc.Response; import com.yahoo.jdisc.application.ContainerBuilder; @@ -12,18 +11,19 @@ import org.junit.Test; import java.net.URI; import java.nio.ByteBuffer; import java.util.Arrays; +import java.util.concurrent.CompletableFuture; import java.util.concurrent.CountDownLatch; import java.util.concurrent.Executor; import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; -import static org.junit.Assert.fail; -import static org.junit.Assert.assertTrue; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNull; import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; /** * @author Simon Thoresen Hult @@ -159,8 +159,8 @@ public class ThreadedRequestHandlerTestCase { return driver; } - private static ListenableFuture<Response> dispatchRequest(final CurrentContainer container, final String uri, - final ByteBuffer... content) { + private static CompletableFuture<Response> dispatchRequest(final CurrentContainer container, final String uri, + final ByteBuffer... content) { return new RequestDispatch() { @Override diff --git a/jdisc_core/src/test/java/com/yahoo/jdisc/test/ServerProviderConformanceTestTest.java b/jdisc_core/src/test/java/com/yahoo/jdisc/test/ServerProviderConformanceTestTest.java index 01b1e72d0b6..c9c7ec1db48 100644 --- a/jdisc_core/src/test/java/com/yahoo/jdisc/test/ServerProviderConformanceTestTest.java +++ b/jdisc_core/src/test/java/com/yahoo/jdisc/test/ServerProviderConformanceTestTest.java @@ -1,7 +1,6 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.jdisc.test; -import com.google.common.util.concurrent.SettableFuture; import com.google.inject.Inject; import com.google.inject.Module; import com.google.inject.util.Modules; @@ -20,6 +19,7 @@ import java.net.URI; import java.nio.ByteBuffer; import java.nio.charset.StandardCharsets; import java.util.Collections; +import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; @@ -571,7 +571,7 @@ public class ServerProviderConformanceTestTest extends ServerProviderConformance try { request = new Request(server.container, URI.create("http://localhost/")); } catch (Throwable t) { - responseHandler.response.set(new Response(Response.Status.INTERNAL_SERVER_ERROR, t)); + responseHandler.response.complete(new Response(Response.Status.INTERNAL_SERVER_ERROR, t)); return responseHandler; } try { @@ -581,7 +581,7 @@ public class ServerProviderConformanceTestTest extends ServerProviderConformance } tryClose(out); } catch (Throwable t) { - responseHandler.response.set(new Response(Response.Status.INTERNAL_SERVER_ERROR, t)); + responseHandler.response.complete(new Response(Response.Status.INTERNAL_SERVER_ERROR, t)); // Simulate handling the failure. 
t.getMessage(); return responseHandler; @@ -594,13 +594,13 @@ public class ServerProviderConformanceTestTest extends ServerProviderConformance private static class MyResponseHandler implements ResponseHandler { - final SettableFuture<Response> response = SettableFuture.create(); - final SettableFuture<String> content = SettableFuture.create(); + final CompletableFuture<Response> response = new CompletableFuture<>(); + final CompletableFuture<String> content = new CompletableFuture<>(); final ByteArrayOutputStream out = new ByteArrayOutputStream(); @Override public ContentChannel handleResponse(final Response response) { - this.response.set(response); + this.response.complete(response); return new ContentChannel() { @Override @@ -613,7 +613,7 @@ public class ServerProviderConformanceTestTest extends ServerProviderConformance @Override public void close(final CompletionHandler handler) { - content.set(new String(out.toByteArray(), StandardCharsets.UTF_8)); + content.complete(new String(out.toByteArray(), StandardCharsets.UTF_8)); tryComplete(handler); } }; diff --git a/linguistics-components/abi-spec.json b/linguistics-components/abi-spec.json index 28025d84f25..f1deac67dc2 100644 --- a/linguistics-components/abi-spec.json +++ b/linguistics-components/abi-spec.json @@ -30,6 +30,7 @@ "public com.yahoo.language.sentencepiece.SentencePieceConfig$Builder collapseUnknowns(boolean)", "public com.yahoo.language.sentencepiece.SentencePieceConfig$Builder scoring(com.yahoo.language.sentencepiece.SentencePieceConfig$Scoring$Enum)", "public com.yahoo.language.sentencepiece.SentencePieceConfig$Builder model(com.yahoo.language.sentencepiece.SentencePieceConfig$Model$Builder)", + "public com.yahoo.language.sentencepiece.SentencePieceConfig$Builder model(java.util.function.Consumer)", "public com.yahoo.language.sentencepiece.SentencePieceConfig$Builder model(java.util.List)", "public final boolean dispatchGetConfig(com.yahoo.config.ConfigInstance$Producer)", "public final 
java.lang.String getDefMd5()", diff --git a/logd/src/logd/watcher.cpp b/logd/src/logd/watcher.cpp index bab80dab7bd..23d1580dbf4 100644 --- a/logd/src/logd/watcher.cpp +++ b/logd/src/logd/watcher.cpp @@ -140,6 +140,7 @@ Watcher::watchfile() vespalib::SigCatch catcher; int sleepcount = 0; vespalib::system_time created = vespalib::system_time::min(); + vespalib::system_time lastPrune = vespalib::system_time::min(); again: // XXX should close and/or check _wfd first ? @@ -215,6 +216,11 @@ Watcher::watchfile() bool wantrotate = (now > created + _confsubscriber.getRotateAge()) || (sb.st_size > _confsubscriber.getRotateSize()); + if (now > lastPrune + 61s) { + removeOldLogs(filename); + lastPrune = now; + } + if (rotate) { vespalib::duration rotTime = rotTimer.elapsed(); off_t overflow_size = (1.1 * _confsubscriber.getRotateSize()); @@ -240,7 +246,6 @@ Watcher::watchfile() LOG(warning, "logfile spamming %d times, aggressively removing %s", spamfill_counter, newfn); unlink(newfn); } - removeOldLogs(filename); goto again; } } else if (stat(filename, &sb) != 0 diff --git a/messagebus/abi-spec.json b/messagebus/abi-spec.json index 623904bef8d..bff28986119 100644 --- a/messagebus/abi-spec.json +++ b/messagebus/abi-spec.json @@ -413,6 +413,7 @@ "public void <init>()", "public void <init>(com.yahoo.messagebus.MessagebusConfig)", "public com.yahoo.messagebus.MessagebusConfig$Builder routingtable(com.yahoo.messagebus.MessagebusConfig$Routingtable$Builder)", + "public com.yahoo.messagebus.MessagebusConfig$Builder routingtable(java.util.function.Consumer)", "public com.yahoo.messagebus.MessagebusConfig$Builder routingtable(java.util.List)", "public final boolean dispatchGetConfig(com.yahoo.config.ConfigInstance$Producer)", "public final java.lang.String getDefMd5()", @@ -454,8 +455,10 @@ "public void <init>(com.yahoo.messagebus.MessagebusConfig$Routingtable)", "public com.yahoo.messagebus.MessagebusConfig$Routingtable$Builder protocol(java.lang.String)", "public 
com.yahoo.messagebus.MessagebusConfig$Routingtable$Builder hop(com.yahoo.messagebus.MessagebusConfig$Routingtable$Hop$Builder)", + "public com.yahoo.messagebus.MessagebusConfig$Routingtable$Builder hop(java.util.function.Consumer)", "public com.yahoo.messagebus.MessagebusConfig$Routingtable$Builder hop(java.util.List)", "public com.yahoo.messagebus.MessagebusConfig$Routingtable$Builder route(com.yahoo.messagebus.MessagebusConfig$Routingtable$Route$Builder)", + "public com.yahoo.messagebus.MessagebusConfig$Routingtable$Builder route(java.util.function.Consumer)", "public com.yahoo.messagebus.MessagebusConfig$Routingtable$Builder route(java.util.List)", "public com.yahoo.messagebus.MessagebusConfig$Routingtable build()" ], diff --git a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/metric/MetricsFormatter.java b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/metric/MetricsFormatter.java index 9277d09d02a..64a21a54999 100644 --- a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/metric/MetricsFormatter.java +++ b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/metric/MetricsFormatter.java @@ -13,6 +13,7 @@ import java.util.Locale; * @author Unknown */ public class MetricsFormatter { + private final boolean includeServiceName; private final boolean isSystemMetric; private final DecimalFormat df = new DecimalFormat("0.000", new DecimalFormatSymbols(Locale.ENGLISH)); diff --git a/metrics-proxy/src/test/java/ai/vespa/metricsproxy/http/application/ApplicationMetricsHandlerTest.java b/metrics-proxy/src/test/java/ai/vespa/metricsproxy/http/application/ApplicationMetricsHandlerTest.java index 9e7849f635e..4d1fb802afc 100644 --- a/metrics-proxy/src/test/java/ai/vespa/metricsproxy/http/application/ApplicationMetricsHandlerTest.java +++ b/metrics-proxy/src/test/java/ai/vespa/metricsproxy/http/application/ApplicationMetricsHandlerTest.java @@ -46,7 +46,6 @@ import static org.junit.Assert.fail; /** * @author gjoranv */ -@SuppressWarnings("UnstableApiUsage") public class 
ApplicationMetricsHandlerTest { private static final ObjectMapper jsonMapper = new ObjectMapper(); @@ -79,8 +78,8 @@ public class ApplicationMetricsHandlerTest { public void setup() { setupWireMock(); - ApplicationMetricsRetriever applicationMetricsRetriever = new ApplicationMetricsRetriever( - nodesConfig(MOCK_METRICS_PATH)); + ApplicationMetricsRetriever applicationMetricsRetriever = + new ApplicationMetricsRetriever(nodesConfig(MOCK_METRICS_PATH)); ApplicationMetricsHandler handler = new ApplicationMetricsHandler(Executors.newSingleThreadExecutor(), applicationMetricsRetriever, diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/RealNodeRepository.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/RealNodeRepository.java index 9336451d08d..38e725360a0 100644 --- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/RealNodeRepository.java +++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/RealNodeRepository.java @@ -162,7 +162,7 @@ public class RealNodeRepository implements NodeRepository { return new NodeSpec( node.hostname, - Optional.ofNullable(node.openStackId), + Optional.ofNullable(node.id), Optional.ofNullable(node.wantedDockerImage).map(DockerImage::fromString), Optional.ofNullable(node.currentDockerImage).map(DockerImage::fromString), nodeState, @@ -244,7 +244,7 @@ public class RealNodeRepository implements NodeRepository { private static NodeRepositoryNode nodeRepositoryNodeFromAddNode(AddNode addNode) { NodeRepositoryNode node = new NodeRepositoryNode(); - node.openStackId = addNode.id.orElse("fake-" + addNode.hostname); + node.id = addNode.id.orElse("fake-" + addNode.hostname); node.hostname = addNode.hostname; node.parentHostname = addNode.parentHostname.orElse(null); addNode.nodeFlavor.ifPresent(f -> node.flavor = f); @@ -269,7 +269,7 @@ public class RealNodeRepository implements 
NodeRepository { public static NodeRepositoryNode nodeRepositoryNodeFromNodeAttributes(NodeAttributes nodeAttributes) { NodeRepositoryNode node = new NodeRepositoryNode(); - node.openStackId = nodeAttributes.getHostId().orElse(null); + node.id = nodeAttributes.getHostId().orElse(null); node.currentDockerImage = nodeAttributes.getDockerImage().map(DockerImage::asString).orElse(null); node.currentRestartGeneration = nodeAttributes.getRestartGeneration().orElse(null); node.currentRebootGeneration = nodeAttributes.getRebootGeneration().orElse(null); diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/bindings/NodeRepositoryNode.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/bindings/NodeRepositoryNode.java index 1e51fe279bb..f99fb3d8b76 100644 --- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/bindings/NodeRepositoryNode.java +++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/bindings/NodeRepositoryNode.java @@ -25,8 +25,8 @@ public class NodeRepositoryNode { public Set<String> ipAddresses; @JsonProperty("additionalIpAddresses") public Set<String> additionalIpAddresses; - @JsonProperty("openStackId") - public String openStackId; + @JsonProperty("id") + public String id; @JsonProperty("flavor") public String flavor; @JsonProperty("resources") @@ -99,7 +99,7 @@ public class NodeRepositoryNode { ", hostname='" + hostname + '\'' + ", ipAddresses=" + ipAddresses + ", additionalIpAddresses=" + additionalIpAddresses + - ", openStackId='" + openStackId + '\'' + + ", id='" + id + '\'' + ", modelName='" + modelName + '\'' + ", flavor='" + flavor + '\'' + ", resources=" + resources + diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/AutoscalingStatus.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/AutoscalingStatus.java index 
f44785cfab3..71a6d661594 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/AutoscalingStatus.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/AutoscalingStatus.java @@ -1,8 +1,6 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.vespa.hosted.provision.applications; -import com.yahoo.vespa.hosted.provision.autoscale.Autoscaler; - import java.util.Objects; /** diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/Cluster.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/Cluster.java index e0ccbe10b10..ad20f68ca33 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/Cluster.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/Cluster.java @@ -26,7 +26,7 @@ public class Cluster { private final ClusterSpec.Id id; private final boolean exclusive; private final ClusterResources min, max; - private boolean required; + private final boolean required; private final Optional<Suggestion> suggested; private final Optional<ClusterResources> target; diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocatableClusterResources.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocatableClusterResources.java index 078b0621a99..849ea03665b 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocatableClusterResources.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocatableClusterResources.java @@ -13,6 +13,7 @@ import com.yahoo.vespa.hosted.provision.provisioning.NodeResourceLimits; import java.util.List; import java.util.Optional; +import java.util.stream.Collectors; /** * @author bratseth @@ -139,23 +140,21 @@ public class AllocatableClusterResources { 
public static Optional<AllocatableClusterResources> from(ClusterResources wantedResources, ClusterSpec clusterSpec, Limits applicationLimits, - boolean required, NodeList hosts, NodeRepository nodeRepository) { - var capacityPolicies = new CapacityPolicies(nodeRepository); var systemLimits = new NodeResourceLimits(nodeRepository); boolean exclusive = clusterSpec.isExclusive(); - int actualNodes = capacityPolicies.decideSize(wantedResources.nodes(), required, true, false, clusterSpec); if ( !clusterSpec.isExclusive() && !nodeRepository.zone().getCloud().dynamicProvisioning()) { // We decide resources: Add overhead to what we'll request (advertised) to make sure real becomes (at least) cappedNodeResources var advertisedResources = nodeRepository.resourcesCalculator().realToRequest(wantedResources.nodeResources(), exclusive); advertisedResources = systemLimits.enlargeToLegal(advertisedResources, clusterSpec.type(), exclusive); // Ask for something legal advertisedResources = applicationLimits.cap(advertisedResources); // Overrides other conditions, even if it will then fail - advertisedResources = capacityPolicies.decideNodeResources(advertisedResources, required, clusterSpec); // Adjust to what we can request var realResources = nodeRepository.resourcesCalculator().requestToReal(advertisedResources, exclusive); // What we'll really get - if ( ! systemLimits.isWithinRealLimits(realResources, clusterSpec.type())) return Optional.empty(); + if ( ! 
systemLimits.isWithinRealLimits(realResources, clusterSpec.type())) + return Optional.empty(); + if (matchesAny(hosts, advertisedResources)) - return Optional.of(new AllocatableClusterResources(wantedResources.withNodes(actualNodes).with(realResources), + return Optional.of(new AllocatableClusterResources(wantedResources.with(realResources), advertisedResources, wantedResources, clusterSpec)); @@ -168,7 +167,6 @@ public class AllocatableClusterResources { for (Flavor flavor : nodeRepository.flavors().getFlavors()) { // Flavor decide resources: Real resources are the worst case real resources we'll get if we ask for these advertised resources NodeResources advertisedResources = nodeRepository.resourcesCalculator().advertisedResourcesOf(flavor); - advertisedResources = capacityPolicies.decideNodeResources(advertisedResources, required, clusterSpec); // Adjust to what we can get NodeResources realResources = nodeRepository.resourcesCalculator().requestToReal(advertisedResources, exclusive); // Adjust where we don't need exact match to the flavor @@ -184,7 +182,7 @@ public class AllocatableClusterResources { if ( ! between(applicationLimits.min().nodeResources(), applicationLimits.max().nodeResources(), advertisedResources)) continue; if ( ! 
systemLimits.isWithinRealLimits(realResources, clusterSpec.type())) continue; - var candidate = new AllocatableClusterResources(wantedResources.withNodes(actualNodes).with(realResources), + var candidate = new AllocatableClusterResources(wantedResources.with(realResources), advertisedResources, wantedResources, clusterSpec); diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocationOptimizer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocationOptimizer.java index b8a80a9bd2b..30432c1c078 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocationOptimizer.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocationOptimizer.java @@ -66,15 +66,12 @@ public class AllocationOptimizer { groupsAdjustedForRedundancy, limits, target, current, clusterModel)); var allocatableResources = AllocatableClusterResources.from(next, current.clusterSpec(), limits, - clusterModel.cluster().required(), hosts, nodeRepository); - if (allocatableResources.isEmpty()) continue; if (bestAllocation.isEmpty() || allocatableResources.get().preferableTo(bestAllocation.get())) bestAllocation = allocatableResources; } } - return bestAllocation; } diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaler.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaler.java index dcfb8fb7246..80c192f8353 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaler.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaler.java @@ -212,7 +212,7 @@ public class Autoscaler { @Override public String toString() { return "autoscaling advice: " + - (present ? (target.isPresent() ? "Scale to " + target.get() : "Don't scale") : " None"); + (present ? (target.isPresent() ? 
"Scale to " + target.get() : "Don't scale") : "None"); } } diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModel.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModel.java index 1001ab83cc0..3c26eef41d9 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModel.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModel.java @@ -24,14 +24,19 @@ public class ClusterModel { private static final Logger log = Logger.getLogger(ClusterModel.class.getName()); - private static final Duration CURRENT_LOAD_DURATION = Duration.ofMinutes(5); + /** Containers typically use more cpu right after generation change, so discard those metrics */ + public static final Duration warmupDuration = Duration.ofSeconds(90); + + private static final Duration currentLoadDuration = Duration.ofMinutes(5); static final double idealQueryCpuLoad = 0.8; static final double idealWriteCpuLoad = 0.95; static final double idealMemoryLoad = 0.65; - static final double idealDiskLoad = 0.6; + static final double idealContainerDiskLoad = 0.95; + static final double idealContentDiskLoad = 0.6; private final Application application; + private final ClusterSpec clusterSpec; private final Cluster cluster; /** The current nodes of this cluster, or empty if this models a new cluster not yet deployed */ private final NodeList nodes; @@ -51,6 +56,7 @@ public class ClusterModel { MetricsDb metricsDb, Clock clock) { this.application = application; + this.clusterSpec = clusterSpec; this.cluster = cluster; this.nodes = clusterNodes; this.clock = clock; @@ -61,12 +67,14 @@ public class ClusterModel { /** For testing */ ClusterModel(Application application, + ClusterSpec clusterSpec, Cluster cluster, Clock clock, Duration scalingDuration, ClusterTimeseries clusterTimeseries, ClusterNodesTimeseries nodeTimeseries) { this.application = application; + 
this.clusterSpec = clusterSpec; this.cluster = cluster; this.nodes = null; this.clock = clock; @@ -76,6 +84,8 @@ public class ClusterModel { this.nodeTimeseries = nodeTimeseries; } + public Application application() { return application; } + public ClusterSpec clusterSpec() { return clusterSpec; } public Cluster cluster() { return cluster; } /** Returns the predicted duration of a rescaling of this cluster */ @@ -100,14 +110,14 @@ public class ClusterModel { return queryFractionOfMax = clusterTimeseries().queryFractionOfMax(scalingDuration(), clock); } - /** Returns average load during the last {@link #CURRENT_LOAD_DURATION} */ - public Load currentLoad() { return nodeTimeseries().averageLoad(clock.instant().minus(CURRENT_LOAD_DURATION)); } + /** Returns average load during the last {@link #currentLoadDuration} */ + public Load currentLoad() { return nodeTimeseries().averageLoad(clock.instant().minus(currentLoadDuration)); } /** Returns average load during the last {@link #scalingDuration()} */ public Load averageLoad() { return nodeTimeseries().averageLoad(clock.instant().minus(scalingDuration())); } public Load idealLoad() { - return new Load(idealCpuLoad(), idealMemoryLoad, idealDiskLoad); + return new Load(idealCpuLoad(), idealMemoryLoad, idealDiskLoad()); } /** Ideal cpu load must take the application traffic fraction into account */ @@ -190,6 +200,12 @@ public class ClusterModel { return duration; } + private double idealDiskLoad() { + // Stateless clusters are not expected to consume more disk over time - + // if they do it is due to logs which will be rotated away right before the disk is full + return clusterSpec.isStateful() ? idealContentDiskLoad : idealContainerDiskLoad; + } + /** * Create a cluster model if possible and logs a warning and returns empty otherwise. 
* This is useful in cases where it's possible to continue without the cluser model, diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterNodesTimeseries.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterNodesTimeseries.java index f5657966e5f..5ad4ef2e263 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterNodesTimeseries.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterNodesTimeseries.java @@ -10,6 +10,8 @@ import java.util.List; import java.util.function.Predicate; import java.util.stream.Collectors; +import static com.yahoo.vespa.hosted.provision.autoscale.ClusterModel.warmupDuration; + /** * A series of metric snapshots for the nodes of a cluster used to compute load * @@ -24,13 +26,18 @@ public class ClusterNodesTimeseries { public ClusterNodesTimeseries(Duration period, Cluster cluster, NodeList clusterNodes, MetricsDb db) { this.clusterNodes = clusterNodes; - var timeseries = db.getNodeTimeseries(period, clusterNodes); - - if (cluster.lastScalingEvent().isPresent()) - timeseries = filter(timeseries, snapshot -> snapshot.generation() < 0 || // Content nodes do not yet send generation - snapshot.generation() >= cluster.lastScalingEvent().get().generation()); - timeseries = filter(timeseries, snapshot -> snapshot.inService() && snapshot.stable()); + // See warmupSeconds*4 into the past to see any generation change in it + // If none can be detected we assume the node is new/was down. 
+ // If either this is the case, or there is a generation change, we ignore + // the first warmupWindow metrics + var timeseries = db.getNodeTimeseries(period.plus(warmupDuration.multipliedBy(4)), clusterNodes); + if (cluster.lastScalingEvent().isPresent()) { + long currentGeneration = cluster.lastScalingEvent().get().generation(); + timeseries = keepCurrentGenerationAfterWarmup(timeseries, currentGeneration); + } + timeseries = keep(timeseries, snapshot -> snapshot.inService() && snapshot.stable()); + timeseries = keep(timeseries, snapshot -> ! snapshot.at().isBefore(db.clock().instant().minus(period))); this.timeseries = timeseries; } @@ -62,8 +69,15 @@ public class ClusterNodesTimeseries { return total.divide(count); } - private List<NodeTimeseries> filter(List<NodeTimeseries> timeseries, Predicate<NodeMetricSnapshot> filter) { - return timeseries.stream().map(nodeTimeseries -> nodeTimeseries.filter(filter)).collect(Collectors.toList()); + private static List<NodeTimeseries> keep(List<NodeTimeseries> timeseries, Predicate<NodeMetricSnapshot> filter) { + return timeseries.stream().map(nodeTimeseries -> nodeTimeseries.keep(filter)).collect(Collectors.toList()); + } + + private static List<NodeTimeseries> keepCurrentGenerationAfterWarmup(List<NodeTimeseries> timeseries, + long currentGeneration) { + return timeseries.stream() + .map(nodeTimeseries -> nodeTimeseries.keepCurrentGenerationAfterWarmup(currentGeneration)) + .collect(Collectors.toList()); } public static ClusterNodesTimeseries empty() { diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterTimeseries.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterTimeseries.java index 96896bb1ba0..131873b0137 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterTimeseries.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterTimeseries.java @@ -47,16 +47,19 @@ public 
class ClusterTimeseries { /** * The max query growth rate we can predict from this time-series as a fraction of the average traffic in the window + * + * @return the predicted max growth of the query rate, per minute as a fraction of the current load */ public double maxQueryGrowthRate(Duration window, Clock clock) { if (snapshots.isEmpty()) return 0.1; // Find the period having the highest growth rate, where total growth exceeds 30% increase - double maxGrowthRate = 0; // In query rate per minute + double maxGrowthRate = 0; // In query rate growth per second (to get good resolution) + for (int start = 0; start < snapshots.size(); start++) { if (start > 0) { // Optimization: Skip this point when starting from the previous is better relative to the best rate so far Duration duration = durationBetween(start - 1, start); - if (duration.toMinutes() != 0) { - double growthRate = (queryRateAt(start - 1) - queryRateAt(start)) / duration.toMinutes(); + if (duration.toSeconds() != 0) { + double growthRate = (queryRateAt(start - 1) - queryRateAt(start)) / duration.toSeconds(); if (growthRate >= maxGrowthRate) continue; } @@ -64,8 +67,8 @@ public class ClusterTimeseries { for (int end = start + 1; end < snapshots.size(); end++) { if (queryRateAt(end) >= queryRateAt(start) * 1.3) { Duration duration = durationBetween(start, end); - if (duration.toMinutes() == 0) continue; - double growthRate = (queryRateAt(end) - queryRateAt(start)) / duration.toMinutes(); + if (duration.toSeconds() == 0) continue; + double growthRate = (queryRateAt(end) - queryRateAt(start)) / duration.toSeconds(); if (growthRate > maxGrowthRate) maxGrowthRate = growthRate; } @@ -79,7 +82,7 @@ public class ClusterTimeseries { } OptionalDouble queryRate = queryRate(window, clock); if (queryRate.orElse(0) == 0) return 0.1; // Growth not expressible as a fraction of the current rate - return maxGrowthRate / queryRate.getAsDouble(); + return maxGrowthRate * 60 / queryRate.getAsDouble(); } /** diff --git 
a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/MemoryMetricsDb.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/MemoryMetricsDb.java index d9544b334ea..9eefd4e60b7 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/MemoryMetricsDb.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/MemoryMetricsDb.java @@ -4,8 +4,6 @@ package com.yahoo.vespa.hosted.provision.autoscale; import com.yahoo.collections.Pair; import com.yahoo.config.provision.ApplicationId; import com.yahoo.config.provision.ClusterSpec; -import com.yahoo.vespa.hosted.provision.Node; -import com.yahoo.vespa.hosted.provision.NodeRepository; import java.time.Clock; import java.time.Duration; @@ -15,7 +13,6 @@ import java.util.Collection; import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.Optional; import java.util.Set; import java.util.stream.Collectors; @@ -73,10 +70,10 @@ public class MemoryMetricsDb implements MetricsDb { Instant startTime = clock().instant().minus(period); synchronized (lock) { if (hostnames.isEmpty()) - return nodeTimeseries.values().stream().map(ns -> ns.justAfter(startTime)).collect(Collectors.toList()); + return nodeTimeseries.values().stream().map(ns -> ns.keepAfter(startTime)).collect(Collectors.toList()); else return hostnames.stream() - .map(hostname -> nodeTimeseries.getOrDefault(hostname, new NodeTimeseries(hostname, List.of())).justAfter(startTime)) + .map(hostname -> nodeTimeseries.getOrDefault(hostname, new NodeTimeseries(hostname, List.of())).keepAfter(startTime)) .collect(Collectors.toList()); } } @@ -94,7 +91,7 @@ public class MemoryMetricsDb implements MetricsDb { // 12 hours with 1k nodes and 3 resources and 1 measurement/sec is about 5Gb for (String hostname : nodeTimeseries.keySet()) { var timeseries = nodeTimeseries.get(hostname); - timeseries = 
timeseries.justAfter(clock().instant().minus(Autoscaler.maxScalingWindow())); + timeseries = timeseries.keepAfter(clock().instant().minus(Autoscaler.maxScalingWindow())); if (timeseries.isEmpty()) nodeTimeseries.remove(hostname); else diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/NodeTimeseries.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/NodeTimeseries.java index 864df9a16c4..4a5f8972e11 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/NodeTimeseries.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/NodeTimeseries.java @@ -9,6 +9,8 @@ import java.util.Optional; import java.util.function.Predicate; import java.util.stream.Collectors; +import static com.yahoo.vespa.hosted.provision.autoscale.ClusterModel.warmupDuration; + /** * A list of metric snapshots from a node, sorted by increasing time (newest last). * @@ -48,15 +50,42 @@ public class NodeTimeseries { return new NodeTimeseries(hostname(), list); } - public NodeTimeseries filter(Predicate<NodeMetricSnapshot> filter) { - return new NodeTimeseries(hostname, snapshots.stream().filter(filter).collect(Collectors.toList())); + /** Returns the instant this changed to the given generation, or empty if no *change* to this generation is present */ + private Optional<Instant> generationChange(long targetGeneration) { + if (snapshots.isEmpty()) return Optional.empty(); + if (snapshots.get(0).generation() == targetGeneration) return Optional.of(snapshots.get(0).at()); + for (NodeMetricSnapshot snapshot : snapshots) { + if (snapshot.generation() == targetGeneration) + return Optional.of(snapshot.at()); + } + return Optional.empty(); + } + + public NodeTimeseries keep(Predicate<NodeMetricSnapshot> filter) { + return new NodeTimeseries(hostname, snapshots.stream() + .filter(snapshot -> filter.test(snapshot)) + .collect(Collectors.toList())); } - public NodeTimeseries 
justAfter(Instant oldestTime) { + public NodeTimeseries keepAfter(Instant oldestTime) { return new NodeTimeseries(hostname, snapshots.stream() .filter(snapshot -> snapshot.at().equals(oldestTime) || snapshot.at().isAfter(oldestTime)) .collect(Collectors.toList())); } + public NodeTimeseries keepCurrentGenerationAfterWarmup(long currentGeneration) { + Optional<Instant> generationChange = generationChange(currentGeneration); + return keep(snapshot -> isOnCurrentGenerationAfterWarmup(snapshot, currentGeneration, generationChange)); + } + + private boolean isOnCurrentGenerationAfterWarmup(NodeMetricSnapshot snapshot, + long currentGeneration, + Optional<Instant> generationChange) { + if (snapshot.generation() < 0) return true; // Content nodes do not yet send generation + if (snapshot.generation() < currentGeneration) return false; + if (generationChange.isEmpty()) return true; + return ! snapshot.at().isBefore(generationChange.get().plus(warmupDuration)); + } + } diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/LoadBalancerExpirer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/LoadBalancerExpirer.java index 3b74533772b..fbc3d236421 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/LoadBalancerExpirer.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/LoadBalancerExpirer.java @@ -79,7 +79,7 @@ public class LoadBalancerExpirer extends NodeRepositoryMaintainer { allocatedNodes(lb.id()).isEmpty(), lb -> { try { attempts.add(1); - log.log(Level.INFO, () -> "Removing expired inactive load balancer " + lb.id()); + log.log(Level.INFO, () -> "Removing expired inactive " + lb.id()); service.remove(lb.id().application(), lb.id().cluster()); db.removeLoadBalancer(lb.id()); } catch (Exception e){ diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeMover.java 
b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeMover.java index 6c103627ad4..57db874fb84 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeMover.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeMover.java @@ -59,7 +59,7 @@ public abstract class NodeMover<MOVE> extends NodeRepositoryMaintainer { protected final MOVE findBestMove(NodesAndHosts<? extends NodeList> allNodes) { HostCapacity capacity = new HostCapacity(allNodes, nodeRepository().resourcesCalculator()); MOVE bestMove = emptyMove; - // Shuffle nodes so we did not get stuck if the chosen move is consistently discarded. Node moves happen through + // Shuffle nodes to not get stuck if the chosen move is consistently discarded. Node moves happen through // a soft request to retire (preferToRetire), which node allocation can disregard NodeList activeNodes = allNodes.nodes().nodeType(NodeType.tenant) .state(Node.State.active) diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/SwitchRebalancer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/SwitchRebalancer.java index 7bea671fbac..f01e8ecd301 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/SwitchRebalancer.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/SwitchRebalancer.java @@ -16,6 +16,7 @@ import java.time.Duration; import java.util.HashSet; import java.util.List; import java.util.Set; +import java.util.logging.Logger; /** * Ensure that nodes within a cluster a spread across hosts on exclusive network switches. 
@@ -24,6 +25,8 @@ import java.util.Set; */ public class SwitchRebalancer extends NodeMover<Move> { + private static final Logger LOG = Logger.getLogger(SwitchRebalancer.class.getName()); + private final Metric metric; private final Deployer deployer; @@ -40,7 +43,12 @@ public class SwitchRebalancer extends NodeMover<Move> { NodesAndHosts<NodeList> allNodes = NodesAndHosts.create(nodeRepository().nodes().list()); // Lockless as strong consistency is not needed if (!zoneIsStable(allNodes.nodes())) return 1.0; - findBestMove(allNodes).execute(false, Agent.SwitchRebalancer, deployer, metric, nodeRepository()); + Move bestMove = findBestMove(allNodes); + if (!bestMove.isEmpty()) { + LOG.info("Trying " + bestMove + " (" + bestMove.fromHost().switchHostname().orElse("<none>") + + " -> " + bestMove.toHost().switchHostname().orElse("<none>") + ")"); + } + bestMove.execute(false, Agent.SwitchRebalancer, deployer, metric, nodeRepository()); return 1.0; } diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/NodeResourcesSerializer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/NodeResourcesSerializer.java index 8c421443a65..1c3d3f5c489 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/NodeResourcesSerializer.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/NodeResourcesSerializer.java @@ -20,6 +20,7 @@ public class NodeResourcesSerializer { private static final String storageTypeKey = "storageType"; static void toSlime(NodeResources resources, Cursor resourcesObject) { + if (resources.isUnspecified()) return; resourcesObject.setDouble(vcpuKey, resources.vcpu()); resourcesObject.setDouble(memoryKey, resources.memoryGb()); resourcesObject.setDouble(diskKey, resources.diskGb()); @@ -29,6 +30,7 @@ public class NodeResourcesSerializer { } static NodeResources resourcesFromSlime(Inspector resources) { + if ( ! 
resources.field(vcpuKey).valid()) return NodeResources.unspecified(); return new NodeResources(resources.field(vcpuKey).asDouble(), resources.field(memoryKey).asDouble(), resources.field(diskKey).asDouble(), diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Activator.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Activator.java index 0d32b21016c..8c358301b85 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Activator.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Activator.java @@ -113,7 +113,8 @@ class Activator { var cluster = modified.cluster(clusterEntry.getKey()).get(); var previousResources = oldNodes.cluster(clusterEntry.getKey()).toResources(); var currentResources = clusterEntry.getValue().toResources(); - if ( ! previousResources.justNumbers().equals(currentResources.justNumbers())) { + if ( previousResources.nodeResources().isUnspecified() + || ! 
previousResources.justNumbers().equals(currentResources.justNumbers())) { cluster = cluster.with(ScalingEvent.create(previousResources, currentResources, generation, at)); } if (cluster.targetResources().isPresent() diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/CapacityPolicies.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/CapacityPolicies.java index 0c2c3c48df1..4088d717a67 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/CapacityPolicies.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/CapacityPolicies.java @@ -3,6 +3,7 @@ package com.yahoo.vespa.hosted.provision.provisioning; import com.yahoo.config.provision.ApplicationId; import com.yahoo.config.provision.Capacity; +import com.yahoo.config.provision.ClusterResources; import com.yahoo.config.provision.ClusterSpec; import com.yahoo.config.provision.Environment; import com.yahoo.config.provision.NodeResources; @@ -29,10 +30,21 @@ public class CapacityPolicies { this.sharedHosts = type -> PermanentFlags.SHARED_HOST.bindTo(nodeRepository.flagSource()).value().isEnabled(type.name()); } - public int decideSize(int requested, boolean required, boolean canFail, boolean isTester, ClusterSpec cluster) { + public Capacity applyOn(Capacity capacity, ApplicationId application) { + return capacity.withLimits(applyOn(capacity.minResources(), capacity, application), + applyOn(capacity.maxResources(), capacity, application)); + } + + private ClusterResources applyOn(ClusterResources resources, Capacity capacity, ApplicationId application) { + int nodes = decideSize(resources.nodes(), capacity.isRequired(), application.instance().isTester()); + int groups = Math.min(resources.groups(), nodes); // cannot have more groups than nodes + var nodeResources = decideNodeResources(resources.nodeResources(), capacity.isRequired()); + return new ClusterResources(nodes, groups, 
nodeResources); + } + + private int decideSize(int requested, boolean required, boolean isTester) { if (isTester) return 1; - ensureRedundancy(requested, cluster, canFail); if (required) return requested; switch(zone.environment()) { case dev : case test : return 1; @@ -43,11 +55,9 @@ public class CapacityPolicies { } } - public NodeResources decideNodeResources(NodeResources target, boolean required, ClusterSpec cluster) { - if (target.isUnspecified()) - target = defaultNodeResources(cluster.type()); - + private NodeResources decideNodeResources(NodeResources target, boolean required) { if (required) return target; + if (target.isUnspecified()) return target; // Cannot be modified // Dev does not cap the cpu or network of containers since usage is spotty: Allocate just a small amount exclusively if (zone.environment() == Environment.dev && !zone.getCloud().dynamicProvisioning()) @@ -77,28 +87,11 @@ public class CapacityPolicies { } /** - * Whether or not the nodes requested can share physical host with other applications. + * Returns whether the nodes requested can share physical host with other applications. * A security feature which only makes sense for prod. */ public boolean decideExclusivity(Capacity capacity, boolean requestedExclusivity) { return requestedExclusivity && (capacity.isRequired() || zone.environment() == Environment.prod); } - /** - * Throw if the node count is 1 for container and content clusters and we're in a production zone - * - * @throws IllegalArgumentException if only one node is requested and we can fail - */ - private void ensureRedundancy(int nodeCount, ClusterSpec cluster, boolean canFail) { - if (canFail && - nodeCount == 1 && - requiresRedundancy(cluster.type()) && - zone.environment().isProduction()) - throw new IllegalArgumentException("Deployments to prod require at least 2 nodes per cluster for redundancy. 
Not fulfilled for " + cluster); - } - - private static boolean requiresRedundancy(ClusterSpec.Type clusterType) { - return clusterType.isContent() || clusterType.isContainer(); - } - } diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/GroupPreparer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/GroupPreparer.java index ba46f0a9535..2d93763c631 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/GroupPreparer.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/GroupPreparer.java @@ -71,7 +71,7 @@ public class GroupPreparer { // Try preparing in memory without global unallocated lock. Most of the time there should be no changes and we // can return nodes previously allocated. NodeAllocation probeAllocation = prepareAllocation(application, cluster, requestedNodes, surplusActiveNodes, - indices::probeNext, wantedGroups, allNodesAndHosts); + indices::probeNext, wantedGroups, allNodesAndHosts); if (probeAllocation.fulfilledAndNoChanges()) { List<Node> acceptedNodes = probeAllocation.finalNodes(); surplusActiveNodes.removeAll(acceptedNodes); diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeAllocation.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeAllocation.java index 7cc4acc20b0..6c22a26d88a 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeAllocation.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeAllocation.java @@ -99,12 +99,12 @@ class NodeAllocation { * Note that if unallocated nodes are offered before allocated nodes, this will unnecessarily * reject allocated nodes due to index duplicates. * - * @param nodesPrioritized the nodes which are potentially on offer. These may belong to a different application etc. 
+ * @param candidates the nodes which are potentially on offer. These may belong to a different application etc. * @return the subset of offeredNodes which was accepted, with the correct allocation assigned */ - List<Node> offer(List<NodeCandidate> nodesPrioritized) { + List<Node> offer(List<NodeCandidate> candidates) { List<Node> accepted = new ArrayList<>(); - for (NodeCandidate candidate : nodesPrioritized) { + for (NodeCandidate candidate : candidates) { if (candidate.allocation().isPresent()) { Allocation allocation = candidate.allocation().get(); ClusterMembership membership = allocation.membership(); @@ -121,7 +121,7 @@ class NodeAllocation { if ((! saturated() && hasCompatibleFlavor(candidate) && requestedNodes.acceptable(candidate)) || acceptToRetire) { candidate = candidate.withNode(); if (candidate.isValid()) - accepted.add(acceptNode(candidate, shouldRetire(candidate, nodesPrioritized), resizeable)); + accepted.add(acceptNode(candidate, shouldRetire(candidate, candidates), resizeable)); } } else if (! 
saturated() && hasCompatibleFlavor(candidate)) { diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeCandidate.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeCandidate.java index 4f0ae688b1c..62ac1f0d0e6 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeCandidate.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeCandidate.java @@ -238,7 +238,6 @@ public abstract class NodeCandidate implements Nodelike, Comparable<NodeCandidat private double skewWith(NodeResources resources) { if (parent.isEmpty()) return 0; - NodeResources free = freeParentCapacity.justNumbers().subtract(resources.justNumbers()); return Node.skew(parent.get().flavor().resources(), free); } diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java index b35b0a5e301..7d15a2b30b1 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java @@ -84,8 +84,8 @@ public class NodeRepositoryProvisioner implements Provisioner { @Override public List<HostSpec> prepare(ApplicationId application, ClusterSpec cluster, Capacity requested, ProvisionLogger logger) { - log.log(Level.FINE, () -> "Received deploy prepare request for " + requested + - " for application " + application + ", cluster " + cluster); + log.log(Level.FINE, "Received deploy prepare request for " + requested + + " for application " + application + ", cluster " + cluster); if (cluster.group().isPresent()) throw new IllegalArgumentException("Node requests cannot specify a group"); @@ -96,21 +96,21 @@ public class NodeRepositoryProvisioner implements 
Provisioner { NodeResources resources; NodeSpec nodeSpec; if (requested.type() == NodeType.tenant) { - ClusterResources target = decideTargetResources(application, cluster, requested); - int nodeCount = capacityPolicies.decideSize(target.nodes(), - requested.isRequired(), - requested.canFail(), - application.instance().isTester(), - cluster); - groups = Math.min(target.groups(), nodeCount); // cannot have more groups than nodes - resources = capacityPolicies.decideNodeResources(target.nodeResources(), requested.isRequired(), cluster); - boolean exclusive = capacityPolicies.decideExclusivity(requested, cluster.isExclusive()); - nodeSpec = NodeSpec.from(nodeCount, resources, exclusive, requested.canFail()); - logIfDownscaled(target.nodes(), nodeCount, cluster, logger); + var actual = capacityPolicies.applyOn(requested, application); + ClusterResources target = decideTargetResources(application, cluster, actual); + boolean exclusive = capacityPolicies.decideExclusivity(actual, cluster.isExclusive()); + ensureRedundancy(target.nodes(), cluster, actual.canFail(), application); + logIfDownscaled(requested.minResources().nodes(), actual.minResources().nodes(), cluster, logger); + + groups = target.groups(); + resources = target.nodeResources().isUnspecified() ? capacityPolicies.defaultNodeResources(cluster.type()) + : target.nodeResources(); + nodeSpec = NodeSpec.from(target.nodes(), resources, exclusive, actual.canFail()); } else { groups = 1; // type request with multiple groups is not supported - resources = requested.minResources().nodeResources(); + resources = requested.minResources().nodeResources().isUnspecified() ? 
capacityPolicies.defaultNodeResources(cluster.type()) + : requested.minResources().nodeResources(); nodeSpec = NodeSpec.from(requested.type()); } return asSortedHosts(preparer.prepare(application, cluster, nodeSpec, groups), resources); @@ -164,12 +164,20 @@ public class NodeRepositoryProvisioner implements Provisioner { boolean firstDeployment = nodes.isEmpty(); AllocatableClusterResources currentResources = firstDeployment // start at min, preserve current resources otherwise - ? new AllocatableClusterResources(requested.minResources(), clusterSpec, nodeRepository) + ? new AllocatableClusterResources(initialResourcesFrom(requested, clusterSpec), clusterSpec, nodeRepository) : new AllocatableClusterResources(nodes.asList(), nodeRepository); var clusterModel = new ClusterModel(application, cluster, clusterSpec, nodes, nodeRepository.metricsDb(), nodeRepository.clock()); return within(Limits.of(requested), currentResources, firstDeployment, clusterModel); } + private ClusterResources initialResourcesFrom(Capacity requested, ClusterSpec clusterSpec) { + var initial = requested.minResources(); + if (initial.nodeResources().isUnspecified()) + initial = initial.with(capacityPolicies.defaultNodeResources(clusterSpec.type())); + return initial; + } + + /** Make the minimal adjustments needed to the current resources to stay within the limits */ private ClusterResources within(Limits limits, AllocatableClusterResources current, @@ -190,10 +198,28 @@ public class NodeRepositoryProvisioner implements Provisioner { .advertisedResources(); } - private void logIfDownscaled(int targetNodes, int actualNodes, ClusterSpec cluster, ProvisionLogger logger) { - if (zone.environment().isManuallyDeployed() && actualNodes < targetNodes) - logger.log(Level.INFO, "Requested " + targetNodes + " nodes for " + cluster + - ", downscaling to " + actualNodes + " nodes in " + zone.environment()); + /** + * Throw if the node count is 1 for container and content clusters and we're in a production 
zone + * + * @throws IllegalArgumentException if only one node is requested and we can fail + */ + private void ensureRedundancy(int nodeCount, ClusterSpec cluster, boolean canFail, ApplicationId application) { + if (! application.instance().isTester() && + canFail && + nodeCount == 1 && + requiresRedundancy(cluster.type()) && + zone.environment().isProduction()) + throw new IllegalArgumentException("Deployments to prod require at least 2 nodes per cluster for redundancy. Not fulfilled for " + cluster); + } + + private static boolean requiresRedundancy(ClusterSpec.Type clusterType) { + return clusterType.isContent() || clusterType.isContainer(); + } + + private void logIfDownscaled(int requestedMinNodes, int actualMinNodes, ClusterSpec cluster, ProvisionLogger logger) { + if (zone.environment().isManuallyDeployed() && actualMinNodes < requestedMinNodes) + logger.log(Level.INFO, "Requested " + requestedMinNodes + " nodes for " + cluster + + ", downscaling to " + actualMinNodes + " nodes in " + zone.environment()); } private List<HostSpec> asSortedHosts(List<Node> nodes, NodeResources requestedResources) { diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java index 282b0d96cf4..b12368b2834 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java @@ -10,7 +10,6 @@ import com.yahoo.vespa.hosted.provision.Node; import com.yahoo.vespa.hosted.provision.NodeList; import com.yahoo.vespa.hosted.provision.NodeRepository; import com.yahoo.vespa.hosted.provision.NodesAndHosts; -import com.yahoo.vespa.hosted.provision.node.Nodes; import java.util.ArrayList; import java.util.List; @@ -25,13 +24,11 @@ import java.util.stream.Collectors; */ class Preparer { - private final NodeRepository nodeRepository; 
private final GroupPreparer groupPreparer; private final Optional<LoadBalancerProvisioner> loadBalancerProvisioner; public Preparer(NodeRepository nodeRepository, Optional<HostProvisioner> hostProvisioner, Optional<LoadBalancerProvisioner> loadBalancerProvisioner) { - this.nodeRepository = nodeRepository; this.loadBalancerProvisioner = loadBalancerProvisioner; this.groupPreparer = new GroupPreparer(nodeRepository, hostProvisioner); } @@ -69,9 +66,10 @@ class Preparer { for (int groupIndex = 0; groupIndex < wantedGroups; groupIndex++) { ClusterSpec clusterGroup = cluster.with(Optional.of(ClusterSpec.Group.from(groupIndex))); - GroupPreparer.PrepareResult result = groupPreparer.prepare( - application, clusterGroup, requestedNodes.fraction(wantedGroups), - surplusNodes, indices, wantedGroups, allNodesAndHosts); + GroupPreparer.PrepareResult result = groupPreparer.prepare(application, clusterGroup, + requestedNodes.fraction(wantedGroups), + surplusNodes, indices, wantedGroups, + allNodesAndHosts); allNodesAndHosts = result.allNodesAndHosts; // Might have changed List<Node> accepted = result.prepared; if (requestedNodes.rejectNonActiveParent()) { diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java index 601a7109533..a04a3828f13 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java @@ -15,6 +15,7 @@ import com.yahoo.config.provision.NodeType; import com.yahoo.config.provision.RegionName; import com.yahoo.config.provision.SystemName; import com.yahoo.config.provision.Zone; +import com.yahoo.vespa.hosted.provision.Node; import com.yahoo.vespa.hosted.provision.NodeRepository; import com.yahoo.vespa.hosted.provision.Nodelike; import 
com.yahoo.vespa.hosted.provision.provisioning.CapacityPolicies; @@ -51,10 +52,10 @@ public class AutoscalingTest { tester.deploy(application1, cluster1, 5, 1, hostResources); tester.clock().advance(Duration.ofDays(1)); - assertTrue("No measurements -> No change", tester.autoscale(application1, cluster1.id(), capacity).isEmpty()); + assertTrue("No measurements -> No change", tester.autoscale(application1, cluster1, capacity).isEmpty()); tester.addCpuMeasurements(0.25f, 1f, 59, application1); - assertTrue("Too few measurements -> No change", tester.autoscale(application1, cluster1.id(), capacity).isEmpty()); + assertTrue("Too few measurements -> No change", tester.autoscale(application1, cluster1, capacity).isEmpty()); tester.clock().advance(Duration.ofDays(1)); tester.addCpuMeasurements(0.25f, 1f, 120, application1); @@ -62,10 +63,10 @@ public class AutoscalingTest { tester.addQueryRateMeasurements(application1, cluster1.id(), 10, t -> t == 0 ? 20.0 : 10.0); // Query traffic only ClusterResources scaledResources = tester.assertResources("Scaling up since resource usage is too high", 15, 1, 1.2, 28.6, 28.6, - tester.autoscale(application1, cluster1.id(), capacity).target()); + tester.autoscale(application1, cluster1, capacity)); tester.deploy(application1, cluster1, scaledResources); - assertTrue("Cluster in flux -> No further change", tester.autoscale(application1, cluster1.id(), capacity).isEmpty()); + assertTrue("Cluster in flux -> No further change", tester.autoscale(application1, cluster1, capacity).isEmpty()); tester.deactivateRetired(application1, cluster1, scaledResources); @@ -74,19 +75,19 @@ public class AutoscalingTest { tester.clock().advance(Duration.ofMinutes(-10 * 5)); tester.addQueryRateMeasurements(application1, cluster1.id(), 10, t -> t == 0 ? 
20.0 : 10.0); // Query traffic only assertTrue("Load change is large, but insufficient measurements for new config -> No change", - tester.autoscale(application1, cluster1.id(), capacity).isEmpty()); + tester.autoscale(application1, cluster1, capacity).isEmpty()); tester.addCpuMeasurements(0.19f, 1f, 100, application1); tester.clock().advance(Duration.ofMinutes(-10 * 5)); tester.addQueryRateMeasurements(application1, cluster1.id(), 10, t -> t == 0 ? 20.0 : 10.0); // Query traffic only - assertEquals("Load change is small -> No change", Optional.empty(), tester.autoscale(application1, cluster1.id(), capacity).target()); + assertEquals("Load change is small -> No change", Optional.empty(), tester.autoscale(application1, cluster1, capacity).target()); tester.addCpuMeasurements(0.1f, 1f, 120, application1); tester.clock().advance(Duration.ofMinutes(-10 * 5)); tester.addQueryRateMeasurements(application1, cluster1.id(), 10, t -> t == 0 ? 20.0 : 10.0); // Query traffic only tester.assertResources("Scaling down to minimum since usage has gone down significantly", 7, 1, 1.0, 66.7, 66.7, - tester.autoscale(application1, cluster1.id(), capacity).target()); + tester.autoscale(application1, cluster1, capacity)); var events = tester.nodeRepository().applications().get(application1).get().cluster(cluster1.id()).get().scalingEvents(); } @@ -109,8 +110,8 @@ public class AutoscalingTest { tester.clock().advance(Duration.ofMinutes(-10 * 5)); tester.addQueryRateMeasurements(application1, cluster1.id(), 10, t -> t == 0 ? 
20.0 : 10.0); // Query traffic only ClusterResources scaledResources = tester.assertResources("Scaling up since cpu usage is too high", - 7, 1, 2.5, 80.0, 80.0, - tester.autoscale(application1, cluster1.id(), capacity).target()); + 7, 1, 2.5, 80.0, 50.5, + tester.autoscale(application1, cluster1, capacity)); tester.deploy(application1, cluster1, scaledResources); tester.deactivateRetired(application1, cluster1, scaledResources); @@ -119,8 +120,8 @@ public class AutoscalingTest { tester.clock().advance(Duration.ofMinutes(-10 * 5)); tester.addQueryRateMeasurements(application1, cluster1.id(), 10, t -> t == 0 ? 20.0 : 10.0); // Query traffic only tester.assertResources("Scaling down since cpu usage has gone down", - 4, 1, 2.5, 68.6, 68.6, - tester.autoscale(application1, cluster1.id(), capacity).target()); + 4, 1, 2.5, 68.6, 27.4, + tester.autoscale(application1, cluster1, capacity)); } @Test @@ -147,7 +148,7 @@ public class AutoscalingTest { var capacity = Capacity.from(min, max); ClusterResources scaledResources = tester.assertResources("Scaling up since resource usage is too high", 14, 1, 1.4, 30.8, 30.8, - tester.autoscale(application1, cluster1.id(), capacity).target()); + tester.autoscale(application1, cluster1, capacity)); assertEquals("Disk speed from min/max is used", NodeResources.DiskSpeed.any, scaledResources.nodeResources().diskSpeed()); tester.deploy(application1, cluster1, scaledResources); @@ -180,7 +181,7 @@ public class AutoscalingTest { // Autoscaling: Uses disk-speed any as well tester.clock().advance(Duration.ofDays(2)); tester.addCpuMeasurements(0.8f, 1f, 120, application1); - Autoscaler.Advice advice = tester.autoscale(application1, cluster1.id(), capacity); + Autoscaler.Advice advice = tester.autoscale(application1, cluster1, capacity); assertEquals(NodeResources.DiskSpeed.any, advice.target().get().nodeResources().diskSpeed()); @@ -204,8 +205,8 @@ public class AutoscalingTest { tester.clock().advance(Duration.ofMinutes(-10 * 5)); 
tester.addQueryRateMeasurements(application1, cluster1.id(), 10, t -> t == 0 ? 20.0 : 10.0); // Query traffic only tester.assertResources("Scaling up to limit since resource usage is too high", - 6, 1, 2.4, 78.0, 79.0, - tester.autoscale(application1, cluster1.id(), capacity).target()); + 6, 1, 2.4, 78.0, 70.0, + tester.autoscale(application1, cluster1, capacity)); } @Test @@ -224,7 +225,7 @@ public class AutoscalingTest { tester.addMeasurements(0.05f, 0.05f, 0.05f, 0, 120, application1); tester.assertResources("Scaling down to limit since resource usage is low", 4, 1, 1.8, 7.7, 10.0, - tester.autoscale(application1, cluster1.id(), capacity).target()); + tester.autoscale(application1, cluster1, capacity)); } @Test @@ -245,13 +246,14 @@ public class AutoscalingTest { tester.deploy(application1, cluster1, Capacity.from(min, max)); tester.assertResources("Min number of nodes and default resources", 2, 1, defaultResources, - Optional.of(tester.nodeRepository().nodes().list().owner(application1).toResources())); + tester.nodeRepository().nodes().list().owner(application1).toResources()); tester.addMeasurements(0.25f, 0.95f, 0.95f, 0, 120, application1); tester.clock().advance(Duration.ofMinutes(-10 * 5)); tester.addQueryRateMeasurements(application1, cluster1.id(), 10, t -> t == 0 ? 20.0 : 10.0); // Query traffic only tester.assertResources("Scaling up to limit since resource usage is too high", - 4, 1, defaultResources, - tester.autoscale(application1, cluster1.id(), capacity).target()); + 4, 1, + defaultResources.vcpu(), defaultResources.memoryGb(), defaultResources.diskGb(), + tester.autoscale(application1, cluster1, capacity)); } @Test @@ -272,7 +274,7 @@ public class AutoscalingTest { tester.addQueryRateMeasurements(application1, cluster1.id(), 10, t -> t == 0 ? 
20.0 : 10.0); // Query traffic only tester.assertResources("Scaling up since resource usage is too high", 6, 6, 3.6, 8.0, 10.0, - tester.autoscale(application1, cluster1.id(), capacity).target()); + tester.autoscale(application1, cluster1, capacity)); } @Test @@ -290,7 +292,7 @@ public class AutoscalingTest { tester.deploy(application1, cluster1, 5, 1, resources); tester.clock().advance(Duration.ofDays(1)); tester.addCpuMeasurements(0.25f, 1f, 120, application1); - assertTrue(tester.autoscale(application1, cluster1.id(), capacity).isEmpty()); + assertTrue(tester.autoscale(application1, cluster1, capacity).isEmpty()); } @Test @@ -314,15 +316,15 @@ public class AutoscalingTest { // deploy tester.deploy(application1, cluster1, 3, 1, min.nodeResources()); - tester.addDiskMeasurements(0.01f, 1f, 120, application1); - tester.clock().advance(Duration.ofMinutes(-10 * 5)); + Duration timeAdded = tester.addDiskMeasurements(0.01f, 1f, 120, application1); + tester.clock().advance(timeAdded.negated()); tester.addQueryRateMeasurements(application1, cluster1.id(), 10, t -> 10.0); // Query traffic only - Optional<ClusterResources> suggestion = tester.suggest(application1, cluster1.id(), min, max).target(); + Autoscaler.Advice suggestion = tester.suggest(application1, cluster1.id(), min, max); tester.assertResources("Choosing the remote disk flavor as it has less disk", 6, 1, 3.0, 100.0, 10.0, suggestion); assertEquals("Choosing the remote disk flavor as it has less disk", - StorageType.remote, suggestion.get().nodeResources().storageType()); + StorageType.remote, suggestion.target().get().nodeResources().storageType()); } @Test @@ -341,8 +343,8 @@ public class AutoscalingTest { tester.clock().advance(Duration.ofMinutes(-10 * 5)); tester.addQueryRateMeasurements(application1, cluster1.id(), 10, t -> t == 0 ? 
20.0 : 10.0); // Query traffic only tester.assertResources("Scaling up since resource usage is too high", - 7, 1, 2.5, 80.0, 80.0, - tester.suggest(application1, cluster1.id(), min, max).target()); + 7, 1, 2.5, 80.0, 50.5, + tester.suggest(application1, cluster1.id(), min, max)); } @Test @@ -360,7 +362,7 @@ public class AutoscalingTest { tester.deploy(application1, cluster1, 2, 1, resources); tester.addMeasurements(0.5f, 0.6f, 0.7f, 1, false, true, 120, application1); assertTrue("Not scaling up since nodes were measured while cluster was unstable", - tester.autoscale(application1, cluster1.id(), capacity).isEmpty()); + tester.autoscale(application1, cluster1, capacity).isEmpty()); } @Test @@ -378,7 +380,7 @@ public class AutoscalingTest { tester.deploy(application1, cluster1, 2, 1, resources); tester.addMeasurements(0.5f, 0.6f, 0.7f, 1, true, false, 120, application1); assertTrue("Not scaling up since nodes were measured while cluster was unstable", - tester.autoscale(application1, cluster1.id(), capacity).isEmpty()); + tester.autoscale(application1, cluster1, capacity).isEmpty()); } @Test @@ -398,8 +400,8 @@ public class AutoscalingTest { tester.clock().advance(Duration.ofMinutes(-10 * 5)); tester.addQueryRateMeasurements(application1, cluster1.id(), 10, t -> t == 0 ? 20.0 : 10.0); // Query traffic only tester.assertResources("Scaling up since resource usage is too high", - 7, 7, 2.5, 80.0, 80.0, - tester.autoscale(application1, cluster1.id(), capacity).target()); + 7, 7, 2.5, 80.0, 50.5, + tester.autoscale(application1, cluster1, capacity)); } @Test @@ -421,8 +423,8 @@ public class AutoscalingTest { t -> t == 0 ? 
20.0 : 10.0, t -> 1.0); tester.assertResources("Scaling up since resource usage is too high, changing to 1 group is cheaper", - 8, 1, 2.6, 83.3, 83.3, - tester.autoscale(application1, cluster1.id(), capacity).target()); + 8, 1, 2.6, 83.3, 52.6, + tester.autoscale(application1, cluster1, capacity)); } /** Same as above but mostly write traffic, which favors smaller groups */ @@ -445,8 +447,8 @@ public class AutoscalingTest { t -> t == 0 ? 20.0 : 10.0, t -> 100.0); tester.assertResources("Scaling down since resource usage is too high, changing to 1 group is cheaper", - 4, 1, 2.1, 83.3, 83.3, - tester.autoscale(application1, cluster1.id(), capacity).target()); + 4, 1, 2.1, 83.3, 52.6, + tester.autoscale(application1, cluster1, capacity)); } @Test @@ -468,7 +470,7 @@ public class AutoscalingTest { tester.addQueryRateMeasurements(application1, cluster1.id(), 10, t -> t == 0 ? 20.0 : 10.0); // Query traffic only tester.assertResources("Increase group size to reduce memory load", 8, 2, 12.4, 96.2, 62.5, - tester.autoscale(application1, cluster1.id(), capacity).target()); + tester.autoscale(application1, cluster1, capacity)); } @Test @@ -489,7 +491,7 @@ public class AutoscalingTest { tester.addMemMeasurements(0.02f, 0.95f, 120, application1); tester.assertResources("Scaling down", 6, 1, 2.9, 4.0, 95.0, - tester.autoscale(application1, cluster1.id(), capacity).target()); + tester.autoscale(application1, cluster1, capacity)); } @Test @@ -509,7 +511,7 @@ public class AutoscalingTest { tester.addMemMeasurements(0.02f, 0.95f, 120, application1); tester.clock().advance(Duration.ofMinutes(-10 * 5)); tester.addQueryRateMeasurements(application1, cluster1.id(), 10, t -> t == 0 ? 
20.0 : 10.0); // Query traffic only - assertTrue(tester.autoscale(application1, cluster1.id(), capacity).target().isEmpty()); + assertTrue(tester.autoscale(application1, cluster1, capacity).target().isEmpty()); // Trying the same later causes autoscaling tester.clock().advance(Duration.ofDays(2)); @@ -518,7 +520,7 @@ public class AutoscalingTest { tester.addQueryRateMeasurements(application1, cluster1.id(), 10, t -> t == 0 ? 20.0 : 10.0); // Query traffic only tester.assertResources("Scaling down", 6, 1, 1.4, 4.0, 95.0, - tester.autoscale(application1, cluster1.id(), capacity).target()); + tester.autoscale(application1, cluster1, capacity)); } @Test @@ -529,7 +531,8 @@ public class AutoscalingTest { var capacity = Capacity.from(min, max); { // No memory tax - AutoscalingTester tester = new AutoscalingTester(Environment.prod, hostResources, + AutoscalingTester tester = new AutoscalingTester(new Zone(Environment.prod, RegionName.from("us-east")), + hostResources, new OnlySubtractingWhenForecastingCalculator(0)); ApplicationId application1 = tester.applicationId("app1"); @@ -541,11 +544,12 @@ public class AutoscalingTest { tester.addQueryRateMeasurements(application1, cluster1.id(), 10, t -> t == 0 ? 20.0 : 10.0); // Query traffic only tester.assertResources("Scaling up", 4, 1, 6.7, 20.5, 200, - tester.autoscale(application1, cluster1.id(), capacity).target()); + tester.autoscale(application1, cluster1, capacity)); } { // 15 Gb memory tax - AutoscalingTester tester = new AutoscalingTester(Environment.prod, hostResources, + AutoscalingTester tester = new AutoscalingTester(new Zone(Environment.prod, RegionName.from("us-east")), + hostResources, new OnlySubtractingWhenForecastingCalculator(15)); ApplicationId application1 = tester.applicationId("app1"); @@ -557,7 +561,7 @@ public class AutoscalingTest { tester.addQueryRateMeasurements(application1, cluster1.id(), 10, t -> t == 0 ? 
20.0 : 10.0); // Query traffic only tester.assertResources("Scaling up", 4, 1, 6.7, 35.5, 200, - tester.autoscale(application1, cluster1.id(), capacity).target()); + tester.autoscale(application1, cluster1, capacity)); } } @@ -588,7 +592,7 @@ public class AutoscalingTest { tester.addMemMeasurements(0.9f, 0.6f, 120, application1); ClusterResources scaledResources = tester.assertResources("Scaling up since resource usage is too high.", 8, 1, 3, 83, 34.3, - tester.autoscale(application1, cluster1.id(), capacity).target()); + tester.autoscale(application1, cluster1, capacity)); tester.deploy(application1, cluster1, scaledResources); tester.deactivateRetired(application1, cluster1, scaledResources); @@ -599,7 +603,7 @@ public class AutoscalingTest { tester.addQueryRateMeasurements(application1, cluster1.id(), 10, t -> t == 0 ? 20.0 : 10.0); // Query traffic only tester.assertResources("Scaling down since resource usage has gone down", 5, 1, 3, 83, 36.0, - tester.autoscale(application1, cluster1.id(), capacity).target()); + tester.autoscale(application1, cluster1, capacity)); } @Test @@ -621,18 +625,17 @@ public class AutoscalingTest { // (no read share stored) tester.assertResources("Advice to scale up since we set aside for bcp by default", 7, 1, 3, 100, 100, - tester.autoscale(application1, cluster1.id(), capacity).target()); + tester.autoscale(application1, cluster1, capacity)); tester.storeReadShare(0.25, 0.5, application1); tester.assertResources("Half of global share is the same as the default assumption used above", 7, 1, 3, 100, 100, - tester.autoscale(application1, cluster1.id(), capacity).target()); + tester.autoscale(application1, cluster1, capacity)); tester.storeReadShare(0.5, 0.5, application1); tester.assertResources("Advice to scale down since we don't need room for bcp", 4, 1, 3, 100, 100, - tester.autoscale(application1, cluster1.id(), capacity).target()); - + tester.autoscale(application1, cluster1, capacity)); } @Test @@ -649,36 +652,36 @@ public 
class AutoscalingTest { ClusterSpec cluster1 = tester.clusterSpec(ClusterSpec.Type.container, "cluster1"); tester.deploy(application1, cluster1, 5, 1, midResources); - tester.addQueryRateMeasurements(application1, cluster1.id(), 100, t -> t == 0 ? 20.0 : 10.0); - tester.clock().advance(Duration.ofMinutes(-100 * 5)); - tester.addCpuMeasurements(0.25f, 1f, 100, application1); + Duration timeAdded = tester.addQueryRateMeasurements(application1, cluster1.id(), 100, t -> t == 0 ? 20.0 : 10.0); + tester.clock().advance(timeAdded.negated()); + tester.addCpuMeasurements(0.25f, 1f, 200, application1); // (no query rate data) tester.assertResources("Scale up since we assume we need 2x cpu for growth when no data scaling time data", 5, 1, 6.3, 100, 100, - tester.autoscale(application1, cluster1.id(), capacity).target()); + tester.autoscale(application1, cluster1, capacity)); tester.setScalingDuration(application1, cluster1.id(), Duration.ofMinutes(5)); - tester.addQueryRateMeasurements(application1, cluster1.id(), - 100, - t -> 10.0 + (t < 50 ? t : 100 - t)); - tester.clock().advance(Duration.ofMinutes(-100 * 5)); - tester.addCpuMeasurements(0.25f, 1f, 100, application1); + timeAdded = tester.addQueryRateMeasurements(application1, cluster1.id(), + 100, + t -> 10.0 + (t < 50 ? t : 100 - t)); + tester.clock().advance(timeAdded.negated()); + tester.addCpuMeasurements(0.25f, 1f, 200, application1); tester.assertResources("Scale down since observed growth is slower than scaling time", 5, 1, 3.4, 100, 100, - tester.autoscale(application1, cluster1.id(), capacity).target()); + tester.autoscale(application1, cluster1, capacity)); tester.clearQueryRateMeasurements(application1, cluster1.id()); tester.setScalingDuration(application1, cluster1.id(), Duration.ofMinutes(60)); - tester.addQueryRateMeasurements(application1, cluster1.id(), - 100, - t -> 10.0 + (t < 50 ? 
t * t * t : 125000 - (t - 49) * (t - 49) * (t - 49))); - tester.clock().advance(Duration.ofMinutes(-100 * 5)); - tester.addCpuMeasurements(0.25f, 1f, 100, application1); + timeAdded = tester.addQueryRateMeasurements(application1, cluster1.id(), + 100, + t -> 10.0 + (t < 50 ? t * t * t : 125000 - (t - 49) * (t - 49) * (t - 49))); + tester.clock().advance(timeAdded.negated()); + tester.addCpuMeasurements(0.25f, 1f, 200, application1); tester.assertResources("Scale up since observed growth is faster than scaling time", 5, 1, 10.0, 100, 100, - tester.autoscale(application1, cluster1.id(), capacity).target()); + tester.autoscale(application1, cluster1, capacity)); } @Test @@ -705,63 +708,63 @@ public class AutoscalingTest { tester.addLoadMeasurements(application1, cluster1.id(), 100, t -> t == 0 ? 20.0 : 10.0, t -> 10.0); tester.assertResources("Query and write load is equal -> scale up somewhat", 5, 1, 7.3, 100, 100, - tester.autoscale(application1, cluster1.id(), capacity).target()); + tester.autoscale(application1, cluster1, capacity)); tester.addCpuMeasurements(0.4f, 1f, 100, application1); tester.clock().advance(Duration.ofMinutes(-100 * 5)); tester.addLoadMeasurements(application1, cluster1.id(), 100, t -> t == 0 ? 80.0 : 40.0, t -> 10.0); tester.assertResources("Query load is 4x write load -> scale up more", 5, 1, 9.5, 100, 100, - tester.autoscale(application1, cluster1.id(), capacity).target()); + tester.autoscale(application1, cluster1, capacity)); tester.addCpuMeasurements(0.3f, 1f, 100, application1); tester.clock().advance(Duration.ofMinutes(-100 * 5)); tester.addLoadMeasurements(application1, cluster1.id(), 100, t -> t == 0 ? 
20.0 : 10.0, t -> 100.0); tester.assertResources("Write load is 10x query load -> scale down", 5, 1, 2.9, 100, 100, - tester.autoscale(application1, cluster1.id(), capacity).target()); + tester.autoscale(application1, cluster1, capacity)); tester.addCpuMeasurements(0.4f, 1f, 100, application1); tester.clock().advance(Duration.ofMinutes(-100 * 5)); tester.addLoadMeasurements(application1, cluster1.id(), 100, t -> t == 0 ? 20.0 : 10.0, t-> 0.0); tester.assertResources("Query only -> largest possible", 5, 1, 10.0, 100, 100, - tester.autoscale(application1, cluster1.id(), capacity).target()); + tester.autoscale(application1, cluster1, capacity)); tester.addCpuMeasurements(0.4f, 1f, 100, application1); tester.clock().advance(Duration.ofMinutes(-100 * 5)); tester.addLoadMeasurements(application1, cluster1.id(), 100, t -> 0.0, t -> 10.0); tester.assertResources("Write only -> smallest possible", 5, 1, 2.1, 100, 100, - tester.autoscale(application1, cluster1.id(), capacity).target()); + tester.autoscale(application1, cluster1, capacity)); } @Test - public void test_cd_autoscaling_test() { + public void test_autoscaling_in_dev() { NodeResources resources = new NodeResources(1, 4, 50, 1); - ClusterResources min = new ClusterResources( 2, 1, resources); + ClusterResources min = new ClusterResources( 1, 1, resources); ClusterResources max = new ClusterResources(3, 1, resources); - var capacity = Capacity.from(min, max); - AutoscalingTester tester = new AutoscalingTester(resources.withVcpu(resources.vcpu() * 2)); + Capacity capacity = Capacity.from(min, max, false, true); + + AutoscalingTester tester = new AutoscalingTester(Environment.dev, resources.withVcpu(resources.vcpu() * 2)); ApplicationId application1 = tester.applicationId("application1"); ClusterSpec cluster1 = tester.clusterSpec(ClusterSpec.Type.container, "cluster1"); - tester.deploy(application1, cluster1, 2, 1, resources); + tester.deploy(application1, cluster1, capacity); 
tester.addQueryRateMeasurements(application1, cluster1.id(), - 500, t -> 0.0); - tester.addCpuMeasurements(0.5f, 1f, 10, application1); - - tester.assertResources("Advice to scale up since observed growth is much faster than scaling time", - 3, 1, 1, 4, 50, - tester.autoscale(application1, cluster1.id(), capacity).target()); + 500, t -> 100.0); + tester.addCpuMeasurements(1.0f, 1f, 10, application1); + assertTrue("Not attempting to scale up because policies dictate we'll only get one node", + tester.autoscale(application1, cluster1, capacity).target().isEmpty()); } + /** Same setup as test_autoscaling_in_dev(), just with required = true */ @Test - public void test_autoscaling_in_dev() { + public void test_autoscaling_in_dev_with_required_resources() { NodeResources resources = new NodeResources(1, 4, 50, 1); ClusterResources min = new ClusterResources( 1, 1, resources); ClusterResources max = new ClusterResources(3, 1, resources); - Capacity capacity = Capacity.from(min, max, false, true); + Capacity capacity = Capacity.from(min, max, true, true); AutoscalingTester tester = new AutoscalingTester(Environment.dev, resources.withVcpu(resources.vcpu() * 2)); ApplicationId application1 = tester.applicationId("application1"); @@ -771,19 +774,20 @@ public class AutoscalingTest { tester.addQueryRateMeasurements(application1, cluster1.id(), 500, t -> 100.0); tester.addCpuMeasurements(1.0f, 1f, 10, application1); - assertTrue("Not attempting to scale up because policies dictate we'll only get one node", - tester.autoscale(application1, cluster1.id(), capacity).target().isEmpty()); + tester.assertResources("We scale up even in dev because resources are required", + 3, 1, 1.0, 4, 50, + tester.autoscale(application1, cluster1, capacity)); } - /** Same setup as test_autoscaling_in_dev(), just with required = true */ @Test - public void test_autoscaling_in_dev_with_required_resources() { - NodeResources resources = new NodeResources(1, 4, 50, 1); + public void 
test_autoscaling_in_dev_with_required_unspecified_resources() { + NodeResources resources = NodeResources.unspecified(); ClusterResources min = new ClusterResources( 1, 1, resources); ClusterResources max = new ClusterResources(3, 1, resources); Capacity capacity = Capacity.from(min, max, true, true); - AutoscalingTester tester = new AutoscalingTester(Environment.dev, resources.withVcpu(resources.vcpu() * 2)); + AutoscalingTester tester = new AutoscalingTester(Environment.dev, + new NodeResources(10, 16, 100, 2)); ApplicationId application1 = tester.applicationId("application1"); ClusterSpec cluster1 = tester.clusterSpec(ClusterSpec.Type.container, "cluster1"); @@ -792,8 +796,8 @@ public class AutoscalingTest { 500, t -> 100.0); tester.addCpuMeasurements(1.0f, 1f, 10, application1); tester.assertResources("We scale up even in dev because resources are required", - 3, 1, 1.0, 4, 50, - tester.autoscale(application1, cluster1.id(), capacity).target()); + 3, 1, 1.5, 8, 50, + tester.autoscale(application1, cluster1, capacity)); } /** diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTester.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTester.java index 8d59181a027..8586704a426 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTester.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTester.java @@ -24,10 +24,12 @@ import com.yahoo.vespa.hosted.provision.applications.Cluster; import com.yahoo.vespa.hosted.provision.applications.ScalingEvent; import com.yahoo.vespa.hosted.provision.node.Agent; import com.yahoo.vespa.hosted.provision.node.IP; +import com.yahoo.vespa.hosted.provision.provisioning.CapacityPolicies; import com.yahoo.vespa.hosted.provision.provisioning.HostResourcesCalculator; import com.yahoo.vespa.hosted.provision.provisioning.ProvisioningTester; import java.time.Duration; 
+import java.time.Instant; import java.util.List; import java.util.Map; import java.util.Optional; @@ -45,6 +47,7 @@ class AutoscalingTester { private final ProvisioningTester provisioningTester; private final Autoscaler autoscaler; private final MockHostResourcesCalculator hostResourcesCalculator; + private final CapacityPolicies capacityPolicies; /** Creates an autoscaling tester with a single host type ready */ public AutoscalingTester(NodeResources hostResources) { @@ -52,11 +55,15 @@ class AutoscalingTester { } public AutoscalingTester(Environment environment, NodeResources hostResources) { - this(environment, hostResources, null); + this(new Zone(environment, RegionName.from("us-east")), hostResources, null); } - public AutoscalingTester(Environment environment, NodeResources hostResources, HostResourcesCalculator resourcesCalculator) { - this(new Zone(environment, RegionName.from("us-east")), List.of(new Flavor("hostFlavor", hostResources)), resourcesCalculator); + public AutoscalingTester(Zone zone, NodeResources hostResources) { + this(zone, hostResources, null); + } + + public AutoscalingTester(Zone zone, NodeResources hostResources, HostResourcesCalculator resourcesCalculator) { + this(zone, List.of(new Flavor("hostFlavor", hostResources)), resourcesCalculator); provisioningTester.makeReadyNodes(20, "hostFlavor", NodeType.host, 8); provisioningTester.activateTenantHosts(); } @@ -75,6 +82,7 @@ class AutoscalingTester { hostResourcesCalculator = new MockHostResourcesCalculator(zone); autoscaler = new Autoscaler(nodeRepository()); + capacityPolicies = new CapacityPolicies(provisioningTester.nodeRepository()); } public ProvisioningTester provisioning() { return provisioningTester; } @@ -138,11 +146,11 @@ class AutoscalingTester { NodeList nodes = nodeRepository().nodes().list(Node.State.active).owner(applicationId); float oneExtraNodeFactor = (float)(nodes.size() - 1.0) / (nodes.size()); for (int i = 0; i < count; i++) { - 
clock().advance(Duration.ofMinutes(5)); + clock().advance(Duration.ofSeconds(150)); for (Node node : nodes) { Load load = new Load(value, ClusterModel.idealMemoryLoad * otherResourcesLoad, - ClusterModel.idealDiskLoad * otherResourcesLoad).multiply(oneExtraNodeFactor); + ClusterModel.idealContentDiskLoad * otherResourcesLoad).multiply(oneExtraNodeFactor); nodeMetricsDb().addNodeMetrics(List.of(new Pair<>(node.hostname(), new NodeMetricSnapshot(clock().instant(), load, @@ -163,16 +171,18 @@ class AutoscalingTester { * @param otherResourcesLoad the load factor relative to ideal to use for other resources * @param count the number of measurements * @param applicationId the application we're adding measurements for all nodes of + * @return the duration added to the current time by this */ - public void addDiskMeasurements(float value, float otherResourcesLoad, - int count, ApplicationId applicationId) { + public Duration addDiskMeasurements(float value, float otherResourcesLoad, + int count, ApplicationId applicationId) { NodeList nodes = nodeRepository().nodes().list(Node.State.active).owner(applicationId); float oneExtraNodeFactor = (float)(nodes.size() - 1.0) / (nodes.size()); + Instant initialTime = clock().instant(); for (int i = 0; i < count; i++) { - clock().advance(Duration.ofMinutes(5)); + clock().advance(Duration.ofSeconds(150)); for (Node node : nodes) { Load load = new Load(ClusterModel.idealQueryCpuLoad * otherResourcesLoad, - ClusterModel.idealDiskLoad * otherResourcesLoad, + ClusterModel.idealContentDiskLoad * otherResourcesLoad, value).multiply(oneExtraNodeFactor); nodeMetricsDb().addNodeMetrics(List.of(new Pair<>(node.hostname(), new NodeMetricSnapshot(clock().instant(), @@ -183,6 +193,7 @@ class AutoscalingTester { 0.0)))); } } + return Duration.between(initialTime, clock().instant()); } /** @@ -204,10 +215,10 @@ class AutoscalingTester { for (Node node : nodes) { float cpu = (float) 0.2 * otherResourcesLoad * oneExtraNodeFactor; float memory = value 
* oneExtraNodeFactor; - float disk = (float) ClusterModel.idealDiskLoad * otherResourcesLoad * oneExtraNodeFactor; + float disk = (float) ClusterModel.idealContentDiskLoad * otherResourcesLoad * oneExtraNodeFactor; Load load = new Load(0.2 * otherResourcesLoad, value, - ClusterModel.idealDiskLoad * otherResourcesLoad).multiply(oneExtraNodeFactor); + ClusterModel.idealContentDiskLoad * otherResourcesLoad).multiply(oneExtraNodeFactor); nodeMetricsDb().addNodeMetrics(List.of(new Pair<>(node.hostname(), new NodeMetricSnapshot(clock().instant(), load, @@ -283,10 +294,11 @@ class AutoscalingTester { } /** Creates the given number of measurements, spaced 5 minutes between, using the given function */ - public void addQueryRateMeasurements(ApplicationId application, - ClusterSpec.Id cluster, - int measurements, - IntFunction<Double> queryRate) { + public Duration addQueryRateMeasurements(ApplicationId application, + ClusterSpec.Id cluster, + int measurements, + IntFunction<Double> queryRate) { + Instant initialTime = clock().instant(); for (int i = 0; i < measurements; i++) { nodeMetricsDb().addClusterMetrics(application, Map.of(cluster, new ClusterMetricSnapshot(clock().instant(), @@ -294,19 +306,21 @@ class AutoscalingTester { 0.0))); clock().advance(Duration.ofMinutes(5)); } + return Duration.between(initialTime, clock().instant()); } public void clearQueryRateMeasurements(ApplicationId application, ClusterSpec.Id cluster) { ((MemoryMetricsDb)nodeMetricsDb()).clearClusterMetrics(application, cluster); } - public Autoscaler.Advice autoscale(ApplicationId applicationId, ClusterSpec.Id clusterId, Capacity capacity) { + public Autoscaler.Advice autoscale(ApplicationId applicationId, ClusterSpec cluster, Capacity capacity) { + capacity = capacityPolicies.applyOn(capacity, applicationId); Application application = nodeRepository().applications().get(applicationId).orElse(Application.empty(applicationId)) - .withCluster(clusterId, false, capacity); + .withCluster(cluster.id(), 
false, capacity); try (Mutex lock = nodeRepository().nodes().lock(applicationId)) { nodeRepository().applications().put(application, lock); } - return autoscaler.autoscale(application, application.clusters().get(clusterId), + return autoscaler.autoscale(application, application.clusters().get(cluster.id()), nodeRepository().nodes().list(Node.State.active).owner(applicationId)); } @@ -321,28 +335,37 @@ class AutoscalingTester { nodeRepository().nodes().list(Node.State.active).owner(applicationId)); } - public ClusterResources assertResources(String message, - int nodeCount, int groupCount, - NodeResources expectedResources, - Optional<ClusterResources> resources) { - return assertResources(message, nodeCount, groupCount, - expectedResources.vcpu(), expectedResources.memoryGb(), expectedResources.diskGb(), - resources); + public void assertResources(String message, + int nodeCount, int groupCount, + NodeResources expectedResources, + ClusterResources resources) { + assertResources(message, nodeCount, groupCount, + expectedResources.vcpu(), expectedResources.memoryGb(), expectedResources.diskGb(), + resources); } public ClusterResources assertResources(String message, int nodeCount, int groupCount, double approxCpu, double approxMemory, double approxDisk, - Optional<ClusterResources> resources) { + Autoscaler.Advice advice) { + assertTrue("Resources are present: " + message + " (" + advice + ": " + advice.reason() + ")", + advice.target().isPresent()); + var resources = advice.target().get(); + assertResources(message, nodeCount, groupCount, approxCpu, approxMemory, approxDisk, resources); + return resources; + } + + public void assertResources(String message, + int nodeCount, int groupCount, + double approxCpu, double approxMemory, double approxDisk, + ClusterResources resources) { double delta = 0.0000000001; - assertTrue("Resources are present: " + message, resources.isPresent()); - NodeResources nodeResources = resources.get().nodeResources(); - assertEquals("Node 
count in " + resources.get() + ": " + message, nodeCount, resources.get().nodes()); - assertEquals("Group count in " + resources.get() + ": " + message, groupCount, resources.get().groups()); - assertEquals("Cpu in " + resources.get() + ": " + message, approxCpu, Math.round(nodeResources.vcpu() * 10) / 10.0, delta); - assertEquals("Memory in " + resources.get() + ": " + message, approxMemory, Math.round(nodeResources.memoryGb() * 10) / 10.0, delta); - assertEquals("Disk in: " + resources.get() + ": " + message, approxDisk, Math.round(nodeResources.diskGb() * 10) / 10.0, delta); - return resources.get(); + NodeResources nodeResources = resources.nodeResources(); + assertEquals("Node count in " + resources + ": " + message, nodeCount, resources.nodes()); + assertEquals("Group count in " + resources+ ": " + message, groupCount, resources.groups()); + assertEquals("Cpu in " + resources + ": " + message, approxCpu, Math.round(nodeResources.vcpu() * 10) / 10.0, delta); + assertEquals("Memory in " + resources + ": " + message, approxMemory, Math.round(nodeResources.memoryGb() * 10) / 10.0, delta); + assertEquals("Disk in: " + resources + ": " + message, approxDisk, Math.round(nodeResources.diskGb() * 10) / 10.0, delta); } public ManualClock clock() { diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModelTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModelTest.java index bd7300ad6bf..516a7a92d04 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModelTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModelTest.java @@ -30,19 +30,20 @@ public class ClusterModelTest { public void test_traffic_headroom() { ManualClock clock = new ManualClock(); Application application = Application.empty(ApplicationId.from("t1", "a1", "i1")); + ClusterSpec clusterSpec = clusterSpec(); Cluster cluster = cluster(new 
NodeResources(1, 10, 100, 1)); application = application.with(cluster); // No current traffic share: Ideal load is low but capped var model1 = new ClusterModel(application.with(new Status(0.0, 1.0)), - cluster, clock, Duration.ofMinutes(10), + clusterSpec, cluster, clock, Duration.ofMinutes(10), timeseries(cluster,100, t -> t == 0 ? 10000.0 : 0.0, t -> 0.0, clock), ClusterNodesTimeseries.empty()); assertEquals(0.131, model1.idealLoad().cpu(), delta); // Almost no current traffic share: Ideal load is low but capped var model2 = new ClusterModel(application.with(new Status(0.0001, 1.0)), - cluster, clock, Duration.ofMinutes(10), + clusterSpec, cluster, clock, Duration.ofMinutes(10), timeseries(cluster,100, t -> t == 0 ? 10000.0 : 0.0, t -> 0.0, clock), ClusterNodesTimeseries.empty()); assertEquals(0.131, model2.idealLoad().cpu(), delta); @@ -53,24 +54,32 @@ public class ClusterModelTest { ManualClock clock = new ManualClock(); Application application = Application.empty(ApplicationId.from("t1", "a1", "i1")); + ClusterSpec clusterSpec = clusterSpec(); Cluster cluster = cluster(new NodeResources(1, 10, 100, 1)); application = application.with(cluster); // No current traffic: Ideal load is low but capped var model1 = new ClusterModel(application, - cluster, clock, Duration.ofMinutes(10), + clusterSpec, cluster, clock, Duration.ofMinutes(10), timeseries(cluster,100, t -> t == 0 ? 10000.0 : 0.0, t -> 0.0, clock), ClusterNodesTimeseries.empty()); assertEquals(0.275, model1.idealLoad().cpu(), delta); // Almost no current traffic: Ideal load is low but capped var model2 = new ClusterModel(application.with(new Status(0.0001, 1.0)), - cluster, clock, Duration.ofMinutes(10), + clusterSpec, cluster, clock, Duration.ofMinutes(10), timeseries(cluster,100, t -> t == 0 ? 
10000.0 : 0.0001, t -> 0.0, clock), ClusterNodesTimeseries.empty()); assertEquals(0.040, model2.idealLoad().cpu(), delta); } + private ClusterSpec clusterSpec() { + return ClusterSpec.specification(ClusterSpec.Type.content, ClusterSpec.Id.from("test")) + .group(ClusterSpec.Group.from(0)) + .vespaVersion("7.1.1") + .build(); + } + private Cluster cluster(NodeResources resources) { return Cluster.create(ClusterSpec.Id.from("test"), false, diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainerTest.java index 4bda7b137a0..d9037181f59 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainerTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainerTest.java @@ -5,8 +5,13 @@ import com.yahoo.config.provision.ApplicationId; import com.yahoo.config.provision.Capacity; import com.yahoo.config.provision.ClusterResources; import com.yahoo.config.provision.ClusterSpec; +import com.yahoo.config.provision.Environment; import com.yahoo.config.provision.NodeResources; +import com.yahoo.config.provision.RegionName; +import com.yahoo.config.provision.SystemName; +import com.yahoo.config.provision.Zone; import com.yahoo.test.ManualClock; +import com.yahoo.vespa.hosted.provision.Node; import com.yahoo.vespa.hosted.provision.applications.Cluster; import com.yahoo.vespa.hosted.provision.applications.ScalingEvent; import com.yahoo.vespa.hosted.provision.testutils.MockDeployer; @@ -84,11 +89,9 @@ public class AutoscalingMaintainerTest { tester.deploy(app1, cluster1, app1Capacity); // Measure overload - tester.clock().advance(Duration.ofSeconds(1)); tester.addMeasurements(0.9f, 0.9f, 0.9f, 0, 500, app1); // Causes autoscaling - tester.clock().advance(Duration.ofSeconds(1)); tester.clock().advance(Duration.ofMinutes(10)); 
Instant firstMaintenanceTime = tester.clock().instant(); tester.maintainer().maintain(); @@ -103,16 +106,12 @@ public class AutoscalingMaintainerTest { assertEquals(firstMaintenanceTime.toEpochMilli(), events.get(1).at().toEpochMilli()); // Measure overload still, since change is not applied, but metrics are discarded - tester.clock().advance(Duration.ofSeconds(1)); tester.addMeasurements(0.9f, 0.9f, 0.9f, 0, 500, app1); - tester.clock().advance(Duration.ofSeconds(1)); tester.maintainer().maintain(); assertEquals(firstMaintenanceTime.toEpochMilli(), tester.deployer().lastDeployTime(app1).get().toEpochMilli()); // Measure underload, but no autoscaling since we still haven't measured we're on the new config generation - tester.clock().advance(Duration.ofSeconds(1)); tester.addMeasurements(0.1f, 0.1f, 0.1f, 0, 500, app1); - tester.clock().advance(Duration.ofSeconds(1)); tester.maintainer().maintain(); assertEquals(firstMaintenanceTime.toEpochMilli(), tester.deployer().lastDeployTime(app1).get().toEpochMilli()); @@ -121,8 +120,9 @@ public class AutoscalingMaintainerTest { tester.clock().advance(Duration.ofMinutes(5)); tester.addMeasurements(0.1f, 0.1f, 0.1f, 1, 1, app1); tester.maintainer().maintain(); + assertEquals(firstMaintenanceTime.toEpochMilli(), tester.deployer().lastDeployTime(app1).get().toEpochMilli()); // - measure underload - tester.clock().advance(Duration.ofHours(1)); + tester.clock().advance(Duration.ofDays(4)); // Exit cooling period tester.addMeasurements(0.1f, 0.1f, 0.1f, 1, 500, app1); Instant lastMaintenanceTime = tester.clock().instant(); tester.maintainer().maintain(); @@ -191,7 +191,6 @@ public class AutoscalingMaintainerTest { var tester = new AutoscalingMaintainerTester(new MockDeployer.ApplicationContext(app1, cluster1, app1Capacity)); ManualClock clock = tester.clock(); - // deploy tester.deploy(app1, cluster1, app1Capacity); autoscale(false, Duration.ofMinutes( 1), Duration.ofMinutes( 5), clock, app1, cluster1, tester); @@ -199,6 +198,77 @@ 
public class AutoscalingMaintainerTest { autoscale( true, Duration.ofMinutes(40), Duration.ofMinutes(20), clock, app1, cluster1, tester); } + @Test + public void test_autoscaling_ignores_high_cpu_right_after_generation_change() { + ApplicationId app1 = AutoscalingMaintainerTester.makeApplicationId("app1"); + ClusterSpec cluster1 = AutoscalingMaintainerTester.containerClusterSpec(); + NodeResources resources = new NodeResources(4, 4, 10, 1); + ClusterResources min = new ClusterResources(2, 1, resources); + ClusterResources max = new ClusterResources(20, 1, resources); + var capacity = Capacity.from(min, max); + var tester = new AutoscalingMaintainerTester(new MockDeployer.ApplicationContext(app1, cluster1, capacity)); + + tester.deploy(app1, cluster1, capacity); + // fast completion + tester.addMeasurements(1.0f, 0.3f, 0.3f, 0, 1, app1); + tester.addMeasurements(1.0f, 0.3f, 0.3f, 0, 1, app1); + tester.maintainer().maintain(); + assertEquals("Scale up: " + tester.cluster(app1, cluster1).autoscalingStatus(), + 1, + tester.cluster(app1, cluster1).lastScalingEvent().get().generation()); + + // fast completion, with initially overloaded cpu + tester.addMeasurements(3.0f, 0.3f, 0.3f, 1, 1, app1); + tester.addMeasurements(0.2f, 0.3f, 0.3f, 1, 1, app1); + tester.maintainer().maintain(); + assertEquals("No autoscaling since we ignore the (first) data point in the warup period", + 1, + tester.cluster(app1, cluster1).lastScalingEvent().get().generation()); + } + + @Test + public void test_cd_autoscaling_test() { + ApplicationId app1 = AutoscalingMaintainerTester.makeApplicationId("app1"); + ClusterSpec cluster1 = AutoscalingMaintainerTester.containerClusterSpec(); + NodeResources resources = new NodeResources(1, 4, 50, 1); + ClusterResources min = new ClusterResources( 2, 1, resources); + ClusterResources max = new ClusterResources(3, 1, resources); + var capacity = Capacity.from(min, max); + var tester = new AutoscalingMaintainerTester(new Zone(SystemName.cd, 
Environment.prod, RegionName.from("us-east3")), + new MockDeployer.ApplicationContext(app1, cluster1, capacity)); + ManualClock clock = tester.clock(); + + tester.deploy(app1, cluster1, capacity); + assertEquals(2, + tester.nodeRepository().nodes().list(Node.State.active) + .owner(app1) + .cluster(cluster1.id()) + .size()); + + autoscale(false, Duration.ofMinutes( 1), Duration.ofMinutes( 5), clock, app1, cluster1, tester); + assertEquals(3, + tester.nodeRepository().nodes().list(Node.State.active) + .owner(app1) + .cluster(cluster1.id()) + .size()); + } + + @Test + public void test_cd_test_not_specifying_node_resources() { + ApplicationId app1 = AutoscalingMaintainerTester.makeApplicationId("app1"); + ClusterSpec cluster1 = AutoscalingMaintainerTester.containerClusterSpec(); + ClusterResources resources = new ClusterResources( 2, 1, NodeResources.unspecified()); + var capacity = Capacity.from(resources); + var tester = new AutoscalingMaintainerTester(new Zone(SystemName.cd, Environment.prod, RegionName.from("us-east3")), + new MockDeployer.ApplicationContext(app1, cluster1, capacity)); + tester.deploy(app1, cluster1, capacity); // Deploy should succeed and allocate the nodes + assertEquals(2, + tester.nodeRepository().nodes().list(Node.State.active) + .owner(app1) + .cluster(cluster1.id()) + .size()); + } + private void autoscale(boolean down, Duration completionTime, Duration expectedWindow, ManualClock clock, ApplicationId application, ClusterSpec cluster, AutoscalingMaintainerTester tester) { @@ -209,7 +279,7 @@ public class AutoscalingMaintainerTest { clock.advance(completionTime); float load = down ? 
0.1f : 1.0f; - tester.addMeasurements(load, load, load, generation, 200, application); + tester.addMeasurements(load, load, load, generation, 1, application); tester.maintainer().maintain(); assertEvent("Measured completion of the last scaling event, but no new autoscaling yet", generation, Optional.of(clock.instant()), diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainerTester.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainerTester.java index e36bd5e70bc..e1a1a2af5fb 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainerTester.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainerTester.java @@ -42,9 +42,11 @@ public class AutoscalingMaintainerTester { private final MockDeployer deployer; public AutoscalingMaintainerTester(MockDeployer.ApplicationContext ... appContexts) { - provisioningTester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east3"))) - .flavorsConfig(flavorsConfig()) - .build(); + this(new Zone(Environment.prod, RegionName.from("us-east3")), appContexts); + } + + public AutoscalingMaintainerTester(Zone zone, MockDeployer.ApplicationContext ... 
appContexts) { + provisioningTester = new ProvisioningTester.Builder().zone(zone).flavorsConfig(flavorsConfig()).build(); provisioningTester.clock().setInstant(Instant.ofEpochMilli(0)); Map<ApplicationId, MockDeployer.ApplicationContext> apps = Arrays.stream(appContexts) .collect(Collectors.toMap(c -> c.id(), c -> c)); @@ -69,8 +71,9 @@ public class AutoscalingMaintainerTester { return provisioningTester.deploy(application, cluster, capacity); } - public void addMeasurements(float cpu, float mem, float disk, long generation, int count, ApplicationId applicationId) { + public Duration addMeasurements(float cpu, float mem, float disk, long generation, int count, ApplicationId applicationId) { NodeList nodes = nodeRepository().nodes().list(Node.State.active).owner(applicationId); + Instant startTime = clock().instant(); for (int i = 0; i < count; i++) { for (Node node : nodes) nodeRepository().metricsDb().addNodeMetrics(List.of(new Pair<>(node.hostname(), @@ -80,7 +83,9 @@ public class AutoscalingMaintainerTester { true, true, 0.0)))); + clock().advance(Duration.ofSeconds(150)); } + return Duration.between(startTime, clock().instant()); } /** Creates the given number of measurements, spaced 5 minutes between, using the given function */ @@ -102,7 +107,7 @@ public class AutoscalingMaintainerTester { private FlavorsConfig flavorsConfig() { FlavorConfigBuilder b = new FlavorConfigBuilder(); - b.addFlavor("flt", 30, 30, 40, 3, Flavor.Type.BARE_METAL); + b.addFlavor("flt", 30, 30, 50, 3, Flavor.Type.BARE_METAL); b.addFlavor("cpu", 40, 20, 40, 3, Flavor.Type.BARE_METAL); b.addFlavor("mem", 20, 40, 40, 3, Flavor.Type.BARE_METAL); return b.build(); diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/DynamicProvisioningMaintainerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/DynamicProvisioningMaintainerTest.java index 316655e11fb..7ce26354739 100644 --- 
a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/DynamicProvisioningMaintainerTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/DynamicProvisioningMaintainerTest.java @@ -458,7 +458,7 @@ public class DynamicProvisioningMaintainerTest { // Provision config servers for (int i = 0; i < provisionedHosts.size(); i++) { - tester.makeReadyChildren(1, i + 1, NodeResources.unspecified(), hostType.childNodeType(), + tester.makeReadyChildren(1, i + 1, new NodeResources(1.5, 8, 50, 0.3), hostType.childNodeType(), provisionedHosts.get(i).hostname(), (nodeIndex) -> "cfg" + nodeIndex); } tester.prepareAndActivateInfraApplication(configSrvApp, hostType.childNodeType()); diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ScalingSuggestionsMaintainerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ScalingSuggestionsMaintainerTest.java index 03b41412896..b51f4403756 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ScalingSuggestionsMaintainerTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ScalingSuggestionsMaintainerTest.java @@ -13,6 +13,7 @@ import com.yahoo.config.provision.NodeType; import com.yahoo.config.provision.RegionName; import com.yahoo.config.provision.Zone; import com.yahoo.config.provisioning.FlavorsConfig; +import com.yahoo.test.ManualClock; import com.yahoo.vespa.hosted.provision.Node; import com.yahoo.vespa.hosted.provision.NodeList; import com.yahoo.vespa.hosted.provision.NodeRepository; @@ -24,6 +25,7 @@ import com.yahoo.vespa.hosted.provision.provisioning.ProvisioningTester; import org.junit.Test; import java.time.Duration; +import java.time.Instant; import java.util.List; import java.util.Optional; @@ -41,14 +43,13 @@ public class ScalingSuggestionsMaintainerTest { @Test public void testScalingSuggestionsMaintainer() { - ProvisioningTester 
tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east3"))).flavorsConfig(flavorsConfig()).build(); - + ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east3"))) + .flavorsConfig(flavorsConfig()) + .build(); ApplicationId app1 = ProvisioningTester.applicationId("app1"); - ClusterSpec cluster1 = ProvisioningTester.containerClusterSpec(); - ApplicationId app2 = ProvisioningTester.applicationId("app2"); + ClusterSpec cluster1 = ProvisioningTester.containerClusterSpec(); ClusterSpec cluster2 = ProvisioningTester.contentClusterSpec(); - tester.makeReadyNodes(20, "flt", NodeType.host, 8); tester.activateTenantHosts(); @@ -60,7 +61,8 @@ public class ScalingSuggestionsMaintainerTest { false, true)); tester.clock().advance(Duration.ofHours(13)); - addMeasurements(0.90f, 0.90f, 0.90f, 0, 500, app1, tester.nodeRepository()); + Duration timeAdded = addMeasurements(0.90f, 0.90f, 0.90f, 0, 500, app1, tester.nodeRepository()); + tester.clock().advance(timeAdded.negated()); addMeasurements(0.99f, 0.99f, 0.99f, 0, 500, app2, tester.nodeRepository()); ScalingSuggestionsMaintainer maintainer = new ScalingSuggestionsMaintainer(tester.nodeRepository(), @@ -68,7 +70,7 @@ public class ScalingSuggestionsMaintainerTest { new TestMetric()); maintainer.maintain(); - assertEquals("11 nodes with [vcpu: 6.5, memory: 5.5 Gb, disk 15.0 Gb, bandwidth: 0.1 Gbps]", + assertEquals("12 nodes with [vcpu: 6.0, memory: 5.5 Gb, disk 10.0 Gb, bandwidth: 0.1 Gbps]", suggestionOf(app1, cluster1, tester).get().resources().toString()); assertEquals("8 nodes with [vcpu: 11.0, memory: 4.4 Gb, disk 11.8 Gb, bandwidth: 0.1 Gbps]", suggestionOf(app2, cluster2, tester).get().resources().toString()); @@ -78,7 +80,7 @@ public class ScalingSuggestionsMaintainerTest { addMeasurements(0.10f, 0.10f, 0.10f, 0, 500, app1, tester.nodeRepository()); maintainer.maintain(); assertEquals("Suggestion stays at the peak 
value observed", - "11 nodes with [vcpu: 6.5, memory: 5.5 Gb, disk 15.0 Gb, bandwidth: 0.1 Gbps]", + "12 nodes with [vcpu: 6.0, memory: 5.5 Gb, disk 10.0 Gb, bandwidth: 0.1 Gbps]", suggestionOf(app1, cluster1, tester).get().resources().toString()); // Utilization is still way down and a week has passed tester.clock().advance(Duration.ofDays(7)); @@ -114,10 +116,11 @@ public class ScalingSuggestionsMaintainerTest { .shouldSuggestResources(currentResources); } - public void addMeasurements(float cpu, float memory, float disk, int generation, int count, - ApplicationId applicationId, - NodeRepository nodeRepository) { + public Duration addMeasurements(float cpu, float memory, float disk, int generation, int count, + ApplicationId applicationId, + NodeRepository nodeRepository) { NodeList nodes = nodeRepository.nodes().list(Node.State.active).owner(applicationId); + Instant startTime = nodeRepository.clock().instant(); for (int i = 0; i < count; i++) { for (Node node : nodes) nodeRepository.metricsDb().addNodeMetrics(List.of(new Pair<>(node.hostname(), @@ -127,7 +130,9 @@ public class ScalingSuggestionsMaintainerTest { true, true, 0.0)))); + ((ManualClock)nodeRepository.clock()).advance(Duration.ofSeconds(150)); } + return Duration.between(startTime, nodeRepository.clock().instant()); } private FlavorsConfig flavorsConfig() { diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java index db165aae919..95f25612dd7 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java @@ -523,7 +523,7 @@ public class ProvisioningTest { ApplicationId application = ProvisioningTester.applicationId(); tester.makeReadyHosts(10, defaultResources).activateTenantHosts(); - 
prepare(application, 1, 2, 3, 3, defaultResources, tester); + prepare(application, 1, 1, 1, 1, defaultResources, tester); } @Test @@ -1015,10 +1015,10 @@ public class ProvisioningTest { allHosts.addAll(content1); Function<Integer, Capacity> capacity = count -> Capacity.from(new ClusterResources(count, 1, NodeResources.unspecified()), required, true); - int expectedContainer0Size = tester.decideSize(container0Size, capacity.apply(container0Size), containerCluster0, application); - int expectedContainer1Size = tester.decideSize(container1Size, capacity.apply(container1Size), containerCluster1, application); - int expectedContent0Size = tester.decideSize(content0Size, capacity.apply(content0Size), contentCluster0, application); - int expectedContent1Size = tester.decideSize(content1Size, capacity.apply(content1Size), contentCluster1, application); + int expectedContainer0Size = tester.decideSize(capacity.apply(container0Size), application); + int expectedContainer1Size = tester.decideSize(capacity.apply(container1Size), application); + int expectedContent0Size = tester.decideSize(capacity.apply(content0Size), application); + int expectedContent1Size = tester.decideSize(capacity.apply(content1Size), application); assertEquals("Hosts in each group cluster is disjunct and the total number of unretired nodes is correct", expectedContainer0Size + expectedContainer1Size + expectedContent0Size + expectedContent1Size, diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java index 6ca93671087..c478840780f 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java @@ -152,8 +152,8 @@ public class ProvisioningTester { public NodeList getNodes(ApplicationId id, 
Node.State ... inState) { return nodeRepository.nodes().list(inState).owner(id); } public InMemoryFlagSource flagSource() { return (InMemoryFlagSource) nodeRepository.flagSource(); } - public int decideSize(int size, Capacity capacity, ClusterSpec cluster, ApplicationId application) { - return capacityPolicies.decideSize(size, capacity.isRequired(), capacity.canFail(), application.instance().isTester(), cluster); + public int decideSize(Capacity capacity, ApplicationId application) { + return capacityPolicies.applyOn(capacity, application).minResources().nodes(); } public Node patchNode(Node node, UnaryOperator<Node> patcher) { @@ -493,6 +493,7 @@ public class ProvisioningTester { public List<Node> makeReadyNodes(int n, Flavor flavor, Optional<TenantName> reservedTo, NodeType type, int ipAddressPoolSize, boolean dualStack) { List<Node> nodes = makeProvisionedNodes(n, flavor, reservedTo, type, ipAddressPoolSize, dualStack); nodes = nodeRepository.nodes().deallocate(nodes, Agent.system, getClass().getSimpleName()); + nodes.forEach(node -> { if (node.resources().isUnspecified()) throw new IllegalArgumentException(); }); return nodeRepository.nodes().setReady(nodes, Agent.system, getClass().getSimpleName()); } diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/application1.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/application1.json index 689b6f3816b..fcdcdf1a8ca 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/application1.json +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/application1.json @@ -72,7 +72,7 @@ "idealMemory": 0.65, "currentMemory": 0.0, "disk" : 0.0, - "idealDisk": 0.6, + "idealDisk": 0.95, "currentDisk": 0.0 }, "scalingEvents" : [ @@ -133,6 +133,7 @@ <module>vespa-athenz</module> <module>vespa-documentgen-plugin</module> <module>vespa-feed-client</module> + 
<module>vespa-feed-client-api</module> <module>vespa-feed-client-cli</module> <module>vespa-hadoop</module> <module>vespa-http-client</module> diff --git a/screwdriver.yaml b/screwdriver.yaml index 6ea74ae8a1d..14d9902d335 100644 --- a/screwdriver.yaml +++ b/screwdriver.yaml @@ -60,7 +60,7 @@ jobs: environment: LOCAL_MVN_REPO: "/tmp/vespa/mvnrepo" - VESPA_MAVEN_EXTRA_OPTS: "-Dmaven.repo.local=/tmp/vespa/mvnrepo -Dmaven.javadoc.skip=true -Dmaven.source.skip=true" + VESPA_MAVEN_EXTRA_OPTS: "-Dmaven.repo.local=/tmp/vespa/mvnrepo -Dmaven.source.skip=true" CCACHE_TMP_DIR: "/tmp/ccache_tmp" CCACHE_DATA_DIR: "/tmp/vespa/ccache" MAIN_CACHE_FILE: "/main_job_cache/vespa.tar" diff --git a/searchcommon/src/vespa/searchcommon/attribute/config.h b/searchcommon/src/vespa/searchcommon/attribute/config.h index e6a428e5843..f572f5038fc 100644 --- a/searchcommon/src/vespa/searchcommon/attribute/config.h +++ b/searchcommon/src/vespa/searchcommon/attribute/config.h @@ -6,10 +6,10 @@ #include "collectiontype.h" #include "hnsw_index_params.h" #include "predicate_params.h" -#include <vespa/searchcommon/common/compaction_strategy.h> #include <vespa/searchcommon/common/growstrategy.h> #include <vespa/searchcommon/common/dictionary_config.h> #include <vespa/eval/eval/value_type.h> +#include <vespa/vespalib/datastore/compaction_strategy.h> #include <cassert> #include <optional> @@ -23,6 +23,7 @@ namespace search::attribute { class Config { public: enum class Match { CASED, UNCASED }; + using CompactionStrategy = vespalib::datastore::CompactionStrategy; Config() noexcept; Config(BasicType bt) noexcept : Config(bt, CollectionType::SINGLE) { } Config(BasicType bt, CollectionType ct) noexcept : Config(bt, ct, false) { } diff --git a/searchcommon/src/vespa/searchcommon/common/CMakeLists.txt b/searchcommon/src/vespa/searchcommon/common/CMakeLists.txt index 77e638d7193..6cc02ae7884 100644 --- a/searchcommon/src/vespa/searchcommon/common/CMakeLists.txt +++ 
b/searchcommon/src/vespa/searchcommon/common/CMakeLists.txt @@ -1,7 +1,6 @@ # Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. vespa_add_library(searchcommon_searchcommon_common OBJECT SOURCES - compaction_strategy.cpp datatype.cpp dictionary_config.cpp growstrategy.cpp diff --git a/searchcommon/src/vespa/searchcommon/common/compaction_strategy.cpp b/searchcommon/src/vespa/searchcommon/common/compaction_strategy.cpp deleted file mode 100644 index 22f50ba3049..00000000000 --- a/searchcommon/src/vespa/searchcommon/common/compaction_strategy.cpp +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. - -#include "compaction_strategy.h" -#include <iostream> -namespace search { - -std::ostream& operator<<(std::ostream& os, const CompactionStrategy& compaction_strategy) -{ - os << "{maxDeadBytesRatio=" << compaction_strategy.getMaxDeadBytesRatio() << - ", maxDeadAddressSpaceRatio=" << compaction_strategy.getMaxDeadAddressSpaceRatio() << - "}"; - return os; -} - -} diff --git a/searchcore/src/tests/proton/attribute/attribute_test.cpp b/searchcore/src/tests/proton/attribute/attribute_test.cpp index 3397b424ed0..1de56802484 100644 --- a/searchcore/src/tests/proton/attribute/attribute_test.cpp +++ b/searchcore/src/tests/proton/attribute/attribute_test.cpp @@ -82,6 +82,7 @@ using std::string; using vespalib::ForegroundTaskExecutor; using vespalib::ForegroundThreadExecutor; using vespalib::SequencedTaskExecutorObserver; +using vespalib::datastore::CompactionStrategy; using vespalib::eval::SimpleValue; using vespalib::eval::TensorSpec; using vespalib::eval::Value; @@ -541,7 +542,7 @@ public: AttributeCollectionSpecFactory _factory; AttributeCollectionSpecTest(bool fastAccessOnly) : _builder(), - _factory(AllocStrategy(search::GrowStrategy(), search::CompactionStrategy(), 100), fastAccessOnly) + _factory(AllocStrategy(search::GrowStrategy(), 
CompactionStrategy(), 100), fastAccessOnly) { addAttribute("a1", false); addAttribute("a2", true); diff --git a/searchcore/src/tests/proton/common/alloc_config/alloc_config_test.cpp b/searchcore/src/tests/proton/common/alloc_config/alloc_config_test.cpp index 1483a0bd653..59503464222 100644 --- a/searchcore/src/tests/proton/common/alloc_config/alloc_config_test.cpp +++ b/searchcore/src/tests/proton/common/alloc_config/alloc_config_test.cpp @@ -7,8 +7,8 @@ using proton::AllocConfig; using proton::AllocStrategy; using proton::SubDbType; -using search::CompactionStrategy; using search::GrowStrategy; +using vespalib::datastore::CompactionStrategy; namespace { diff --git a/searchcore/src/tests/proton/documentdb/document_scan_iterator/document_scan_iterator_test.cpp b/searchcore/src/tests/proton/documentdb/document_scan_iterator/document_scan_iterator_test.cpp index 73160d1db94..4825dc6e8a7 100644 --- a/searchcore/src/tests/proton/documentdb/document_scan_iterator/document_scan_iterator_test.cpp +++ b/searchcore/src/tests/proton/documentdb/document_scan_iterator/document_scan_iterator_test.cpp @@ -49,19 +49,19 @@ struct Fixture } LidSet retval; for (uint32_t i = 0; i < count; ++i) { - uint32_t lid = next(compactLidLimit, false); + uint32_t lid = next(compactLidLimit); retval.insert(lid); EXPECT_TRUE(_itr->valid() || lid <= compactLidLimit); } - EXPECT_EQUAL(0u, next(compactLidLimit, false)); + EXPECT_EQUAL(0u, next(compactLidLimit)); EXPECT_FALSE(_itr->valid()); return retval; } - uint32_t next(uint32_t compactLidLimit, bool retry = false) { + uint32_t next(uint32_t compactLidLimit) { if (!_itr) { _itr = std::make_unique<DocumentScanIterator>(_metaStore); } - return _itr->next(compactLidLimit, retry).lid; + return _itr->next(compactLidLimit).lid; } }; @@ -82,14 +82,6 @@ TEST_F("require that only lids > lid limit are returned", Fixture) assertLidSet({5,6,7,8}, f.scan(4, 4)); } -TEST_F("require that we start scan at previous doc if retry is set", Fixture) -{ - 
f.add({1,2,3,4,5,6,7,8}); - uint32_t lid1 = f.next(4, false); - uint32_t lid2 = f.next(4, true); - EXPECT_EQUAL(lid1, lid2); -} - TEST_MAIN() { TEST_RUN_ALL(); diff --git a/searchcore/src/tests/proton/documentdb/document_subdbs/document_subdbs_test.cpp b/searchcore/src/tests/proton/documentdb/document_subdbs/document_subdbs_test.cpp index 7f28ccd0737..1851455e321 100644 --- a/searchcore/src/tests/proton/documentdb/document_subdbs/document_subdbs_test.cpp +++ b/searchcore/src/tests/proton/documentdb/document_subdbs/document_subdbs_test.cpp @@ -60,6 +60,7 @@ using storage::spi::Timestamp; using vespa::config::search::core::ProtonConfig; using vespa::config::content::core::BucketspacesConfig; using vespalib::mkdir; +using vespalib::datastore::CompactionStrategy; using proton::index::IndexConfig; typedef StoreOnlyDocSubDB::Config StoreOnlyConfig; @@ -564,7 +565,7 @@ TEST_F("require that attribute manager can be reconfigured", SearchableFixture) TEST_F("require that subdb reflect retirement", FastAccessFixture) { - search::CompactionStrategy cfg(0.1, 0.3); + CompactionStrategy cfg(0.1, 0.3); EXPECT_FALSE(f._subDb.isNodeRetired()); auto unretired_cfg = f._subDb.computeCompactionStrategy(cfg); @@ -576,7 +577,7 @@ TEST_F("require that subdb reflect retirement", FastAccessFixture) EXPECT_TRUE(f._subDb.isNodeRetired()); auto retired_cfg = f._subDb.computeCompactionStrategy(cfg); EXPECT_TRUE(cfg != retired_cfg); - EXPECT_TRUE(search::CompactionStrategy(0.5, 0.5) == retired_cfg); + EXPECT_TRUE(CompactionStrategy(0.5, 0.5) == retired_cfg); calc->setNodeRetired(false); f.setBucketStateCalculator(calc); @@ -586,8 +587,8 @@ TEST_F("require that subdb reflect retirement", FastAccessFixture) } TEST_F("require that attribute compaction config reflect retirement", FastAccessFixture) { - search::CompactionStrategy default_cfg(0.05, 0.2); - search::CompactionStrategy retired_cfg(0.5, 0.5); + CompactionStrategy default_cfg(0.05, 0.2); + CompactionStrategy retired_cfg(0.5, 0.5); auto 
guard = f._subDb.getAttributeManager()->getAttribute("attr1"); EXPECT_EQUAL(default_cfg, (*guard)->getConfig().getCompactionStrategy()); @@ -702,29 +703,31 @@ assertTarget(const vespalib::string &name, TEST_F("require that flush targets can be retrieved", FastAccessFixture) { IFlushTarget::List targets = getFlushTargets(f); - EXPECT_EQUAL(7u, targets.size()); + EXPECT_EQUAL(8u, targets.size()); EXPECT_EQUAL("subdb.attribute.flush.attr1", targets[0]->getName()); EXPECT_EQUAL("subdb.attribute.shrink.attr1", targets[1]->getName()); EXPECT_EQUAL("subdb.documentmetastore.flush", targets[2]->getName()); EXPECT_EQUAL("subdb.documentmetastore.shrink", targets[3]->getName()); - EXPECT_EQUAL("subdb.summary.compact", targets[4]->getName()); - EXPECT_EQUAL("subdb.summary.flush", targets[5]->getName()); - EXPECT_EQUAL("subdb.summary.shrink", targets[6]->getName()); + EXPECT_EQUAL("subdb.summary.compact_bloat", targets[4]->getName()); + EXPECT_EQUAL("subdb.summary.compact_spread", targets[5]->getName()); + EXPECT_EQUAL("subdb.summary.flush", targets[6]->getName()); + EXPECT_EQUAL("subdb.summary.shrink", targets[7]->getName()); } TEST_F("require that flush targets can be retrieved", SearchableFixture) { IFlushTarget::List targets = getFlushTargets(f); - EXPECT_EQUAL(9u, targets.size()); + EXPECT_EQUAL(10u, targets.size()); EXPECT_TRUE(assertTarget("subdb.attribute.flush.attr1", FType::SYNC, FComponent::ATTRIBUTE, *targets[0])); EXPECT_TRUE(assertTarget("subdb.attribute.shrink.attr1", FType::GC, FComponent::ATTRIBUTE, *targets[1])); EXPECT_TRUE(assertTarget("subdb.documentmetastore.flush", FType::SYNC, FComponent::ATTRIBUTE, *targets[2])); EXPECT_TRUE(assertTarget("subdb.documentmetastore.shrink", FType::GC, FComponent::ATTRIBUTE, *targets[3])); EXPECT_TRUE(assertTarget("subdb.memoryindex.flush", FType::FLUSH, FComponent::INDEX, *targets[4])); EXPECT_TRUE(assertTarget("subdb.memoryindex.fusion", FType::GC, FComponent::INDEX, *targets[5])); - 
EXPECT_TRUE(assertTarget("subdb.summary.compact", FType::GC, FComponent::DOCUMENT_STORE, *targets[6])); - EXPECT_TRUE(assertTarget("subdb.summary.flush", FType::SYNC, FComponent::DOCUMENT_STORE, *targets[7])); - EXPECT_TRUE(assertTarget("subdb.summary.shrink", FType::GC, FComponent::DOCUMENT_STORE, *targets[8])); + EXPECT_TRUE(assertTarget("subdb.summary.compact_bloat", FType::GC, FComponent::DOCUMENT_STORE, *targets[6])); + EXPECT_TRUE(assertTarget("subdb.summary.compact_spread", FType::GC, FComponent::DOCUMENT_STORE, *targets[7])); + EXPECT_TRUE(assertTarget("subdb.summary.flush", FType::SYNC, FComponent::DOCUMENT_STORE, *targets[8])); + EXPECT_TRUE(assertTarget("subdb.summary.shrink", FType::GC, FComponent::DOCUMENT_STORE, *targets[9])); } TEST_F("require that only fast-access attributes are instantiated", FastAccessOnlyFixture) diff --git a/searchcore/src/tests/proton/documentdb/lid_space_compaction/lid_space_common.cpp b/searchcore/src/tests/proton/documentdb/lid_space_compaction/lid_space_common.cpp index 3abeaf37062..57989688a4f 100644 --- a/searchcore/src/tests/proton/documentdb/lid_space_compaction/lid_space_common.cpp +++ b/searchcore/src/tests/proton/documentdb/lid_space_compaction/lid_space_common.cpp @@ -19,8 +19,8 @@ MyScanIterator::valid() const { return _validItr; } -search::DocumentMetaData MyScanIterator::next(uint32_t compactLidLimit, bool retry) { - if (!retry && _itr != _lids.begin()) { +search::DocumentMetaData MyScanIterator::next(uint32_t compactLidLimit) { + if (_itr != _lids.begin()) { ++_itr; } for (; _itr != _lids.end() && (*_itr) <= compactLidLimit; ++_itr) {} diff --git a/searchcore/src/tests/proton/documentdb/lid_space_compaction/lid_space_common.h b/searchcore/src/tests/proton/documentdb/lid_space_compaction/lid_space_common.h index 42976104836..b404fc6956a 100644 --- a/searchcore/src/tests/proton/documentdb/lid_space_compaction/lid_space_common.h +++ b/searchcore/src/tests/proton/documentdb/lid_space_compaction/lid_space_common.h @@ 
-15,6 +15,7 @@ #include <vespa/searchcore/proton/test/clusterstatehandler.h> #include <vespa/searchcore/proton/test/disk_mem_usage_notifier.h> #include <vespa/searchcore/proton/test/test.h> +#include <vespa/searchcore/proton/test/dummy_document_store.h> #include <vespa/vespalib/util/idestructorcallback.h> #include <vespa/searchlib/index/docbuilder.h> @@ -55,7 +56,7 @@ struct MyScanIterator : public IDocumentScanIterator { explicit MyScanIterator(const MyHandler & handler, const LidVector &lids); ~MyScanIterator() override; bool valid() const override; - search::DocumentMetaData next(uint32_t compactLidLimit, bool retry) override; + search::DocumentMetaData next(uint32_t compactLidLimit) override; }; struct MyHandler : public ILidSpaceCompactionHandler { diff --git a/searchcore/src/tests/proton/documentdb/lid_space_compaction/lid_space_compaction_test.cpp b/searchcore/src/tests/proton/documentdb/lid_space_compaction/lid_space_compaction_test.cpp index a2d4315dad5..c01a1a65c46 100644 --- a/searchcore/src/tests/proton/documentdb/lid_space_compaction/lid_space_compaction_test.cpp +++ b/searchcore/src/tests/proton/documentdb/lid_space_compaction/lid_space_compaction_test.cpp @@ -250,6 +250,7 @@ TEST_F(MaxOutstandingJobTest, job_is_blocked_if_it_has_too_many_outstanding_move unblockJob(1); assertRunToNotBlocked(); assertJobContext(4, 7, 3, 0, 0); + unblockJob(1); endScan().compact(); assertJobContext(4, 7, 3, 7, 1); sync(); diff --git a/searchcore/src/tests/proton/proton_config_fetcher/proton_config_fetcher_test.cpp b/searchcore/src/tests/proton/proton_config_fetcher/proton_config_fetcher_test.cpp index 9a02331787e..6d3eaa30263 100644 --- a/searchcore/src/tests/proton/proton_config_fetcher/proton_config_fetcher_test.cpp +++ b/searchcore/src/tests/proton/proton_config_fetcher/proton_config_fetcher_test.cpp @@ -45,7 +45,7 @@ using search::TuneFileDocumentDB; using std::map; using vespalib::VarHolder; using search::GrowStrategy; -using search::CompactionStrategy; +using 
vespalib::datastore::CompactionStrategy; struct DoctypeFixture { using UP = std::unique_ptr<DoctypeFixture>; diff --git a/searchcore/src/tests/proton/reference/gid_to_lid_change_handler/gid_to_lid_change_handler_test.cpp b/searchcore/src/tests/proton/reference/gid_to_lid_change_handler/gid_to_lid_change_handler_test.cpp index 3474a4297c7..516c31cb232 100644 --- a/searchcore/src/tests/proton/reference/gid_to_lid_change_handler/gid_to_lid_change_handler_test.cpp +++ b/searchcore/src/tests/proton/reference/gid_to_lid_change_handler/gid_to_lid_change_handler_test.cpp @@ -30,6 +30,10 @@ vespalib::string doc1("id:test:music::1"); } +TEST("control sizeof(PendingGidToLidChange)") { + EXPECT_EQUAL(48u, sizeof(PendingGidToLidChange)); +} + class ListenerStats { using lock_guard = std::lock_guard<std::mutex>; std::mutex _lock; diff --git a/searchcore/src/vespa/searchcore/proton/attribute/attribute_initializer.cpp b/searchcore/src/vespa/searchcore/proton/attribute/attribute_initializer.cpp index 5c695f7b0f2..66be0737fe9 100644 --- a/searchcore/src/vespa/searchcore/proton/attribute/attribute_initializer.cpp +++ b/searchcore/src/vespa/searchcore/proton/attribute/attribute_initializer.cpp @@ -119,7 +119,7 @@ extractHeader(const vespalib::string &attrFileName) auto df = search::FileUtil::openFile(attrFileName + ".dat"); vespalib::FileHeader datHeader; datHeader.readFile(*df); - return AttributeHeader::extractTags(datHeader); + return AttributeHeader::extractTags(datHeader, attrFileName); } void diff --git a/searchcore/src/vespa/searchcore/proton/attribute/attribute_manager_initializer.cpp b/searchcore/src/vespa/searchcore/proton/attribute/attribute_manager_initializer.cpp index 1c730b063f8..59804517d26 100644 --- a/searchcore/src/vespa/searchcore/proton/attribute/attribute_manager_initializer.cpp +++ b/searchcore/src/vespa/searchcore/proton/attribute/attribute_manager_initializer.cpp @@ -7,10 +7,10 @@ #include <future> using search::AttributeVector; -using 
search::CompactionStrategy; using search::GrowStrategy; using search::SerialNum; using vespa::config::search::AttributesConfig; +using vespalib::datastore::CompactionStrategy; namespace proton { diff --git a/searchcore/src/vespa/searchcore/proton/attribute/attribute_writer.cpp b/searchcore/src/vespa/searchcore/proton/attribute/attribute_writer.cpp index 2863eab1443..52b367fd14b 100644 --- a/searchcore/src/vespa/searchcore/proton/attribute/attribute_writer.cpp +++ b/searchcore/src/vespa/searchcore/proton/attribute/attribute_writer.cpp @@ -549,7 +549,6 @@ public: for (auto lidToRemove : _lidsToRemove) { applyRemoveToAttribute(_serialNum, lidToRemove, attr, _onWriteDone); } - attr.commit(false); } } } diff --git a/searchcore/src/vespa/searchcore/proton/common/alloc_config.cpp b/searchcore/src/vespa/searchcore/proton/common/alloc_config.cpp index 3664f67f8fb..69a2d4f3ea9 100644 --- a/searchcore/src/vespa/searchcore/proton/common/alloc_config.cpp +++ b/searchcore/src/vespa/searchcore/proton/common/alloc_config.cpp @@ -4,8 +4,8 @@ #include <vespa/searchcore/proton/common/subdbtype.h> #include <algorithm> -using search::CompactionStrategy; using search::GrowStrategy; +using vespalib::datastore::CompactionStrategy; namespace proton { diff --git a/searchcore/src/vespa/searchcore/proton/common/alloc_strategy.cpp b/searchcore/src/vespa/searchcore/proton/common/alloc_strategy.cpp index cbe8309b031..32ac249f7e1 100644 --- a/searchcore/src/vespa/searchcore/proton/common/alloc_strategy.cpp +++ b/searchcore/src/vespa/searchcore/proton/common/alloc_strategy.cpp @@ -3,7 +3,6 @@ #include "alloc_strategy.h" #include <iostream> -using search::CompactionStrategy; using search::GrowStrategy; namespace proton { diff --git a/searchcore/src/vespa/searchcore/proton/common/alloc_strategy.h b/searchcore/src/vespa/searchcore/proton/common/alloc_strategy.h index 4771a8637cd..9c6e24e2bfe 100644 --- a/searchcore/src/vespa/searchcore/proton/common/alloc_strategy.h +++ 
b/searchcore/src/vespa/searchcore/proton/common/alloc_strategy.h @@ -2,8 +2,8 @@ #pragma once -#include <vespa/searchcommon/common/compaction_strategy.h> #include <vespa/searchcommon/common/growstrategy.h> +#include <vespa/vespalib/datastore/compaction_strategy.h> #include <iosfwd> namespace proton { @@ -14,14 +14,16 @@ namespace proton { */ class AllocStrategy { +public: + using CompactionStrategy = vespalib::datastore::CompactionStrategy; protected: const search::GrowStrategy _grow_strategy; - const search::CompactionStrategy _compaction_strategy; + const CompactionStrategy _compaction_strategy; const uint32_t _amortize_count; public: AllocStrategy(const search::GrowStrategy& grow_strategy, - const search::CompactionStrategy& compaction_strategy, + const CompactionStrategy& compaction_strategy, uint32_t amortize_count); AllocStrategy(); @@ -32,7 +34,7 @@ public: return !operator==(rhs); } const search::GrowStrategy& get_grow_strategy() const noexcept { return _grow_strategy; } - const search::CompactionStrategy& get_compaction_strategy() const noexcept { return _compaction_strategy; } + const CompactionStrategy& get_compaction_strategy() const noexcept { return _compaction_strategy; } uint32_t get_amortize_count() const noexcept { return _amortize_count; } }; diff --git a/searchcore/src/vespa/searchcore/proton/docsummary/summarycompacttarget.cpp b/searchcore/src/vespa/searchcore/proton/docsummary/summarycompacttarget.cpp index 4e0cf3f9059..06bf8d0a8a6 100644 --- a/searchcore/src/vespa/searchcore/proton/docsummary/summarycompacttarget.cpp +++ b/searchcore/src/vespa/searchcore/proton/docsummary/summarycompacttarget.cpp @@ -9,21 +9,26 @@ using search::SerialNum; using vespalib::makeLambdaTask; using searchcorespi::FlushStats; using searchcorespi::IFlushTarget; +using searchcorespi::FlushTask; namespace proton { namespace { -class Compacter : public searchcorespi::FlushTask { +class Compacter : public FlushTask { private: IDocumentStore & _docStore; FlushStats & 
_stats; SerialNum _currSerial; + virtual void compact(IDocumentStore & docStore, SerialNum currSerial) const = 0; public: - Compacter(IDocumentStore & docStore, FlushStats & stats, SerialNum currSerial) : - _docStore(docStore), _stats(stats), _currSerial(currSerial) {} + Compacter(IDocumentStore & docStore, FlushStats & stats, SerialNum currSerial) + : _docStore(docStore), + _stats(stats), + _currSerial(currSerial) + {} void run() override { - _docStore.compact(_currSerial); + compact(_docStore, _currSerial); updateStats(); } void updateStats() { @@ -36,10 +41,32 @@ public: } }; +class CompactBloat : public Compacter { +public: + CompactBloat(IDocumentStore & docStore, FlushStats & stats, SerialNum currSerial) + : Compacter(docStore, stats, currSerial) + {} +private: + void compact(IDocumentStore & docStore, SerialNum currSerial) const override { + docStore.compactBloat(currSerial); + } +}; + +class CompactSpread : public Compacter { +public: + CompactSpread(IDocumentStore & docStore, FlushStats & stats, SerialNum currSerial) + : Compacter(docStore, stats, currSerial) + {} +private: + void compact(IDocumentStore & docStore, SerialNum currSerial) const override { + docStore.compactSpread(currSerial); + } +}; + } -SummaryCompactTarget::SummaryCompactTarget(vespalib::Executor & summaryService, IDocumentStore & docStore) - : IFlushTarget("summary.compact", Type::GC, Component::DOCUMENT_STORE), +SummaryGCTarget::SummaryGCTarget(const vespalib::string & name, vespalib::Executor & summaryService, IDocumentStore & docStore) + : IFlushTarget(name, Type::GC, Component::DOCUMENT_STORE), _summaryService(summaryService), _docStore(docStore), _lastStats() @@ -48,37 +75,69 @@ SummaryCompactTarget::SummaryCompactTarget(vespalib::Executor & summaryService, } IFlushTarget::MemoryGain -SummaryCompactTarget::getApproxMemoryGain() const +SummaryGCTarget::getApproxMemoryGain() const { return MemoryGain::noGain(_docStore.memoryUsed()); } IFlushTarget::DiskGain 
-SummaryCompactTarget::getApproxDiskGain() const +SummaryGCTarget::getApproxDiskGain() const { size_t total(_docStore.getDiskFootprint()); - return DiskGain(total, total - std::min(total, _docStore.getMaxCompactGain())); + return DiskGain(total, total - std::min(total, getBloat(_docStore))); } IFlushTarget::Time -SummaryCompactTarget::getLastFlushTime() const +SummaryGCTarget::getLastFlushTime() const { return vespalib::system_clock::now(); } SerialNum -SummaryCompactTarget::getFlushedSerialNum() const +SummaryGCTarget::getFlushedSerialNum() const { return _docStore.tentativeLastSyncToken(); } IFlushTarget::Task::UP -SummaryCompactTarget::initFlush(SerialNum currentSerial, std::shared_ptr<search::IFlushToken>) +SummaryGCTarget::initFlush(SerialNum currentSerial, std::shared_ptr<search::IFlushToken>) { std::promise<Task::UP> promise; std::future<Task::UP> future = promise.get_future(); - _summaryService.execute(makeLambdaTask([&]() { promise.set_value(std::make_unique<Compacter>(_docStore, _lastStats, currentSerial)); })); + _summaryService.execute(makeLambdaTask([this, &promise,currentSerial]() { + promise.set_value(create(_docStore, _lastStats, currentSerial)); + })); return future.get(); } +SummaryCompactBloatTarget::SummaryCompactBloatTarget(vespalib::Executor & summaryService, IDocumentStore & docStore) + : SummaryGCTarget("summary.compact_bloat", summaryService, docStore) +{ +} + +size_t +SummaryCompactBloatTarget::getBloat(const search::IDocumentStore & docStore) const { + return docStore.getDiskBloat(); +} + +FlushTask::UP +SummaryCompactBloatTarget::create(IDocumentStore & docStore, FlushStats & stats, SerialNum currSerial) { + return std::make_unique<CompactBloat>(docStore, stats, currSerial); +} + +SummaryCompactSpreadTarget::SummaryCompactSpreadTarget(vespalib::Executor & summaryService, IDocumentStore & docStore) + : SummaryGCTarget("summary.compact_spread", summaryService, docStore) +{ +} + +size_t +SummaryCompactSpreadTarget::getBloat(const 
search::IDocumentStore & docStore) const { + return docStore.getMaxSpreadAsBloat(); +} + +FlushTask::UP +SummaryCompactSpreadTarget::create(IDocumentStore & docStore, FlushStats & stats, SerialNum currSerial) { + return std::make_unique<CompactSpread>(docStore, stats, currSerial); +} + } // namespace proton diff --git a/searchcore/src/vespa/searchcore/proton/docsummary/summarycompacttarget.h b/searchcore/src/vespa/searchcore/proton/docsummary/summarycompacttarget.h index c8035a544f2..083f763d8e6 100644 --- a/searchcore/src/vespa/searchcore/proton/docsummary/summarycompacttarget.h +++ b/searchcore/src/vespa/searchcore/proton/docsummary/summarycompacttarget.h @@ -12,16 +12,10 @@ namespace proton { /** * This class implements the IFlushTarget interface to proxy a summary manager. */ -class SummaryCompactTarget : public searchcorespi::IFlushTarget { -private: - using FlushStats = searchcorespi::FlushStats; - vespalib::Executor &_summaryService; - search::IDocumentStore & _docStore; - FlushStats _lastStats; - +class SummaryGCTarget : public searchcorespi::IFlushTarget { public: - SummaryCompactTarget(vespalib::Executor & summaryService, search::IDocumentStore & docStore); - + using FlushStats = searchcorespi::FlushStats; + using IDocumentStore = search::IDocumentStore; MemoryGain getApproxMemoryGain() const override; DiskGain getApproxDiskGain() const override; SerialNum getFlushedSerialNum() const override; @@ -31,6 +25,39 @@ public: FlushStats getLastFlushStats() const override { return _lastStats; } uint64_t getApproxBytesToWriteToDisk() const override { return 0; } +protected: + SummaryGCTarget(const vespalib::string &, vespalib::Executor & summaryService, IDocumentStore & docStore); +private: + + virtual size_t getBloat(const IDocumentStore & docStore) const = 0; + virtual Task::UP create(IDocumentStore & docStore, FlushStats & stats, SerialNum currSerial) = 0; + + vespalib::Executor &_summaryService; + IDocumentStore & _docStore; + FlushStats _lastStats; +}; + 
+/** + * Implements target to compact away removed documents. Wasted disk space is cost factor used for prioritizing. + */ +class SummaryCompactBloatTarget : public SummaryGCTarget { +private: + size_t getBloat(const search::IDocumentStore & docStore) const override; + Task::UP create(IDocumentStore & docStore, FlushStats & stats, SerialNum currSerial) override; +public: + SummaryCompactBloatTarget(vespalib::Executor & summaryService, IDocumentStore & docStore); +}; + +/** + * Target to ensure bucket spread is kept low. The cost is reported as a potential gain in disk space as + * we do not have a concept for bucket spread. + */ +class SummaryCompactSpreadTarget : public SummaryGCTarget { +private: + size_t getBloat(const search::IDocumentStore & docStore) const override; + Task::UP create(IDocumentStore & docStore, FlushStats & stats, SerialNum currSerial) override; +public: + SummaryCompactSpreadTarget(vespalib::Executor & summaryService, IDocumentStore & docStore); }; } // namespace proton diff --git a/searchcore/src/vespa/searchcore/proton/docsummary/summarymanager.cpp b/searchcore/src/vespa/searchcore/proton/docsummary/summarymanager.cpp index eaf5a907808..28a91e1444d 100644 --- a/searchcore/src/vespa/searchcore/proton/docsummary/summarymanager.cpp +++ b/searchcore/src/vespa/searchcore/proton/docsummary/summarymanager.cpp @@ -200,7 +200,8 @@ SummaryManager::getFlushTargets(vespalib::Executor & summaryService) IFlushTarget::List ret; ret.push_back(std::make_shared<SummaryFlushTarget>(getBackingStore(), summaryService)); if (dynamic_cast<LogDocumentStore *>(_docStore.get()) != nullptr) { - ret.push_back(std::make_shared<SummaryCompactTarget>(summaryService, getBackingStore())); + ret.push_back(std::make_shared<SummaryCompactBloatTarget>(summaryService, getBackingStore())); + ret.push_back(std::make_shared<SummaryCompactSpreadTarget>(summaryService, getBackingStore())); } ret.push_back(createShrinkLidSpaceFlushTarget(summaryService, _docStore)); return ret; diff 
--git a/searchcore/src/vespa/searchcore/proton/documentmetastore/documentmetastore.cpp b/searchcore/src/vespa/searchcore/proton/documentmetastore/documentmetastore.cpp index 3170654409b..28234730f7b 100644 --- a/searchcore/src/vespa/searchcore/proton/documentmetastore/documentmetastore.cpp +++ b/searchcore/src/vespa/searchcore/proton/documentmetastore/documentmetastore.cpp @@ -197,10 +197,7 @@ DocumentMetaStore::consider_compact_gid_to_lid_map() if (_gidToLidMap.getAllocator().getNodeStore().has_held_buffers()) { return false; } - auto &compaction_strategy = getConfig().getCompactionStrategy(); - size_t used_bytes = _cached_gid_to_lid_map_memory_usage.usedBytes(); - size_t dead_bytes = _cached_gid_to_lid_map_memory_usage.deadBytes(); - return compaction_strategy.should_compact_memory(used_bytes, dead_bytes); + return _should_compact_gid_to_lid_map; } void @@ -209,7 +206,7 @@ DocumentMetaStore::onCommit() if (consider_compact_gid_to_lid_map()) { incGeneration(); _changesSinceCommit = 0; - _gidToLidMap.compact_worst(); + _gidToLidMap.compact_worst(getConfig().getCompactionStrategy()); _gid_to_lid_map_write_itr_prepare_serial_num = 0u; _gid_to_lid_map_write_itr.begin(_gidToLidMap.getRoot()); incGeneration(); @@ -223,13 +220,15 @@ DocumentMetaStore::onCommit() void DocumentMetaStore::onUpdateStat() { + auto &compaction_strategy = getConfig().getCompactionStrategy(); vespalib::MemoryUsage usage = _metaDataStore.getMemoryUsage(); usage.incAllocatedBytesOnHold(getGenerationHolder().getHeldBytes()); size_t bvSize = _lidAlloc.getUsedLidsSize(); usage.incAllocatedBytes(bvSize); usage.incUsedBytes(bvSize); - _cached_gid_to_lid_map_memory_usage = _gidToLidMap.getMemoryUsage(); - usage.merge(_cached_gid_to_lid_map_memory_usage); + auto gid_to_lid_map_memory_usage = _gidToLidMap.getMemoryUsage(); + _should_compact_gid_to_lid_map = compaction_strategy.should_compact_memory(gid_to_lid_map_memory_usage); + usage.merge(gid_to_lid_map_memory_usage); // the free lists are not taken 
into account here updateStatistics(_metaDataStore.size(), _metaDataStore.size(), @@ -424,7 +423,7 @@ DocumentMetaStore::DocumentMetaStore(BucketDBOwnerSP bucketDB, _trackDocumentSizes(true), _changesSinceCommit(0), _op_listener(), - _cached_gid_to_lid_map_memory_usage() + _should_compact_gid_to_lid_map(false) { ensureSpace(0); // lid 0 is reserved setCommittedDocIdLimit(1u); // lid 0 is reserved diff --git a/searchcore/src/vespa/searchcore/proton/documentmetastore/documentmetastore.h b/searchcore/src/vespa/searchcore/proton/documentmetastore/documentmetastore.h index d78e98713ff..9e4977c65e1 100644 --- a/searchcore/src/vespa/searchcore/proton/documentmetastore/documentmetastore.h +++ b/searchcore/src/vespa/searchcore/proton/documentmetastore/documentmetastore.h @@ -77,7 +77,7 @@ private: bool _trackDocumentSizes; size_t _changesSinceCommit; OperationListenerSP _op_listener; - vespalib::MemoryUsage _cached_gid_to_lid_map_memory_usage; + bool _should_compact_gid_to_lid_map; DocId getFreeLid(); DocId peekFreeLid(); diff --git a/searchcore/src/vespa/searchcore/proton/server/document_scan_iterator.cpp b/searchcore/src/vespa/searchcore/proton/server/document_scan_iterator.cpp index 4265d4c7099..c0f27e729af 100644 --- a/searchcore/src/vespa/searchcore/proton/server/document_scan_iterator.cpp +++ b/searchcore/src/vespa/searchcore/proton/server/document_scan_iterator.cpp @@ -23,12 +23,9 @@ DocumentScanIterator::valid() const } DocumentMetaData -DocumentScanIterator::next(uint32_t compactLidLimit, bool retry) +DocumentScanIterator::next(uint32_t compactLidLimit) { - if (!retry) { - --_lastLid; - } - for (; _lastLid > compactLidLimit; --_lastLid) { + for (--_lastLid; _lastLid > compactLidLimit; --_lastLid) { if (_metaStore.validLid(_lastLid)) { const RawDocumentMetaData &metaData = _metaStore.getRawMetaData(_lastLid); return DocumentMetaData(_lastLid, metaData.getTimestamp(), diff --git a/searchcore/src/vespa/searchcore/proton/server/document_scan_iterator.h 
b/searchcore/src/vespa/searchcore/proton/server/document_scan_iterator.h index b7c5131171a..ebdeb902474 100644 --- a/searchcore/src/vespa/searchcore/proton/server/document_scan_iterator.h +++ b/searchcore/src/vespa/searchcore/proton/server/document_scan_iterator.h @@ -21,7 +21,7 @@ private: public: DocumentScanIterator(const IDocumentMetaStore &_metaStore); bool valid() const override; - search::DocumentMetaData next(uint32_t compactLidLimit, bool retry) override; + search::DocumentMetaData next(uint32_t compactLidLimit) override; }; } // namespace proton diff --git a/searchcore/src/vespa/searchcore/proton/server/documentdbconfigmanager.cpp b/searchcore/src/vespa/searchcore/proton/server/documentdbconfigmanager.cpp index a9873a80d0e..2eb6b1b92f0 100644 --- a/searchcore/src/vespa/searchcore/proton/server/documentdbconfigmanager.cpp +++ b/searchcore/src/vespa/searchcore/proton/server/documentdbconfigmanager.cpp @@ -48,6 +48,7 @@ using search::DocumentStore; using search::WriteableFileChunk; using std::make_shared; using std::make_unique; +using vespalib::datastore::CompactionStrategy; using vespalib::make_string_short::fmt; @@ -197,7 +198,7 @@ getStoreConfig(const ProtonConfig::Summary::Cache & cache, const HwInfo & hwInfo } LogDocumentStore::Config -deriveConfig(const ProtonConfig::Summary & summary, const ProtonConfig::Flush::Memory & flush, const HwInfo & hwInfo) { +deriveConfig(const ProtonConfig::Summary & summary, const HwInfo & hwInfo) { DocumentStore::Config config(getStoreConfig(summary.cache, hwInfo)); const ProtonConfig::Summary::Log & log(summary.log); const ProtonConfig::Summary::Log::Chunk & chunk(log.chunk); @@ -205,7 +206,6 @@ deriveConfig(const ProtonConfig::Summary & summary, const ProtonConfig::Flush::M LogDataStore::Config logConfig; logConfig.setMaxFileSize(log.maxfilesize) .setMaxNumLids(log.maxnumlids) - .setMaxDiskBloatFactor(std::min(flush.diskbloatfactor, flush.each.diskbloatfactor)) 
.setMaxBucketSpread(log.maxbucketspread).setMinFileSizeFactor(log.minfilesizefactor) .compactCompression(deriveCompression(log.compact.compression)) .setFileConfig(fileConfig).disableCrcOnRead(chunk.skipcrconread); @@ -213,7 +213,7 @@ deriveConfig(const ProtonConfig::Summary & summary, const ProtonConfig::Flush::M } search::LogDocumentStore::Config buildStoreConfig(const ProtonConfig & proton, const HwInfo & hwInfo) { - return deriveConfig(proton.summary, proton.flush.memory, hwInfo); + return deriveConfig(proton.summary, hwInfo); } using AttributesConfigSP = DocumentDBConfig::AttributesConfigSP; @@ -264,7 +264,7 @@ build_alloc_config(const ProtonConfig& proton_config, const vespalib::string& do auto& alloc_config = document_db_config_entry.allocation; auto& distribution_config = proton_config.distribution; search::GrowStrategy grow_strategy(alloc_config.initialnumdocs, alloc_config.growfactor, alloc_config.growbias, alloc_config.multivaluegrowfactor); - search::CompactionStrategy compaction_strategy(alloc_config.maxDeadBytesRatio, alloc_config.maxDeadAddressSpaceRatio); + CompactionStrategy compaction_strategy(alloc_config.maxDeadBytesRatio, alloc_config.maxDeadAddressSpaceRatio); return std::make_shared<const AllocConfig> (AllocStrategy(grow_strategy, compaction_strategy, alloc_config.amortizecount), distribution_config.redundancy, distribution_config.searchablecopies); diff --git a/searchcore/src/vespa/searchcore/proton/server/feed_handler_stats.cpp b/searchcore/src/vespa/searchcore/proton/server/feed_handler_stats.cpp index 0db388d2644..f5665c47529 100644 --- a/searchcore/src/vespa/searchcore/proton/server/feed_handler_stats.cpp +++ b/searchcore/src/vespa/searchcore/proton/server/feed_handler_stats.cpp @@ -1,6 +1,10 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
#include "feed_handler_stats.h" +#include <cassert> +#include <vespa/log/log.h> + +LOG_SETUP(".proton.server.feed_handler_stats"); namespace proton { @@ -66,4 +70,14 @@ FeedHandlerStats::reset_min_max() noexcept _max_latency.reset(); } +void +FeedOperationCounter::commitCompleted(size_t numOperations) { + assert(_commitsStarted > _commitsCompleted); + assert(_operationsStarted >= _operationsCompleted + numOperations); + _operationsCompleted += numOperations; + _commitsCompleted++; + LOG(spam, "%zu: onCommitDone(%zu) total=%zu left=%zu", + _commitsCompleted, numOperations, _operationsCompleted, operationsInFlight()); +} + } diff --git a/searchcore/src/vespa/searchcore/proton/server/feed_handler_stats.h b/searchcore/src/vespa/searchcore/proton/server/feed_handler_stats.h index 9c8d1b9190b..db93c157046 100644 --- a/searchcore/src/vespa/searchcore/proton/server/feed_handler_stats.h +++ b/searchcore/src/vespa/searchcore/proton/server/feed_handler_stats.h @@ -36,4 +36,41 @@ public: const std::optional<double>& get_max_latency() noexcept { return _max_latency; } }; +/** + * Keeps track of feed operations started, completed and being committed. + * Also tracks started and completed commit operations. 
+ */ +class FeedOperationCounter { +public: + FeedOperationCounter() + : _operationsStarted(0), + _operationsCompleted(0), + _operationsStartedAtLastCommitStart(0), + _commitsStarted(0), + _commitsCompleted(0) + {} + void startOperation() { ++_operationsStarted; } + void startCommit() { + _commitsStarted++; + _operationsStartedAtLastCommitStart = _operationsStarted; + } + + void commitCompleted(size_t numOperations); + + size_t operationsSinceLastCommitStart() const { + return _operationsStarted - _operationsStartedAtLastCommitStart; + } + size_t operationsInFlight() const { return _operationsStarted - _operationsCompleted; } + size_t commitsInFlight() const { return _commitsStarted - _commitsCompleted; } + bool shouldScheduleCommit() const { + return (operationsInFlight() > 0) && (commitsInFlight() == 0); + } +private: + size_t _operationsStarted; + size_t _operationsCompleted; + size_t _operationsStartedAtLastCommitStart; + size_t _commitsStarted; + size_t _commitsCompleted; +}; + } diff --git a/searchcore/src/vespa/searchcore/proton/server/feedhandler.cpp b/searchcore/src/vespa/searchcore/proton/server/feedhandler.cpp index 0ac91870eab..8b99c39dd65 100644 --- a/searchcore/src/vespa/searchcore/proton/server/feedhandler.cpp +++ b/searchcore/src/vespa/searchcore/proton/server/feedhandler.cpp @@ -23,7 +23,6 @@ #include <vespa/vespalib/util/exceptions.h> #include <vespa/vespalib/util/lambdatask.h> #include <cassert> -#include <unistd.h> #include <vespa/log/log.h> LOG_SETUP(".proton.server.feedhandler"); @@ -99,7 +98,7 @@ TlsMgrWriter::sync(SerialNum syncTo) bool res = _tls_mgr.getSession()->sync(syncTo, syncedTo); if (!res) { LOG(debug, "Tls sync failed, retrying"); - sleep(1); + std::this_thread::sleep_for(100ms); continue; } if (syncedTo >= syncTo) { @@ -284,7 +283,7 @@ FeedHandler::performDeleteBucket(FeedToken token, DeleteBucketOperation &op) { _activeFeedView->handleDeleteBucket(op, token); // Delete bucket itself, should no longer have documents. 
_bucketDBHandler->handleDeleteBucket(op.getBucketId()); - + initiateCommit(vespalib::steady_clock::now()); } void @@ -416,11 +415,9 @@ FeedHandler::FeedHandler(IThreadingService &writeService, _tlsReplayProgress(), _serialNum(0), _prunedSerialNum(0), - _replay_end_serial_num(0u), - _prepare_serial_num(0u), - _numOperationsPendingCommit(0), - _numOperationsCompleted(0), - _numCommitsCompleted(0), + _replay_end_serial_num(0), + _prepare_serial_num(0), + _numOperations(), _delayedPrune(false), _feedLock(), _feedState(make_shared<InitState>(getDocTypeName())), @@ -519,34 +516,32 @@ FeedHandler::getTransactionLogReplayDone() const { } void -FeedHandler::onCommitDone(size_t numPendingAtStart, vespalib::steady_time start_time) { - assert(numPendingAtStart <= _numOperationsPendingCommit); - _numOperationsPendingCommit -= numPendingAtStart; - _numOperationsCompleted += numPendingAtStart; - _numCommitsCompleted++; - if (_numOperationsPendingCommit > 0) { +FeedHandler::onCommitDone(size_t numOperations, vespalib::steady_time start_time) { + _numOperations.commitCompleted(numOperations); + if (_numOperations.shouldScheduleCommit()) { enqueCommitTask(); } - LOG(spam, "%zu: onCommitDone(%zu) total=%zu left=%zu", - _numCommitsCompleted, numPendingAtStart, _numOperationsCompleted, _numOperationsPendingCommit); vespalib::steady_time now = vespalib::steady_clock::now(); auto latency = vespalib::to_s(now - start_time); std::lock_guard guard(_stats_lock); - _stats.add_commit(numPendingAtStart, latency); + _stats.add_commit(numOperations, latency); } void FeedHandler::enqueCommitTask() { - _writeService.master().execute(makeLambdaTask([this, start_time(vespalib::steady_clock::now())]() { initiateCommit(start_time); })); + _writeService.master().execute(makeLambdaTask([this, start_time(vespalib::steady_clock::now())]() { + initiateCommit(start_time); + })); } void FeedHandler::initiateCommit(vespalib::steady_time start_time) { auto onCommitDoneContext = std::make_shared<OnCommitDone>( 
_writeService.master(), - makeLambdaTask([this, numPendingAtStart=_numOperationsPendingCommit, start_time]() { - onCommitDone(numPendingAtStart, start_time); + makeLambdaTask([this, operations=_numOperations.operationsSinceLastCommitStart(), start_time]() { + onCommitDone(operations, start_time); })); auto commitResult = _tlsWriter->startCommit(onCommitDoneContext); + _numOperations.startCommit(); if (_activeFeedView) { using KeepAlivePair = vespalib::KeepAlive<std::pair<CommitResult, DoneCallback>>; auto pair = std::make_pair(std::move(commitResult), std::move(onCommitDoneContext)); @@ -560,7 +555,8 @@ FeedHandler::appendOperation(const FeedOperation &op, TlsWriter::DoneCallback on const_cast<FeedOperation &>(op).setSerialNum(inc_serial_num()); } _tlsWriter->appendOperation(op, std::move(onDone)); - if (++_numOperationsPendingCommit == 1) { + _numOperations.startOperation(); + if (_numOperations.operationsInFlight() == 1) { enqueCommitTask(); } } diff --git a/searchcore/src/vespa/searchcore/proton/server/feedhandler.h b/searchcore/src/vespa/searchcore/proton/server/feedhandler.h index 39d1f0f47fb..417d9c21548 100644 --- a/searchcore/src/vespa/searchcore/proton/server/feedhandler.h +++ b/searchcore/src/vespa/searchcore/proton/server/feedhandler.h @@ -83,9 +83,7 @@ private: // the serial num of the last feed operation in the transaction log at startup before replay SerialNum _replay_end_serial_num; uint64_t _prepare_serial_num; - size_t _numOperationsPendingCommit; - size_t _numOperationsCompleted; - size_t _numCommitsCompleted; + FeedOperationCounter _numOperations; bool _delayedPrune; mutable std::shared_mutex _feedLock; FeedStateSP _feedState; diff --git a/searchcore/src/vespa/searchcore/proton/server/i_document_scan_iterator.h b/searchcore/src/vespa/searchcore/proton/server/i_document_scan_iterator.h index 60e3d1c6081..b3c14173b0e 100644 --- a/searchcore/src/vespa/searchcore/proton/server/i_document_scan_iterator.h +++ 
b/searchcore/src/vespa/searchcore/proton/server/i_document_scan_iterator.h @@ -27,9 +27,8 @@ struct IDocumentScanIterator * Returns an invalid document if no documents satisfy the limit. * * @param compactLidLimit The returned document must have lid larger than this limit. - * @param retry Whether we should start the scan with the previous returned document. */ - virtual search::DocumentMetaData next(uint32_t compactLidLimit, bool retry) = 0; + virtual search::DocumentMetaData next(uint32_t compactLidLimit) = 0; }; } // namespace proton diff --git a/searchcore/src/vespa/searchcore/proton/server/lid_space_compaction_job.cpp b/searchcore/src/vespa/searchcore/proton/server/lid_space_compaction_job.cpp index a5c1d1fc2c9..dcc19501dd1 100644 --- a/searchcore/src/vespa/searchcore/proton/server/lid_space_compaction_job.cpp +++ b/searchcore/src/vespa/searchcore/proton/server/lid_space_compaction_job.cpp @@ -17,7 +17,6 @@ #include <vespa/vespalib/util/lambdatask.h> #include <vespa/vespalib/util/gate.h> #include <cassert> -#include <thread> #include <vespa/log/log.h> LOG_SETUP(".proton.server.lidspace.compactionjob"); @@ -73,7 +72,7 @@ bool CompactionJob::scanDocuments(const LidUsageStats &stats) { if (_scanItr->valid()) { - DocumentMetaData document = getNextDocument(stats, false); + DocumentMetaData document = getNextDocument(stats); if (document.valid()) { Bucket metaBucket(document::Bucket(_bucketSpace, document.bucketId)); _bucketExecutor.execute(metaBucket, std::make_unique<MoveTask>(shared_from_this(), document, getLimiter().beginOperation())); @@ -190,9 +189,9 @@ CompactionJob::create(const DocumentDBLidSpaceCompactionConfig &config, } DocumentMetaData -CompactionJob::getNextDocument(const LidUsageStats &stats, bool retryLastDocument) +CompactionJob::getNextDocument(const LidUsageStats &stats) { - return _scanItr->next(std::max(stats.getLowestFreeLid(), stats.getUsedLids()), retryLastDocument); + return _scanItr->next(std::max(stats.getLowestFreeLid(), 
stats.getUsedLids())); } bool @@ -201,7 +200,6 @@ CompactionJob::run() if (isBlocked()) { return true; // indicate work is done since no work can be done } - LidUsageStats stats = _handler->getLidStatus(); if (remove_batch_is_ongoing()) { // Note that we don't set the job as blocked as the decision to un-block it is not driven externally. LOG(info, "%s: Lid space compaction is disabled while remove batch (delete buckets) is ongoing", @@ -223,7 +221,13 @@ CompactionJob::run() } if (_scanItr && !_scanItr->valid()) { - if (shouldRestartScanDocuments(_handler->getLidStatus())) { + bool numPending = getLimiter().numPending(); + if (numPending > 0) { + // We must wait to decide if a rescan is necessary until all operations are completed + return false; + } + LidUsageStats stats = _handler->getLidStatus(); + if (shouldRestartScanDocuments(stats)) { _scanItr = _handler->getIterator(); } else { _scanItr = IDocumentScanIterator::UP(); @@ -232,6 +236,7 @@ CompactionJob::run() } } + LidUsageStats stats = _handler->getLidStatus(); if (_scanItr) { return scanDocuments(stats); } else if (_shouldCompactLidSpace) { diff --git a/searchcore/src/vespa/searchcore/proton/server/lid_space_compaction_job.h b/searchcore/src/vespa/searchcore/proton/server/lid_space_compaction_job.h index 917ff12be4a..fcdcc322f65 100644 --- a/searchcore/src/vespa/searchcore/proton/server/lid_space_compaction_job.h +++ b/searchcore/src/vespa/searchcore/proton/server/lid_space_compaction_job.h @@ -58,7 +58,7 @@ private: void compactLidSpace(const search::LidUsageStats &stats); bool remove_batch_is_ongoing() const; bool remove_is_ongoing() const; - search::DocumentMetaData getNextDocument(const search::LidUsageStats &stats, bool retryLastDocument); + search::DocumentMetaData getNextDocument(const search::LidUsageStats &stats); bool scanDocuments(const search::LidUsageStats &stats); static void moveDocument(std::shared_ptr<CompactionJob> job, const search::DocumentMetaData & metaThen, @@ -98,4 +98,3 @@ public: 
}; } // namespace proton - diff --git a/searchcore/src/vespa/searchcore/proton/server/storeonlydocsubdb.cpp b/searchcore/src/vespa/searchcore/proton/server/storeonlydocsubdb.cpp index 06d174497b3..6b1356da50e 100644 --- a/searchcore/src/vespa/searchcore/proton/server/storeonlydocsubdb.cpp +++ b/searchcore/src/vespa/searchcore/proton/server/storeonlydocsubdb.cpp @@ -30,7 +30,6 @@ #include <vespa/log/log.h> LOG_SETUP(".proton.server.storeonlydocsubdb"); -using search::CompactionStrategy; using search::GrowStrategy; using vespalib::makeLambdaTask; using search::index::Schema; @@ -43,6 +42,7 @@ using vespalib::GenericHeader; using search::common::FileHeaderContext; using proton::initializer::InitializerTask; using searchcorespi::IFlushTarget; +using vespalib::datastore::CompactionStrategy; namespace proton { @@ -422,7 +422,7 @@ namespace { constexpr double RETIRED_DEAD_RATIO = 0.5; struct UpdateConfig : public search::attribute::IAttributeFunctor { - UpdateConfig(search::CompactionStrategy compactionStrategy) noexcept + UpdateConfig(CompactionStrategy compactionStrategy) noexcept : _compactionStrategy(compactionStrategy) {} void operator()(search::attribute::IAttributeVector &iAttributeVector) override { @@ -433,15 +433,15 @@ struct UpdateConfig : public search::attribute::IAttributeFunctor { attributeVector->update_config(cfg); } } - search::CompactionStrategy _compactionStrategy; + CompactionStrategy _compactionStrategy; }; } -search::CompactionStrategy -StoreOnlyDocSubDB::computeCompactionStrategy(search::CompactionStrategy strategy) const { +CompactionStrategy +StoreOnlyDocSubDB::computeCompactionStrategy(CompactionStrategy strategy) const { return isNodeRetired() - ? search::CompactionStrategy(RETIRED_DEAD_RATIO, RETIRED_DEAD_RATIO) + ? 
CompactionStrategy(RETIRED_DEAD_RATIO, RETIRED_DEAD_RATIO) : strategy; } @@ -464,7 +464,7 @@ StoreOnlyDocSubDB::setBucketStateCalculator(const std::shared_ptr<IBucketStateCa bool wasNodeRetired = isNodeRetired(); _nodeRetired = calc->nodeRetired(); if (wasNodeRetired != isNodeRetired()) { - search::CompactionStrategy compactionStrategy = computeCompactionStrategy(_lastConfiguredCompactionStrategy); + CompactionStrategy compactionStrategy = computeCompactionStrategy(_lastConfiguredCompactionStrategy); auto cfg = _dms->getConfig(); cfg.setCompactionStrategy(compactionStrategy); _dms->update_config(cfg); @@ -474,7 +474,7 @@ StoreOnlyDocSubDB::setBucketStateCalculator(const std::shared_ptr<IBucketStateCa void StoreOnlyDocSubDB::reconfigureAttributesConsideringNodeState(OnDone onDone) { - search::CompactionStrategy compactionStrategy = computeCompactionStrategy(_lastConfiguredCompactionStrategy); + CompactionStrategy compactionStrategy = computeCompactionStrategy(_lastConfiguredCompactionStrategy); auto attrMan = getAttributeManager(); if (attrMan) { attrMan->asyncForEachAttribute(std::make_shared<UpdateConfig>(compactionStrategy), std::move(onDone)); diff --git a/searchcore/src/vespa/searchcore/proton/server/storeonlydocsubdb.h b/searchcore/src/vespa/searchcore/proton/server/storeonlydocsubdb.h index b53dfe89f59..d43b865c000 100644 --- a/searchcore/src/vespa/searchcore/proton/server/storeonlydocsubdb.h +++ b/searchcore/src/vespa/searchcore/proton/server/storeonlydocsubdb.h @@ -153,7 +153,7 @@ private: std::shared_ptr<ShrinkLidSpaceFlushTarget> _dmsShrinkTarget; std::shared_ptr<PendingLidTrackerBase> _pendingLidsForCommit; bool _nodeRetired; - search::CompactionStrategy _lastConfiguredCompactionStrategy; + vespalib::datastore::CompactionStrategy _lastConfiguredCompactionStrategy; IFlushTargetList getFlushTargets() override; protected: @@ -234,7 +234,7 @@ public: std::shared_ptr<IDocumentDBReference> getDocumentDBReference() override; void 
tearDownReferences(IDocumentDBReferenceResolver &resolver) override; PendingLidTrackerBase & getUncommittedLidsTracker() override { return *_pendingLidsForCommit; } - search::CompactionStrategy computeCompactionStrategy(search::CompactionStrategy strategy) const; + vespalib::datastore::CompactionStrategy computeCompactionStrategy(vespalib::datastore::CompactionStrategy strategy) const; bool isNodeRetired() const { return _nodeRetired; } }; diff --git a/searchcore/src/vespa/searchcore/proton/test/dummy_document_store.h b/searchcore/src/vespa/searchcore/proton/test/dummy_document_store.h index d9b83bfc3a8..7194cc4d403 100644 --- a/searchcore/src/vespa/searchcore/proton/test/dummy_document_store.h +++ b/searchcore/src/vespa/searchcore/proton/test/dummy_document_store.h @@ -10,13 +10,11 @@ struct DummyDocumentStore : public search::IDocumentStore { vespalib::string _baseDir; - DummyDocumentStore() - : _baseDir("") - {} + DummyDocumentStore() = default; DummyDocumentStore(const vespalib::string &baseDir) : _baseDir(baseDir) {} - ~DummyDocumentStore() {} + ~DummyDocumentStore() = default; DocumentUP read(search::DocumentIdT, const document::DocumentTypeRepo &) const override { return DocumentUP(); } @@ -25,7 +23,8 @@ struct DummyDocumentStore : public search::IDocumentStore void remove(uint64_t, search::DocumentIdT) override {} void flush(uint64_t) override {} uint64_t initFlush(uint64_t) override { return 0; } - void compact(uint64_t) override {} + void compactBloat(uint64_t) override {} + void compactSpread(uint64_t) override {} uint64_t lastSyncToken() const override { return 0; } uint64_t tentativeLastSyncToken() const override { return 0; } vespalib::system_time getLastFlushTime() const override { return vespalib::system_time(); } @@ -34,7 +33,7 @@ struct DummyDocumentStore : public search::IDocumentStore size_t memoryMeta() const override { return 0; } size_t getDiskFootprint() const override { return 0; } size_t getDiskBloat() const override { return 0; } - size_t 
getMaxCompactGain() const override { return getDiskBloat(); } + size_t getMaxSpreadAsBloat() const override { return getDiskBloat(); } search::CacheStats getCacheStats() const override { return search::CacheStats(); } const vespalib::string &getBaseDir() const override { return _baseDir; } void accept(search::IDocumentStoreReadVisitor &, diff --git a/searchcore/src/vespa/searchcore/proton/test/test.h b/searchcore/src/vespa/searchcore/proton/test/test.h index 1494823e899..4231d5e7717 100644 --- a/searchcore/src/vespa/searchcore/proton/test/test.h +++ b/searchcore/src/vespa/searchcore/proton/test/test.h @@ -5,7 +5,6 @@ #include "bucketdocuments.h" #include "bucketstatecalculator.h" #include "document.h" -#include "dummy_document_store.h" #include "dummy_feed_view.h" #include "dummy_summary_manager.h" #include "resulthandler.h" diff --git a/searchlib/abi-spec.json b/searchlib/abi-spec.json index e5611324254..2d7daf2300e 100644 --- a/searchlib/abi-spec.json +++ b/searchlib/abi-spec.json @@ -1457,6 +1457,7 @@ "protected void <init>(com.google.common.collect.ImmutableMap, java.util.Map)", "public com.yahoo.searchlib.rankingexpression.ExpressionFunction getFunction(java.lang.String)", "protected com.google.common.collect.ImmutableMap functions()", + "protected java.util.Map getFunctions()", "public java.lang.String getBinding(java.lang.String)", "public com.yahoo.searchlib.rankingexpression.rule.FunctionReferenceContext withBindings(java.util.Map)", "public com.yahoo.searchlib.rankingexpression.rule.FunctionReferenceContext withoutBindings()" @@ -1611,6 +1612,7 @@ "public void <init>(java.util.Map)", "public void <init>(java.util.Collection, java.util.Map)", "public void <init>(java.util.Collection, java.util.Map, java.util.Map)", + "public void <init>(java.util.Map, java.util.Map, java.util.Map)", "public void <init>(com.google.common.collect.ImmutableMap, java.util.Map, java.util.Map)", "public void addFunctionSerialization(java.lang.String, java.lang.String)", "public 
void addArgumentTypeSerialization(java.lang.String, java.lang.String, com.yahoo.tensor.TensorType)", diff --git a/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/rule/FunctionReferenceContext.java b/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/rule/FunctionReferenceContext.java index f0586297b0d..287bc2655f5 100644 --- a/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/rule/FunctionReferenceContext.java +++ b/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/rule/FunctionReferenceContext.java @@ -17,7 +17,7 @@ import java.util.Map; public class FunctionReferenceContext { /** Expression functions indexed by name */ - private final ImmutableMap<String, ExpressionFunction> functions; + private final Map<String, ExpressionFunction> functions; /** Mapping from argument names to the expressions they resolve to */ private final Map<String, String> bindings = new HashMap<>(); @@ -43,26 +43,32 @@ public class FunctionReferenceContext { /** Create a context for a single serialization task */ public FunctionReferenceContext(Map<String, ExpressionFunction> functions, Map<String, String> bindings) { - this(ImmutableMap.copyOf(functions), bindings); + this.functions = Map.copyOf(functions); + if (bindings != null) + this.bindings.putAll(bindings); } + /** @deprecated Use {@link #FunctionReferenceContext(Map, Map)} instead */ + @Deprecated(forRemoval = true, since = "7") protected FunctionReferenceContext(ImmutableMap<String, ExpressionFunction> functions, Map<String, String> bindings) { - this.functions = functions; - if (bindings != null) - this.bindings.putAll(bindings); + this((Map<String, ExpressionFunction>)functions, bindings); } - private static ImmutableMap<String, ExpressionFunction> toMap(Collection<ExpressionFunction> list) { - ImmutableMap.Builder<String,ExpressionFunction> mapBuilder = new ImmutableMap.Builder<>(); + private static Map<String, ExpressionFunction> toMap(Collection<ExpressionFunction> list) { + 
Map<String, ExpressionFunction> mapBuilder = new HashMap<>(); for (ExpressionFunction function : list) mapBuilder.put(function.getName(), function); - return mapBuilder.build(); + return Map.copyOf(mapBuilder); } /** Returns a function or null if it isn't defined in this context */ public ExpressionFunction getFunction(String name) { return functions.get(name); } - protected ImmutableMap<String, ExpressionFunction> functions() { return functions; } + /** @deprecated Use {@link #getFunctions()} instead */ + @Deprecated(forRemoval = true, since = "7") + protected ImmutableMap<String, ExpressionFunction> functions() { return ImmutableMap.copyOf(functions); } + + protected Map<String, ExpressionFunction> getFunctions() { return functions; } /** Returns the resolution of an identifier, or null if it isn't defined in this context */ public String getBinding(String name) { return bindings.get(name); } diff --git a/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/rule/SerializationContext.java b/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/rule/SerializationContext.java index cd2f966cc22..535ad013caf 100644 --- a/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/rule/SerializationContext.java +++ b/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/rule/SerializationContext.java @@ -8,6 +8,7 @@ import com.yahoo.tensor.TensorType; import java.util.Collection; import java.util.Collections; +import java.util.HashMap; import java.util.LinkedHashMap; import java.util.Map; @@ -54,11 +55,11 @@ public class SerializationContext extends FunctionReferenceContext { this(toMap(functions), bindings, serializedFunctions); } - private static ImmutableMap<String, ExpressionFunction> toMap(Collection<ExpressionFunction> list) { - ImmutableMap.Builder<String,ExpressionFunction> mapBuilder = new ImmutableMap.Builder<>(); + private static Map<String, ExpressionFunction> toMap(Collection<ExpressionFunction> list) { + 
Map<String,ExpressionFunction> mapBuilder = new HashMap<>(); for (ExpressionFunction function : list) mapBuilder.put(function.getName(), function); - return mapBuilder.build(); + return Map.copyOf(mapBuilder); } /** @@ -69,12 +70,19 @@ public class SerializationContext extends FunctionReferenceContext { * @param serializedFunctions a cache of serializedFunctions - the ownership of this map * is <b>transferred</b> to this and will be modified in it */ - public SerializationContext(ImmutableMap<String,ExpressionFunction> functions, Map<String, String> bindings, + public SerializationContext(Map<String,ExpressionFunction> functions, Map<String, String> bindings, Map<String, String> serializedFunctions) { super(functions, bindings); this.serializedFunctions = serializedFunctions; } + /** @deprecated Use {@link #SerializationContext(Map, Map, Map) instead}*/ + @Deprecated(forRemoval = true, since = "7") + public SerializationContext(ImmutableMap<String,ExpressionFunction> functions, Map<String, String> bindings, + Map<String, String> serializedFunctions) { + this((Map<String, ExpressionFunction>)functions, bindings, serializedFunctions); + } + /** Adds the serialization of a function */ public void addFunctionSerialization(String name, String expressionString) { serializedFunctions.put(name, expressionString); @@ -93,13 +101,13 @@ public class SerializationContext extends FunctionReferenceContext { @Override public SerializationContext withBindings(Map<String, String> bindings) { - return new SerializationContext(functions(), bindings, this.serializedFunctions); + return new SerializationContext(getFunctions(), bindings, this.serializedFunctions); } /** Returns a fresh context without bindings */ @Override public SerializationContext withoutBindings() { - return new SerializationContext(functions(), null, this.serializedFunctions); + return new SerializationContext(getFunctions(), null, this.serializedFunctions); } public Map<String, String> serializedFunctions() { 
return serializedFunctions; } diff --git a/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/rule/TensorFunctionNode.java b/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/rule/TensorFunctionNode.java index ba5a243464e..d873963bb6e 100644 --- a/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/rule/TensorFunctionNode.java +++ b/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/rule/TensorFunctionNode.java @@ -328,7 +328,14 @@ public class TensorFunctionNode extends CompositeNode { /** Returns a function or null if it isn't defined in this context */ public ExpressionFunction getFunction(String name) { return wrappedSerializationContext.getFunction(name); } - protected ImmutableMap<String, ExpressionFunction> functions() { return wrappedSerializationContext.functions(); } + /** @deprecated Use {@link #getFunctions()} instead */ + @SuppressWarnings("removal") + @Deprecated(forRemoval = true, since = "7") + protected ImmutableMap<String, ExpressionFunction> functions() { + return ImmutableMap.copyOf(wrappedSerializationContext.getFunctions()); + } + + @Override protected Map<String, ExpressionFunction> getFunctions() { return wrappedSerializationContext.getFunctions(); } public ToStringContext parent() { return wrappedToStringContext; } @@ -344,14 +351,14 @@ public class TensorFunctionNode extends CompositeNode { /** Returns a new context with the bindings replaced by the given bindings */ @Override public ExpressionToStringContext withBindings(Map<String, String> bindings) { - SerializationContext serializationContext = new SerializationContext(functions(), bindings, serializedFunctions()); + SerializationContext serializationContext = new SerializationContext(getFunctions(), bindings, serializedFunctions()); return new ExpressionToStringContext(serializationContext, wrappedToStringContext, path, parent); } /** Returns a fresh context without bindings */ @Override public SerializationContext withoutBindings() { - 
SerializationContext serializationContext = new SerializationContext(functions(), null, serializedFunctions()); + SerializationContext serializationContext = new SerializationContext(getFunctions(), null, serializedFunctions()); return new ExpressionToStringContext(serializationContext, null, path, parent); } } diff --git a/searchlib/src/tests/attribute/attribute_header/attribute_header_test.cpp b/searchlib/src/tests/attribute/attribute_header/attribute_header_test.cpp index 3c8c9ff17e0..16a04a746f3 100644 --- a/searchlib/src/tests/attribute/attribute_header/attribute_header_test.cpp +++ b/searchlib/src/tests/attribute/attribute_header/attribute_header_test.cpp @@ -49,7 +49,7 @@ void verify_roundtrip_serialization(const HnswIPO& hnsw_params_in) { auto gen_header = populate_header(hnsw_params_in); - auto attr_header = AttributeHeader::extractTags(gen_header); + auto attr_header = AttributeHeader::extractTags(gen_header, file_name); EXPECT_EQ(tensor_cfg.basicType(), attr_header.getBasicType()); EXPECT_EQ(tensor_cfg.collectionType(), attr_header.getCollectionType()); diff --git a/searchlib/src/tests/attribute/compaction/attribute_compaction_test.cpp b/searchlib/src/tests/attribute/compaction/attribute_compaction_test.cpp index 801aa9341fb..fbec89d27eb 100644 --- a/searchlib/src/tests/attribute/compaction/attribute_compaction_test.cpp +++ b/searchlib/src/tests/attribute/compaction/attribute_compaction_test.cpp @@ -16,6 +16,7 @@ using search::attribute::Config; using search::attribute::BasicType; using search::attribute::CollectionType; using vespalib::AddressSpace; +using vespalib::datastore::CompactionStrategy; using AttributePtr = AttributeVector::SP; using AttributeStatus = search::attribute::Status; @@ -237,7 +238,7 @@ TEST_F("Compaction limits address space usage (dead) when free lists are NOT use { populate_and_hammer(f, true); AddressSpace afterSpace = f.getMultiValueAddressSpaceUsage("after"); - 
EXPECT_GREATER(search::CompactionStrategy::DEAD_ADDRESS_SPACE_SLACK, afterSpace.dead()); + EXPECT_GREATER(CompactionStrategy::DEAD_ADDRESS_SPACE_SLACK, afterSpace.dead()); } TEST_F("Compaction is not executed when free lists are used", @@ -266,7 +267,7 @@ TEST_F("Compaction is peformed when compaction strategy is changed to enable com f._v->commit(); // new commit might trigger further compaction after2 = f.getMultiValueAddressSpaceUsage("after2"); } - EXPECT_GREATER(search::CompactionStrategy::DEAD_ADDRESS_SPACE_SLACK, after2.dead()); + EXPECT_GREATER(CompactionStrategy::DEAD_ADDRESS_SPACE_SLACK, after2.dead()); } TEST_MAIN() { TEST_RUN_ALL(); } diff --git a/searchlib/src/tests/attribute/enum_attribute_compaction/enum_attribute_compaction_test.cpp b/searchlib/src/tests/attribute/enum_attribute_compaction/enum_attribute_compaction_test.cpp index 2c8fc2966b0..b30b3e4eb71 100644 --- a/searchlib/src/tests/attribute/enum_attribute_compaction/enum_attribute_compaction_test.cpp +++ b/searchlib/src/tests/attribute/enum_attribute_compaction/enum_attribute_compaction_test.cpp @@ -175,7 +175,7 @@ void CompactionTest<VectorType>::test_enum_store_compaction() { constexpr uint32_t canary_stride = 256; - uint32_t dead_limit = search::CompactionStrategy::DEAD_BYTES_SLACK / 8; + uint32_t dead_limit = vespalib::datastore::CompactionStrategy::DEAD_BYTES_SLACK / 8; uint32_t doc_count = dead_limit * 3; if (_v->hasMultiValue() || std::is_same_v<VectorType,StringAttribute>) { doc_count /= 2; diff --git a/searchlib/src/tests/attribute/enumstore/enumstore_test.cpp b/searchlib/src/tests/attribute/enumstore/enumstore_test.cpp index 9c25429932b..5346cc7f764 100644 --- a/searchlib/src/tests/attribute/enumstore/enumstore_test.cpp +++ b/searchlib/src/tests/attribute/enumstore/enumstore_test.cpp @@ -7,7 +7,23 @@ LOG_SETUP("enumstore_test"); using Type = search::DictionaryConfig::Type; +using vespalib::datastore::CompactionStrategy; using vespalib::datastore::EntryRef; +using 
vespalib::datastore::EntryRefFilter; +using RefT = vespalib::datastore::EntryRefT<22>; + +namespace vespalib::datastore { + +/* + * Print EntryRef as RefT which is used by test_normalize_posting_lists and + * test_foreach_posting_list to differentiate between buffers + */ +void PrintTo(const EntryRef &ref, std::ostream* os) { + RefT iref(ref); + *os << "RefT(" << iref.offset() << "," << iref.bufferId() << ")"; +} + +} namespace search { @@ -346,16 +362,16 @@ TEST(EnumStoreTest, address_space_usage_is_reported) NumericEnumStore store(false, DictionaryConfig::Type::BTREE); using vespalib::AddressSpace; - EXPECT_EQ(AddressSpace(1, 1, ADDRESS_LIMIT), store.get_address_space_usage()); + EXPECT_EQ(AddressSpace(1, 1, ADDRESS_LIMIT), store.get_values_address_space_usage()); EnumIndex idx1 = store.insert(10); - EXPECT_EQ(AddressSpace(2, 1, ADDRESS_LIMIT), store.get_address_space_usage()); + EXPECT_EQ(AddressSpace(2, 1, ADDRESS_LIMIT), store.get_values_address_space_usage()); EnumIndex idx2 = store.insert(20); // Address limit increases because buffer is re-sized. 
- EXPECT_EQ(AddressSpace(3, 1, ADDRESS_LIMIT + 2), store.get_address_space_usage()); + EXPECT_EQ(AddressSpace(3, 1, ADDRESS_LIMIT + 2), store.get_values_address_space_usage()); dec_ref_count(store, idx1); - EXPECT_EQ(AddressSpace(3, 2, ADDRESS_LIMIT + 2), store.get_address_space_usage()); + EXPECT_EQ(AddressSpace(3, 2, ADDRESS_LIMIT + 2), store.get_values_address_space_usage()); dec_ref_count(store, idx2); - EXPECT_EQ(AddressSpace(3, 3, ADDRESS_LIMIT + 2), store.get_address_space_usage()); + EXPECT_EQ(AddressSpace(3, 3, ADDRESS_LIMIT + 2), store.get_values_address_space_usage()); } class BatchUpdaterTest : public ::testing::Test { @@ -597,6 +613,11 @@ public: void update_posting_idx(EnumIndex enum_idx, EntryRef old_posting_idx, EntryRef new_posting_idx); EnumIndex insert_value(size_t value_idx); + void populate_sample_data(uint32_t cnt); + std::vector<EntryRef> get_sample_values(uint32_t cnt); + void clear_sample_values(uint32_t cnt); + void test_normalize_posting_lists(bool use_filter, bool one_filter); + void test_foreach_posting_list(bool one_filter); static EntryRef fake_pidx() { return EntryRef(42); } }; @@ -620,6 +641,149 @@ EnumStoreDictionaryTest<EnumStoreTypeAndDictionaryType>::insert_value(size_t val return enum_idx; } +namespace { +/* + * large_population should trigger multiple callbacks from normalize_values + * and foreach_value + */ +constexpr uint32_t large_population = 1200; + +uint32_t select_buffer(uint32_t i) { + if ((i % 2) == 0) { + return 0; + } + if ((i % 3) == 0) { + return 1; + } + if ((i % 5) == 0) { + return 2; + } + return 3; +} + +EntryRef make_fake_pidx(uint32_t i) { return RefT(i + 200, select_buffer(i)); } +EntryRef make_fake_adjusted_pidx(uint32_t i) { return RefT(i + 500, select_buffer(i)); } +EntryRef adjust_fake_pidx(EntryRef ref) { RefT iref(ref); return RefT(iref.offset() + 300, iref.bufferId()); } + +} + + +template <typename EnumStoreTypeAndDictionaryType> +void 
+EnumStoreDictionaryTest<EnumStoreTypeAndDictionaryType>::populate_sample_data(uint32_t cnt) +{ + auto& dict = store.get_dictionary(); + for (uint32_t i = 0; i < cnt; ++i) { + auto enum_idx = store.insert(i); + EXPECT_TRUE(enum_idx.valid()); + EntryRef posting_idx(make_fake_pidx(i)); + dict.update_posting_list(enum_idx, store.get_comparator(), [posting_idx](EntryRef) noexcept -> EntryRef { return posting_idx; }); + } +} + +template <typename EnumStoreTypeAndDictionaryType> +std::vector<EntryRef> +EnumStoreDictionaryTest<EnumStoreTypeAndDictionaryType>::get_sample_values(uint32_t cnt) +{ + std::vector<EntryRef> result; + result.reserve(cnt); + store.freeze_dictionary(); + auto& dict = store.get_dictionary(); + for (uint32_t i = 0; i < cnt; ++i) { + auto compare = store.make_comparator(i); + auto enum_idx = dict.find(compare); + EXPECT_TRUE(enum_idx.valid()); + EntryRef posting_idx; + dict.update_posting_list(enum_idx, compare, [&posting_idx](EntryRef ref) noexcept { posting_idx = ref; return ref; });; + auto find_result = dict.find_posting_list(compare, dict.get_frozen_root()); + EXPECT_EQ(enum_idx, find_result.first); + EXPECT_EQ(posting_idx, find_result.second); + result.emplace_back(find_result.second); + } + return result; +} + +template <typename EnumStoreTypeAndDictionaryType> +void +EnumStoreDictionaryTest<EnumStoreTypeAndDictionaryType>::clear_sample_values(uint32_t cnt) +{ + auto& dict = store.get_dictionary(); + for (uint32_t i = 0; i < cnt; ++i) { + auto comparator = store.make_comparator(i); + auto enum_idx = dict.find(comparator); + EXPECT_TRUE(enum_idx.valid()); + dict.update_posting_list(enum_idx, comparator, [](EntryRef) noexcept -> EntryRef { return EntryRef(); }); + } +} + +namespace { + +EntryRefFilter make_entry_ref_filter(bool one_filter) +{ + if (one_filter) { + EntryRefFilter filter(RefT::numBuffers(), RefT::offset_bits); + filter.add_buffer(3); + return filter; + } + return EntryRefFilter::create_all_filter(RefT::numBuffers(), 
RefT::offset_bits); +} + +} + +template <typename EnumStoreTypeAndDictionaryType> +void +EnumStoreDictionaryTest<EnumStoreTypeAndDictionaryType>::test_normalize_posting_lists(bool use_filter, bool one_filter) +{ + populate_sample_data(large_population); + auto& dict = store.get_dictionary(); + std::vector<EntryRef> exp_refs; + std::vector<EntryRef> exp_adjusted_refs; + exp_refs.reserve(large_population); + exp_adjusted_refs.reserve(large_population); + for (uint32_t i = 0; i < large_population; ++i) { + exp_refs.emplace_back(make_fake_pidx(i)); + if (!use_filter || !one_filter || select_buffer(i) == 3) { + exp_adjusted_refs.emplace_back(make_fake_adjusted_pidx(i)); + } else { + exp_adjusted_refs.emplace_back(make_fake_pidx(i)); + } + } + EXPECT_EQ(exp_refs, get_sample_values(large_population)); + if (use_filter) { + auto filter = make_entry_ref_filter(one_filter); + auto dummy = [](std::vector<EntryRef>&) noexcept { }; + auto adjust_refs = [](std::vector<EntryRef> &refs) noexcept { for (auto &ref : refs) { ref = adjust_fake_pidx(ref); } }; + EXPECT_FALSE(dict.normalize_posting_lists(dummy, filter)); + EXPECT_EQ(exp_refs, get_sample_values(large_population)); + EXPECT_TRUE(dict.normalize_posting_lists(adjust_refs, filter)); + } else { + auto dummy = [](EntryRef posting_idx) noexcept { return posting_idx; }; + auto adjust_refs = [](EntryRef ref) noexcept { return adjust_fake_pidx(ref); }; + EXPECT_FALSE(dict.normalize_posting_lists(dummy)); + EXPECT_EQ(exp_refs, get_sample_values(large_population)); + EXPECT_TRUE(dict.normalize_posting_lists(adjust_refs)); + } + EXPECT_EQ(exp_adjusted_refs, get_sample_values(large_population)); + clear_sample_values(large_population); +} + +template <typename EnumStoreTypeAndDictionaryType> +void +EnumStoreDictionaryTest<EnumStoreTypeAndDictionaryType>::test_foreach_posting_list(bool one_filter) +{ + auto filter = make_entry_ref_filter(one_filter); + populate_sample_data(large_population); + auto& dict = store.get_dictionary(); + 
std::vector<EntryRef> exp_refs; + auto save_exp_refs = [&exp_refs](std::vector<EntryRef>& refs) { exp_refs.insert(exp_refs.end(), refs.begin(), refs.end()); }; + EXPECT_FALSE(dict.normalize_posting_lists(save_exp_refs, filter)); + std::vector<EntryRef> act_refs; + auto save_act_refs = [&act_refs](const std::vector<EntryRef>& refs) { act_refs.insert(act_refs.end(), refs.begin(), refs.end()); }; + dict.foreach_posting_list(save_act_refs, filter); + EXPECT_EQ(exp_refs, act_refs); + clear_sample_values(large_population); +} + // Disable warnings emitted by gtest generated files when using typed tests #pragma GCC diagnostic push #ifndef __clang__ @@ -678,26 +842,27 @@ TYPED_TEST(EnumStoreDictionaryTest, find_posting_list_works) TYPED_TEST(EnumStoreDictionaryTest, normalize_posting_lists_works) { - auto value_0_idx = this->insert_value(0); - this->update_posting_idx(value_0_idx, EntryRef(), this->fake_pidx()); - this->store.freeze_dictionary(); - auto& dict = this->store.get_dictionary(); - auto root = dict.get_frozen_root(); - auto find_result = dict.find_posting_list(this->make_bound_comparator(0), root); - EXPECT_EQ(value_0_idx, find_result.first); - EXPECT_EQ(this->fake_pidx(), find_result.second); - auto dummy = [](EntryRef posting_idx) noexcept { return posting_idx; }; - std::vector<EntryRef> saved_refs; - auto save_refs_and_clear = [&saved_refs](EntryRef posting_idx) { saved_refs.push_back(posting_idx); return EntryRef(); }; - EXPECT_FALSE(dict.normalize_posting_lists(dummy)); - EXPECT_TRUE(dict.normalize_posting_lists(save_refs_and_clear)); - EXPECT_FALSE(dict.normalize_posting_lists(save_refs_and_clear)); - EXPECT_EQ((std::vector<EntryRef>{ this->fake_pidx(), EntryRef() }), saved_refs); - this->store.freeze_dictionary(); - root = dict.get_frozen_root(); - find_result = dict.find_posting_list(this->make_bound_comparator(0), root); - EXPECT_EQ(value_0_idx, find_result.first); - EXPECT_EQ(EntryRef(), find_result.second); + this->test_normalize_posting_lists(false, 
false); +} + +TYPED_TEST(EnumStoreDictionaryTest, normalize_posting_lists_with_all_filter_works) +{ + this->test_normalize_posting_lists(true, false); +} + +TYPED_TEST(EnumStoreDictionaryTest, normalize_posting_lists_with_one_filter_works) +{ + this->test_normalize_posting_lists(true, true); +} + +TYPED_TEST(EnumStoreDictionaryTest, foreach_posting_list_with_all_filter_works) +{ + this->test_foreach_posting_list(false); +} + +TYPED_TEST(EnumStoreDictionaryTest, foreach_posting_list_with_one_filter_works) +{ + this->test_foreach_posting_list(true); } namespace { @@ -714,7 +879,7 @@ void inc_generation(generation_t &gen, NumericEnumStore &store) TYPED_TEST(EnumStoreDictionaryTest, compact_worst_works) { - size_t entry_count = (search::CompactionStrategy::DEAD_BYTES_SLACK / 8) + 40; + size_t entry_count = (CompactionStrategy::DEAD_BYTES_SLACK / 8) + 40; auto updater = this->store.make_batch_updater(); for (int32_t i = 0; (size_t) i < entry_count; ++i) { auto idx = updater.insert(i); @@ -727,15 +892,15 @@ TYPED_TEST(EnumStoreDictionaryTest, compact_worst_works) inc_generation(gen, this->store); auto& dict = this->store.get_dictionary(); if (dict.get_has_btree_dictionary()) { - EXPECT_LT(search::CompactionStrategy::DEAD_BYTES_SLACK, dict.get_btree_memory_usage().deadBytes()); + EXPECT_LT(CompactionStrategy::DEAD_BYTES_SLACK, dict.get_btree_memory_usage().deadBytes()); } if (dict.get_has_hash_dictionary()) { - EXPECT_LT(search::CompactionStrategy::DEAD_BYTES_SLACK, dict.get_hash_memory_usage().deadBytes()); + EXPECT_LT(CompactionStrategy::DEAD_BYTES_SLACK, dict.get_hash_memory_usage().deadBytes()); } int compact_count = 0; - search::CompactionStrategy compaction_strategy; + CompactionStrategy compaction_strategy; for (uint32_t i = 0; i < 15; ++i) { - this->store.update_stat(); + this->store.update_stat(compaction_strategy); if (this->store.consider_compact_dictionary(compaction_strategy)) { ++compact_count; } else { @@ -747,10 +912,10 @@ 
TYPED_TEST(EnumStoreDictionaryTest, compact_worst_works) EXPECT_LT((TypeParam::type == Type::BTREE_AND_HASH) ? 1 : 0, compact_count); EXPECT_GT(15, compact_count); if (dict.get_has_btree_dictionary()) { - EXPECT_GT(search::CompactionStrategy::DEAD_BYTES_SLACK, dict.get_btree_memory_usage().deadBytes()); + EXPECT_GT(CompactionStrategy::DEAD_BYTES_SLACK, dict.get_btree_memory_usage().deadBytes()); } if (dict.get_has_hash_dictionary()) { - EXPECT_GT(search::CompactionStrategy::DEAD_BYTES_SLACK, dict.get_hash_memory_usage().deadBytes()); + EXPECT_GT(CompactionStrategy::DEAD_BYTES_SLACK, dict.get_hash_memory_usage().deadBytes()); } std::vector<int32_t> exp_values; std::vector<int32_t> values; diff --git a/searchlib/src/tests/attribute/multi_value_mapping/multi_value_mapping_test.cpp b/searchlib/src/tests/attribute/multi_value_mapping/multi_value_mapping_test.cpp index 8b1906573d4..bddaa4f4e31 100644 --- a/searchlib/src/tests/attribute/multi_value_mapping/multi_value_mapping_test.cpp +++ b/searchlib/src/tests/attribute/multi_value_mapping/multi_value_mapping_test.cpp @@ -14,6 +14,8 @@ LOG_SETUP("multivaluemapping_test"); using vespalib::datastore::ArrayStoreConfig; +using vespalib::datastore::CompactionSpec; +using vespalib::datastore::CompactionStrategy; template <typename EntryT> void @@ -142,7 +144,9 @@ public: } void compactWorst() { - _mvMapping->compactWorst(true, false); + CompactionSpec compaction_spec(true, false); + CompactionStrategy compaction_strategy; + _mvMapping->compactWorst(compaction_spec, compaction_strategy); _attr->commit(); _attr->incGeneration(); } diff --git a/searchlib/src/tests/attribute/posting_store/posting_store_test.cpp b/searchlib/src/tests/attribute/posting_store/posting_store_test.cpp index cd78332cacd..10cc14012dd 100644 --- a/searchlib/src/tests/attribute/posting_store/posting_store_test.cpp +++ b/searchlib/src/tests/attribute/posting_store/posting_store_test.cpp @@ -13,6 +13,7 @@ #include <ostream> using vespalib::GenerationHandler; 
+using vespalib::datastore::CompactionStrategy; using vespalib::datastore::EntryRef; namespace search::attribute { @@ -160,9 +161,9 @@ PostingStoreTest::test_compact_sequence(uint32_t sequence_length) EntryRef old_ref2 = get_posting_ref(2); auto usage_before = store.getMemoryUsage(); bool compaction_done = false; - search::CompactionStrategy compaction_strategy(0.05, 0.2); + CompactionStrategy compaction_strategy(0.05, 0.2); for (uint32_t pass = 0; pass < 45; ++pass) { - store.update_stat(); + store.update_stat(compaction_strategy); auto guard = _gen_handler.takeGuard(); if (!store.consider_compact_worst_buffers(compaction_strategy)) { compaction_done = true; @@ -193,9 +194,9 @@ PostingStoreTest::test_compact_btree_nodes(uint32_t sequence_length) EntryRef old_ref2 = get_posting_ref(2); auto usage_before = store.getMemoryUsage(); bool compaction_done = false; - search::CompactionStrategy compaction_strategy(0.05, 0.2); + CompactionStrategy compaction_strategy(0.05, 0.2); for (uint32_t pass = 0; pass < 55; ++pass) { - store.update_stat(); + store.update_stat(compaction_strategy); auto guard = _gen_handler.takeGuard(); if (!store.consider_compact_worst_btree_nodes(compaction_strategy)) { compaction_done = true; diff --git a/searchlib/src/tests/attribute/reference_attribute/reference_attribute_test.cpp b/searchlib/src/tests/attribute/reference_attribute/reference_attribute_test.cpp index c077ab83a6e..1a8eda40f52 100644 --- a/searchlib/src/tests/attribute/reference_attribute/reference_attribute_test.cpp +++ b/searchlib/src/tests/attribute/reference_attribute/reference_attribute_test.cpp @@ -176,7 +176,7 @@ struct ReferenceAttributeTest : public ::testing::Test { search::attribute::Status newStatus = oldStatus; uint64_t iter = 0; AttributeGuard guard(_attr); - uint64_t dropCount = search::CompactionStrategy::DEAD_BYTES_SLACK / sizeof(Reference); + uint64_t dropCount = vespalib::datastore::CompactionStrategy::DEAD_BYTES_SLACK / sizeof(Reference); for (; iter < iterLimit; 
++iter) { clear(2); set(2, toGid(doc2)); diff --git a/searchlib/src/tests/attribute/tensorattribute/tensorattribute_test.cpp b/searchlib/src/tests/attribute/tensorattribute/tensorattribute_test.cpp index 922b0d4fb3e..8a6f1e08fa6 100644 --- a/searchlib/src/tests/attribute/tensorattribute/tensorattribute_test.cpp +++ b/searchlib/src/tests/attribute/tensorattribute/tensorattribute_test.cpp @@ -38,7 +38,6 @@ using document::WrongTensorTypeException; using search::AddressSpaceUsage; using search::AttributeGuard; using search::AttributeVector; -using search::CompactionStrategy; using search::attribute::DistanceMetric; using search::attribute::HnswIndexParams; using search::queryeval::GlobalFilter; @@ -56,6 +55,7 @@ using search::tensor::NearestNeighborIndexLoader; using search::tensor::NearestNeighborIndexSaver; using search::tensor::PrepareResult; using search::tensor::TensorAttribute; +using vespalib::datastore::CompactionStrategy; using vespalib::eval::TensorSpec; using vespalib::eval::CellType; using vespalib::eval::ValueType; @@ -222,7 +222,8 @@ public: bool consider_compact(const CompactionStrategy&) override { return false; } - vespalib::MemoryUsage update_stat() override { + vespalib::MemoryUsage update_stat(const CompactionStrategy&) override { + ++_memory_usage_cnt; return vespalib::MemoryUsage(); } vespalib::MemoryUsage memory_usage() const override { diff --git a/searchlib/src/tests/docstore/document_store/document_store_test.cpp b/searchlib/src/tests/docstore/document_store/document_store_test.cpp index dec7b911f65..f2bec30a349 100644 --- a/searchlib/src/tests/docstore/document_store/document_store_test.cpp +++ b/searchlib/src/tests/docstore/document_store/document_store_test.cpp @@ -25,6 +25,7 @@ struct NullDataStore : IDataStore { size_t memoryMeta() const override { return 0; } size_t getDiskFootprint() const override { return 0; } size_t getDiskBloat() const override { return 0; } + size_t getMaxSpreadAsBloat() const override { return 0; } uint64_t 
lastSyncToken() const override { return 0; } uint64_t tentativeLastSyncToken() const override { return 0; } vespalib::system_time getLastFlushTime() const override { return vespalib::system_time(); } diff --git a/searchlib/src/tests/docstore/logdatastore/logdatastore_test.cpp b/searchlib/src/tests/docstore/logdatastore/logdatastore_test.cpp index 07652dfd336..378babb6ee1 100644 --- a/searchlib/src/tests/docstore/logdatastore/logdatastore_test.cpp +++ b/searchlib/src/tests/docstore/logdatastore/logdatastore_test.cpp @@ -236,7 +236,7 @@ void verifyGrowing(const LogDataStore::Config & config, uint32_t minFiles, uint3 datastore.remove(i + 20000, i); } datastore.flush(datastore.initFlush(lastSyncToken)); - datastore.compact(30000); + datastore.compactBloat(30000); datastore.remove(31000, 0); checkStats(datastore, 31000, 30000); EXPECT_LESS_EQUAL(minFiles, datastore.getAllActiveFiles().size()); @@ -252,7 +252,7 @@ void verifyGrowing(const LogDataStore::Config & config, uint32_t minFiles, uint3 } TEST("testGrowingChunkedBySize") { LogDataStore::Config config; - config.setMaxFileSize(100000).setMaxDiskBloatFactor(0.1).setMaxBucketSpread(3.0).setMinFileSizeFactor(0.2) + config.setMaxFileSize(100000).setMaxBucketSpread(3.0).setMinFileSizeFactor(0.2) .compactCompression({CompressionConfig::LZ4}) .setFileConfig({{CompressionConfig::LZ4, 9, 60}, 1000}); verifyGrowing(config, 40, 120); @@ -260,7 +260,7 @@ TEST("testGrowingChunkedBySize") { TEST("testGrowingChunkedByNumLids") { LogDataStore::Config config; - config.setMaxNumLids(1000).setMaxDiskBloatFactor(0.1).setMaxBucketSpread(3.0).setMinFileSizeFactor(0.2) + config.setMaxNumLids(1000).setMaxBucketSpread(3.0).setMinFileSizeFactor(0.2) .compactCompression({CompressionConfig::LZ4}) .setFileConfig({{CompressionConfig::LZ4, 9, 60}, 1000}); verifyGrowing(config,10, 10); @@ -679,7 +679,7 @@ TEST("testWriteRead") { EXPECT_LESS(0u, headerFootprint); EXPECT_EQUAL(datastore.getDiskFootprint(), headerFootprint); 
EXPECT_EQUAL(datastore.getDiskBloat(), 0ul); - EXPECT_EQUAL(datastore.getMaxCompactGain(), 0ul); + EXPECT_EQUAL(datastore.getMaxSpreadAsBloat(), 0ul); datastore.write(1, 0, a[0].c_str(), a[0].size()); fetchAndTest(datastore, 0, a[0].c_str(), a[0].size()); datastore.write(2, 0, a[1].c_str(), a[1].size()); @@ -701,7 +701,7 @@ TEST("testWriteRead") { EXPECT_EQUAL(datastore.getDiskFootprint(), 2711ul + headerFootprint); EXPECT_EQUAL(datastore.getDiskBloat(), 0ul); - EXPECT_EQUAL(datastore.getMaxCompactGain(), 0ul); + EXPECT_EQUAL(datastore.getMaxSpreadAsBloat(), 0ul); datastore.flush(datastore.initFlush(lastSyncToken)); } { @@ -715,7 +715,7 @@ TEST("testWriteRead") { EXPECT_LESS(0u, headerFootprint); EXPECT_EQUAL(4944ul + headerFootprint, datastore.getDiskFootprint()); EXPECT_EQUAL(0ul, datastore.getDiskBloat()); - EXPECT_EQUAL(0ul, datastore.getMaxCompactGain()); + EXPECT_EQUAL(0ul, datastore.getMaxSpreadAsBloat()); for(size_t i=0; i < 100; i++) { fetchAndTest(datastore, i, a[i%2].c_str(), a[i%2].size()); @@ -730,7 +730,7 @@ TEST("testWriteRead") { EXPECT_EQUAL(7594ul + headerFootprint, datastore.getDiskFootprint()); EXPECT_EQUAL(0ul, datastore.getDiskBloat()); - EXPECT_EQUAL(0ul, datastore.getMaxCompactGain()); + EXPECT_EQUAL(0ul, datastore.getMaxSpreadAsBloat()); } FastOS_File::EmptyAndRemoveDirectory("empty"); } @@ -1050,7 +1050,6 @@ TEST("require that config equality operator detects inequality") { using C = LogDataStore::Config; EXPECT_TRUE(C() == C()); EXPECT_FALSE(C() == C().setMaxFileSize(1)); - EXPECT_FALSE(C() == C().setMaxDiskBloatFactor(0.3)); EXPECT_FALSE(C() == C().setMaxBucketSpread(0.3)); EXPECT_FALSE(C() == C().setMinFileSizeFactor(0.3)); EXPECT_FALSE(C() == C().setFileConfig(WriteableFileChunk::Config({}, 70))); diff --git a/searchlib/src/tests/tensor/dense_tensor_store/dense_tensor_store_test.cpp b/searchlib/src/tests/tensor/dense_tensor_store/dense_tensor_store_test.cpp index 032960c3799..149662cd266 100644 --- 
a/searchlib/src/tests/tensor/dense_tensor_store/dense_tensor_store_test.cpp +++ b/searchlib/src/tests/tensor/dense_tensor_store/dense_tensor_store_test.cpp @@ -75,10 +75,19 @@ assertArraySize(const vespalib::string &tensorType, uint32_t expArraySize) { TEST("require that array size is calculated correctly") { - TEST_DO(assertArraySize("tensor(x[1])", 32)); + TEST_DO(assertArraySize("tensor(x[1])", 8)); TEST_DO(assertArraySize("tensor(x[10])", 96)); TEST_DO(assertArraySize("tensor(x[3])", 32)); TEST_DO(assertArraySize("tensor(x[10],y[10])", 800)); + TEST_DO(assertArraySize("tensor<int8>(x[1])", 8)); + TEST_DO(assertArraySize("tensor<int8>(x[8])", 8)); + TEST_DO(assertArraySize("tensor<int8>(x[9])", 16)); + TEST_DO(assertArraySize("tensor<int8>(x[16])", 16)); + TEST_DO(assertArraySize("tensor<int8>(x[17])", 32)); + TEST_DO(assertArraySize("tensor<int8>(x[32])", 32)); + TEST_DO(assertArraySize("tensor<int8>(x[33])", 64)); + TEST_DO(assertArraySize("tensor<int8>(x[64])", 64)); + TEST_DO(assertArraySize("tensor<int8>(x[65])", 96)); } TEST_MAIN() { TEST_RUN_ALL(); } diff --git a/searchlib/src/tests/tensor/hnsw_index/hnsw_index_test.cpp b/searchlib/src/tests/tensor/hnsw_index/hnsw_index_test.cpp index 7acd3cf8b57..6054d473c1f 100644 --- a/searchlib/src/tests/tensor/hnsw_index/hnsw_index_test.cpp +++ b/searchlib/src/tests/tensor/hnsw_index/hnsw_index_test.cpp @@ -1,12 +1,13 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
-#include <vespa/searchcommon/common/compaction_strategy.h> #include <vespa/searchlib/common/bitvector.h> #include <vespa/searchlib/tensor/distance_functions.h> #include <vespa/searchlib/tensor/doc_vector_access.h> #include <vespa/searchlib/tensor/hnsw_index.h> #include <vespa/searchlib/tensor/random_level_generator.h> #include <vespa/searchlib/tensor/inv_log_level_generator.h> +#include <vespa/vespalib/datastore/compaction_spec.h> +#include <vespa/vespalib/datastore/compaction_strategy.h> #include <vespa/vespalib/gtest/gtest.h> #include <vespa/vespalib/util/generationhandler.h> #include <vespa/vespalib/data/slime/slime.h> @@ -21,7 +22,8 @@ using namespace search::tensor; using namespace vespalib::slime; using vespalib::Slime; using search::BitVector; -using search::CompactionStrategy; +using vespalib::datastore::CompactionSpec; +using vespalib::datastore::CompactionStrategy; template <typename FloatType> class MyDocVectorAccess : public DocVectorAccess { @@ -116,7 +118,8 @@ public: } MemoryUsage commit_and_update_stat() { commit(); - return index->update_stat(); + CompactionStrategy compaction_strategy; + return index->update_stat(compaction_strategy); } void expect_entry_point(uint32_t exp_docid, uint32_t exp_level) { EXPECT_EQ(exp_docid, index->get_entry_docid()); @@ -628,10 +631,12 @@ TEST_F(HnswIndexTest, hnsw_graph_is_compacted) for (uint32_t i = 0; i < 10; ++i) { mem_1 = mem_2; // Forced compaction to move things around - index->compact_link_arrays(true, false); - index->compact_level_arrays(true, false); + CompactionSpec compaction_spec(true, false); + CompactionStrategy compaction_strategy; + index->compact_link_arrays(compaction_spec, compaction_strategy); + index->compact_level_arrays(compaction_spec, compaction_strategy); commit(); - index->update_stat(); + index->update_stat(compaction_strategy); mem_2 = commit_and_update_stat(); EXPECT_LE(mem_2.usedBytes(), mem_1.usedBytes()); if (mem_2.usedBytes() == mem_1.usedBytes()) { diff --git 
a/searchlib/src/tests/transactionlog/translogclient_test.cpp b/searchlib/src/tests/transactionlog/translogclient_test.cpp index 5740eeb610d..d3c3af3a9ca 100644 --- a/searchlib/src/tests/transactionlog/translogclient_test.cpp +++ b/searchlib/src/tests/transactionlog/translogclient_test.cpp @@ -7,6 +7,7 @@ #include <vespa/searchlib/index/dummyfileheadercontext.h> #include <vespa/document/util/bytebuffer.h> #include <vespa/vespalib/util/exceptions.h> +#include <vespa/vespalib/util/destructor_callbacks.h> #include <vespa/fastos/file.h> #include <thread> @@ -316,43 +317,33 @@ fillDomainTest(Session * s1, size_t numPackets, size_t numEntries) } } -using Counter = std::atomic<size_t>; - -class CountDone : public IDestructorCallback { -public: - explicit CountDone(Counter & inFlight) noexcept : _inFlight(inFlight) { ++_inFlight; } - ~CountDone() override { --_inFlight; } -private: - Counter & _inFlight; -}; - void -fillDomainTest(TransLogServer & s1, const vespalib::string & domain, size_t numPackets, size_t numEntries) +fillDomainTest(IDestructorCallback::SP onDone, TransLogServer & tls, const vespalib::string & domain, size_t numPackets, size_t numEntries) { size_t value(0); - Counter inFlight(0); - auto domainWriter = s1.getWriter(domain); - for(size_t i=0; i < numPackets; i++) { - std::unique_ptr<Packet> p(new Packet(DEFAULT_PACKET_SIZE)); - for(size_t j=0; j < numEntries; j++, value++) { - Packet::Entry e(value+1, j+1, vespalib::ConstBufferRef((const char *)&value, sizeof(value))); + auto domainWriter = tls.getWriter(domain); + + for (size_t i = 0; i < numPackets; i++) { + auto p = std::make_unique<Packet>(DEFAULT_PACKET_SIZE); + for (size_t j = 0; j < numEntries; j++, value++) { + Packet::Entry e(value + 1, j + 1, vespalib::ConstBufferRef((const char *) &value, sizeof(value))); p->add(e); - if ( p->sizeBytes() > DEFAULT_PACKET_SIZE ) { - domainWriter->append(*p, std::make_shared<CountDone>(inFlight)); + if (p->sizeBytes() > DEFAULT_PACKET_SIZE) { + 
domainWriter->append(*p, onDone); p = std::make_unique<Packet>(DEFAULT_PACKET_SIZE); } } - domainWriter->append(*p, std::make_shared<CountDone>(inFlight)); - auto keep = domainWriter->startCommit(Writer::DoneCallback()); - LOG(info, "Inflight %ld", inFlight.load()); - } - while (inFlight.load() != 0) { - std::this_thread::sleep_for(10ms); - LOG(info, "Waiting for inflight %ld to reach zero", inFlight.load()); + domainWriter->append(*p, onDone); + auto keep = domainWriter->startCommit(onDone); } - } +void +fillDomainTest(TransLogServer & tls, const vespalib::string & domain, size_t numPackets, size_t numEntries) { + vespalib::Gate gate; + fillDomainTest(std::make_shared<vespalib::GateCallback>(gate), tls, domain, numPackets, numEntries); + gate.await(); +} void fillDomainTest(Session * s1, size_t numPackets, size_t numEntries, size_t entrySize) @@ -545,7 +536,7 @@ partialUpdateTest(const vespalib::string & testDir) { ASSERT_TRUE( visitor->visit(5, 7) ); for (size_t i(0); ! ca._eof && (i < 1000); i++ ) { std::this_thread::sleep_for(10ms); } ASSERT_TRUE( ca._eof ); - ASSERT_TRUE( ca.map().size() == 1); + ASSERT_EQUAL(1u, ca.map().size()); ASSERT_TRUE( ca.hasSerial(7) ); CallBackUpdate ca1; diff --git a/searchlib/src/vespa/searchlib/attribute/CMakeLists.txt b/searchlib/src/vespa/searchlib/attribute/CMakeLists.txt index 4f46c279565..9e5a8d4dfbb 100644 --- a/searchlib/src/vespa/searchlib/attribute/CMakeLists.txt +++ b/searchlib/src/vespa/searchlib/attribute/CMakeLists.txt @@ -41,6 +41,7 @@ vespa_add_library(searchlib_attribute OBJECT enumattributesaver.cpp enumcomparator.cpp enumhintsearchcontext.cpp + enum_store_compaction_spec.cpp enum_store_dictionary.cpp enum_store_loaders.cpp enumstore.cpp diff --git a/searchlib/src/vespa/searchlib/attribute/attribute_header.cpp b/searchlib/src/vespa/searchlib/attribute/attribute_header.cpp index b68923b90bf..e40717e6375 100644 --- a/searchlib/src/vespa/searchlib/attribute/attribute_header.cpp +++ 
b/searchlib/src/vespa/searchlib/attribute/attribute_header.cpp @@ -191,9 +191,9 @@ AttributeHeader::internalExtractTags(const vespalib::GenericHeader &header) } AttributeHeader -AttributeHeader::extractTags(const vespalib::GenericHeader &header) +AttributeHeader::extractTags(const vespalib::GenericHeader &header, const vespalib::string &file_name) { - AttributeHeader result; + AttributeHeader result(file_name); result.internalExtractTags(header); return result; } diff --git a/searchlib/src/vespa/searchlib/attribute/attribute_header.h b/searchlib/src/vespa/searchlib/attribute/attribute_header.h index 00da28baf80..7c0b8f3084b 100644 --- a/searchlib/src/vespa/searchlib/attribute/attribute_header.h +++ b/searchlib/src/vespa/searchlib/attribute/attribute_header.h @@ -69,7 +69,7 @@ public: bool getPredicateParamsSet() const { return _predicateParamsSet; } bool getCollectionTypeParamsSet() const { return _collectionTypeParamsSet; } const std::optional<HnswIndexParams>& get_hnsw_index_params() const { return _hnsw_index_params; } - static AttributeHeader extractTags(const vespalib::GenericHeader &header); + static AttributeHeader extractTags(const vespalib::GenericHeader &header, const vespalib::string &file_name); void addTags(vespalib::GenericHeader &header) const; }; diff --git a/searchlib/src/vespa/searchlib/attribute/attributevector.cpp b/searchlib/src/vespa/searchlib/attribute/attributevector.cpp index 3bc1e5ec25f..a2ac482ebf3 100644 --- a/searchlib/src/vespa/searchlib/attribute/attributevector.cpp +++ b/searchlib/src/vespa/searchlib/attribute/attributevector.cpp @@ -800,6 +800,7 @@ AttributeVector::update_config(const Config& cfg) } drain_hold(1_Mi); // Wait until 1MiB or less on hold _config.setCompactionStrategy(cfg.getCompactionStrategy()); + updateStat(true); commit(); // might trigger compaction drain_hold(1_Mi); // Wait until 1MiB or less on hold } diff --git a/searchlib/src/vespa/searchlib/attribute/enum_store_compaction_spec.cpp 
b/searchlib/src/vespa/searchlib/attribute/enum_store_compaction_spec.cpp new file mode 100644 index 00000000000..43f599346f4 --- /dev/null +++ b/searchlib/src/vespa/searchlib/attribute/enum_store_compaction_spec.cpp @@ -0,0 +1,30 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. + +#include "enum_store_compaction_spec.h" +#include "i_enum_store.h" +#include "i_enum_store_dictionary.h" +#include <vespa/vespalib/datastore/compaction_strategy.h> +#include <vespa/vespalib/util/address_space.h> + +namespace search::enumstore { + +using vespalib::datastore::CompactionStrategy; + +vespalib::MemoryUsage +EnumStoreCompactionSpec::update_stat(IEnumStore& enum_store, const CompactionStrategy& compaction_strategy) +{ + auto values_memory_usage = enum_store.get_values_memory_usage(); + auto values_address_space_usage = enum_store.get_values_address_space_usage(); + _values = compaction_strategy.should_compact(values_memory_usage, values_address_space_usage); + auto& dict = enum_store.get_dictionary(); + auto dictionary_btree_usage = dict.get_btree_memory_usage(); + _btree_dictionary = compaction_strategy.should_compact_memory(dictionary_btree_usage); + auto dictionary_hash_usage = dict.get_hash_memory_usage(); + _hash_dictionary = compaction_strategy.should_compact_memory(dictionary_hash_usage); + auto retval = values_memory_usage; + retval.merge(dictionary_btree_usage); + retval.merge(dictionary_hash_usage); + return retval; +} + +} diff --git a/searchlib/src/vespa/searchlib/attribute/enum_store_compaction_spec.h b/searchlib/src/vespa/searchlib/attribute/enum_store_compaction_spec.h new file mode 100644 index 00000000000..11ecb4e93ef --- /dev/null +++ b/searchlib/src/vespa/searchlib/attribute/enum_store_compaction_spec.h @@ -0,0 +1,35 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+ +#pragma once + +#include <vespa/vespalib/datastore/compaction_spec.h> + +namespace search { class IEnumStore; } +namespace vespalib { class MemoryUsage; } +namespace vespalib::datastore { class CompactionStrategy; } + +namespace search::enumstore { + +/* + * Class describing how to compact an enum store + */ +class EnumStoreCompactionSpec { + using CompactionSpec = vespalib::datastore::CompactionSpec; + CompactionSpec _values; + bool _btree_dictionary; + bool _hash_dictionary; +public: + EnumStoreCompactionSpec() noexcept + : _values(), + _btree_dictionary(false), + _hash_dictionary(false) + { + } + + CompactionSpec get_values() const noexcept { return _values; } + bool btree_dictionary() const noexcept { return _btree_dictionary; } + bool hash_dictionary() const noexcept { return _hash_dictionary; } + vespalib::MemoryUsage update_stat(IEnumStore& enum_store, const vespalib::datastore::CompactionStrategy &compaction_strategy); +}; + +} diff --git a/searchlib/src/vespa/searchlib/attribute/enum_store_dictionary.cpp b/searchlib/src/vespa/searchlib/attribute/enum_store_dictionary.cpp index 6c929ad5981..8bc28abc238 100644 --- a/searchlib/src/vespa/searchlib/attribute/enum_store_dictionary.cpp +++ b/searchlib/src/vespa/searchlib/attribute/enum_store_dictionary.cpp @@ -311,6 +311,165 @@ EnumStoreDictionary<BTreeDictionaryT, HashDictionaryT>::normalize_posting_lists( } template <> +bool +EnumStoreDictionary<EnumTree>::normalize_posting_lists(std::function<void(std::vector<EntryRef>&)>, const EntryRefFilter&) +{ + LOG_ABORT("should not be reached"); +} + +namespace { + +template <typename HashDictionaryT> +class ChangeWriterBase +{ +protected: + HashDictionaryT* _hash_dict; + static constexpr bool has_hash_dictionary = true; + ChangeWriterBase() + : _hash_dict(nullptr) + { + } +public: + void set_hash_dict(HashDictionaryT &hash_dict) { _hash_dict = &hash_dict; } +}; + +template <> +class ChangeWriterBase<vespalib::datastore::NoHashDictionary> +{ +protected: + static 
constexpr bool has_hash_dictionary = false; + ChangeWriterBase() = default; +}; + +template <typename HashDictionaryT> +class ChangeWriter : public ChangeWriterBase<HashDictionaryT> { + using Parent = ChangeWriterBase<HashDictionaryT>; + using Parent::has_hash_dictionary; + std::vector<std::pair<EntryRef,uint32_t*>> _tree_refs; +public: + ChangeWriter(uint32_t capacity); + ~ChangeWriter(); + bool write(const std::vector<EntryRef>& refs); + void emplace_back(EntryRef key, uint32_t& tree_ref) { _tree_refs.emplace_back(std::make_pair(key, &tree_ref)); } +}; + +template <typename HashDictionaryT> +ChangeWriter<HashDictionaryT>::ChangeWriter(uint32_t capacity) + : ChangeWriterBase<HashDictionaryT>(), + _tree_refs() +{ + _tree_refs.reserve(capacity); +} + +template <typename HashDictionaryT> +ChangeWriter<HashDictionaryT>::~ChangeWriter() = default; + +template <typename HashDictionaryT> +bool +ChangeWriter<HashDictionaryT>::write(const std::vector<EntryRef> &refs) +{ + bool changed = false; + assert(refs.size() == _tree_refs.size()); + auto tree_ref = _tree_refs.begin(); + for (auto ref : refs) { + EntryRef old_ref(*tree_ref->second); + if (ref != old_ref) { + if (!changed) { + // Note: Needs review when porting to other platforms + // Assumes that other CPUs observes stores from this CPU in order + std::atomic_thread_fence(std::memory_order_release); + changed = true; + } + *tree_ref->second = ref.ref(); + if constexpr (has_hash_dictionary) { + auto find_result = this->_hash_dict->find(this->_hash_dict->get_default_comparator(), tree_ref->first); + assert(find_result != nullptr && find_result->first.load_relaxed() == tree_ref->first); + assert(find_result->second.load_relaxed() == old_ref); + find_result->second.store_release(ref); + } + } + ++tree_ref; + } + assert(tree_ref == _tree_refs.end()); + _tree_refs.clear(); + return changed; +} + +} + +template <typename BTreeDictionaryT, typename HashDictionaryT> +bool +EnumStoreDictionary<BTreeDictionaryT, 
HashDictionaryT>::normalize_posting_lists(std::function<void(std::vector<EntryRef>&)> normalize, const EntryRefFilter& filter) +{ + if constexpr (has_btree_dictionary) { + std::vector<EntryRef> refs; + refs.reserve(1024); + bool changed = false; + ChangeWriter<HashDictionaryT> change_writer(refs.capacity()); + if constexpr (has_hash_dictionary) { + change_writer.set_hash_dict(this->_hash_dict); + } + auto& dict = this->_btree_dict; + for (auto itr = dict.begin(); itr.valid(); ++itr) { + EntryRef ref(itr.getData()); + if (ref.valid()) { + if (filter.has(ref)) { + refs.emplace_back(ref); + change_writer.emplace_back(itr.getKey(), itr.getWData()); + if (refs.size() >= refs.capacity()) { + normalize(refs); + changed |= change_writer.write(refs); + refs.clear(); + } + } + } + } + if (!refs.empty()) { + normalize(refs); + changed |= change_writer.write(refs); + } + return changed; + } else { + return this->_hash_dict.normalize_values(normalize, filter); + } +} + +template <> +void +EnumStoreDictionary<EnumTree>::foreach_posting_list(std::function<void(const std::vector<EntryRef>&)>, const EntryRefFilter&) +{ + LOG_ABORT("should not be reached"); +} + +template <typename BTreeDictionaryT, typename HashDictionaryT> +void +EnumStoreDictionary<BTreeDictionaryT, HashDictionaryT>::foreach_posting_list(std::function<void(const std::vector<EntryRef>&)> callback, const EntryRefFilter& filter) +{ + if constexpr (has_btree_dictionary) { + std::vector<EntryRef> refs; + refs.reserve(1024); + auto& dict = this->_btree_dict; + for (auto itr = dict.begin(); itr.valid(); ++itr) { + EntryRef ref(itr.getData()); + if (ref.valid()) { + if (filter.has(ref)) { + refs.emplace_back(ref); + if (refs.size() >= refs.capacity()) { + callback(refs); + refs.clear(); + } + } + } + } + if (!refs.empty()) { + callback(refs); + } + } else { + this->_hash_dict.foreach_value(callback, filter); + } +} + +template <> const EnumPostingTree & EnumStoreDictionary<EnumTree>::get_posting_dictionary() const { diff 
--git a/searchlib/src/vespa/searchlib/attribute/enum_store_dictionary.h b/searchlib/src/vespa/searchlib/attribute/enum_store_dictionary.h index 4d0509c0eb1..db1176c5484 100644 --- a/searchlib/src/vespa/searchlib/attribute/enum_store_dictionary.h +++ b/searchlib/src/vespa/searchlib/attribute/enum_store_dictionary.h @@ -16,6 +16,7 @@ template <typename BTreeDictionaryT, typename HashDictionaryT = vespalib::datast class EnumStoreDictionary : public vespalib::datastore::UniqueStoreDictionary<BTreeDictionaryT, IEnumStoreDictionary, HashDictionaryT> { protected: using EntryRef = IEnumStoreDictionary::EntryRef; + using EntryRefFilter = IEnumStoreDictionary::EntryRefFilter; using Index = IEnumStoreDictionary::Index; using BTreeDictionaryType = BTreeDictionaryT; using EntryComparator = IEnumStoreDictionary::EntryComparator; @@ -54,6 +55,8 @@ public: void clear_all_posting_lists(std::function<void(EntryRef)> clearer) override; void update_posting_list(Index idx, const EntryComparator& cmp, std::function<EntryRef(EntryRef)> updater) override; bool normalize_posting_lists(std::function<EntryRef(EntryRef)> normalize) override; + bool normalize_posting_lists(std::function<void(std::vector<EntryRef>&)> normalize, const EntryRefFilter& filter) override; + void foreach_posting_list(std::function<void(const std::vector<EntryRef>&)> callback, const EntryRefFilter& filter) override; const EnumPostingTree& get_posting_dictionary() const override; }; diff --git a/searchlib/src/vespa/searchlib/attribute/enumattribute.hpp b/searchlib/src/vespa/searchlib/attribute/enumattribute.hpp index 2c6ac521b30..3e578856c2b 100644 --- a/searchlib/src/vespa/searchlib/attribute/enumattribute.hpp +++ b/searchlib/src/vespa/searchlib/attribute/enumattribute.hpp @@ -81,7 +81,7 @@ void EnumAttribute<B>::populate_address_space_usage(AddressSpaceUsage& usage) const { B::populate_address_space_usage(usage); - usage.set(AddressSpaceComponents::enum_store, _enumStore.get_address_space_usage()); + 
usage.set(AddressSpaceComponents::enum_store, _enumStore.get_values_address_space_usage()); } } // namespace search diff --git a/searchlib/src/vespa/searchlib/attribute/enumstore.h b/searchlib/src/vespa/searchlib/attribute/enumstore.h index a140a529c7d..7fe586b8ccc 100644 --- a/searchlib/src/vespa/searchlib/attribute/enumstore.h +++ b/searchlib/src/vespa/searchlib/attribute/enumstore.h @@ -2,6 +2,7 @@ #pragma once +#include "enum_store_compaction_spec.h" #include "enum_store_dictionary.h" #include "enum_store_loaders.h" #include "enumcomparator.h" @@ -55,10 +56,7 @@ private: bool _is_folded; ComparatorType _comparator; ComparatorType _foldedComparator; - vespalib::MemoryUsage _cached_values_memory_usage; - vespalib::AddressSpace _cached_values_address_space_usage; - vespalib::MemoryUsage _cached_dictionary_btree_usage; - vespalib::MemoryUsage _cached_dictionary_hash_usage; + enumstore::EnumStoreCompactionSpec _compaction_spec; EnumStoreT(const EnumStoreT & rhs) = delete; EnumStoreT & operator=(const EnumStoreT & rhs) = delete; @@ -96,7 +94,7 @@ public: vespalib::MemoryUsage get_values_memory_usage() const override { return _store.get_allocator().get_data_store().getMemoryUsage(); } vespalib::MemoryUsage get_dictionary_memory_usage() const override { return _dict->get_memory_usage(); } - vespalib::AddressSpace get_address_space_usage() const; + vespalib::AddressSpace get_values_address_space_usage() const override; void transfer_hold_lists(generation_t generation); void trim_hold_lists(generation_t first_used); @@ -199,9 +197,9 @@ public: bool find_index(EntryType value, Index& idx) const; void free_unused_values() override; void free_unused_values(IndexList to_remove); - vespalib::MemoryUsage update_stat() override; + vespalib::MemoryUsage update_stat(const CompactionStrategy& compaction_strategy) override; std::unique_ptr<EnumIndexRemapper> consider_compact_values(const CompactionStrategy& compaction_strategy) override; - std::unique_ptr<EnumIndexRemapper> 
compact_worst_values(bool compact_memory, bool compact_address_space) override; + std::unique_ptr<EnumIndexRemapper> compact_worst_values(CompactionSpec compaction_spec, const CompactionStrategy& compaction_strategy) override; bool consider_compact_dictionary(const CompactionStrategy& compaction_strategy) override; uint64_t get_compaction_count() const override { return _store.get_data_store().get_compaction_count(); diff --git a/searchlib/src/vespa/searchlib/attribute/enumstore.hpp b/searchlib/src/vespa/searchlib/attribute/enumstore.hpp index c202d780659..e1adca2b89a 100644 --- a/searchlib/src/vespa/searchlib/attribute/enumstore.hpp +++ b/searchlib/src/vespa/searchlib/attribute/enumstore.hpp @@ -18,10 +18,11 @@ #include <vespa/vespalib/datastore/unique_store_string_allocator.hpp> #include <vespa/vespalib/util/array.hpp> #include <vespa/searchlib/util/bufferwriter.h> -#include <vespa/searchcommon/common/compaction_strategy.h> +#include <vespa/vespalib/datastore/compaction_strategy.h> namespace search { +using vespalib::datastore::CompactionStrategy; using vespalib::datastore::EntryComparator; std::unique_ptr<vespalib::datastore::IUniqueStoreDictionary> @@ -77,8 +78,7 @@ EnumStoreT<EntryT>::EnumStoreT(bool has_postings, const DictionaryConfig & dict_ _is_folded(dict_cfg.getMatch() == DictionaryConfig::Match::UNCASED), _comparator(_store.get_data_store()), _foldedComparator(make_optionally_folded_comparator(is_folded())), - _cached_values_memory_usage(), - _cached_values_address_space_usage(0, 0, (1ull << 32)) + _compaction_spec() { _store.set_dictionary(make_enum_store_dictionary(*this, has_postings, dict_cfg, allocate_comparator(), @@ -91,9 +91,9 @@ EnumStoreT<EntryT>::~EnumStoreT() = default; template <typename EntryT> vespalib::AddressSpace -EnumStoreT<EntryT>::get_address_space_usage() const +EnumStoreT<EntryT>::get_values_address_space_usage() const { - return _store.get_address_space_usage(); + return _store.get_values_address_space_usage(); } template 
<typename EntryT> @@ -211,40 +211,26 @@ EnumStoreT<EntryT>::insert(EntryType value) template <typename EntryT> vespalib::MemoryUsage -EnumStoreT<EntryT>::update_stat() +EnumStoreT<EntryT>::update_stat(const CompactionStrategy& compaction_strategy) { - auto &store = _store.get_data_store(); - _cached_values_memory_usage = store.getMemoryUsage(); - _cached_values_address_space_usage = store.getAddressSpaceUsage(); - _cached_dictionary_btree_usage = _dict->get_btree_memory_usage(); - _cached_dictionary_hash_usage = _dict->get_hash_memory_usage(); - auto retval = _cached_values_memory_usage; - retval.merge(_cached_dictionary_btree_usage); - retval.merge(_cached_dictionary_hash_usage); - return retval; + return _compaction_spec.update_stat(*this, compaction_strategy); } template <typename EntryT> std::unique_ptr<IEnumStore::EnumIndexRemapper> EnumStoreT<EntryT>::consider_compact_values(const CompactionStrategy& compaction_strategy) { - size_t used_bytes = _cached_values_memory_usage.usedBytes(); - size_t dead_bytes = _cached_values_memory_usage.deadBytes(); - size_t used_address_space = _cached_values_address_space_usage.used(); - size_t dead_address_space = _cached_values_address_space_usage.dead(); - bool compact_memory = compaction_strategy.should_compact_memory(used_bytes, dead_bytes); - bool compact_address_space = compaction_strategy.should_compact_address_space(used_address_space, dead_address_space); - if (compact_memory || compact_address_space) { - return compact_worst_values(compact_memory, compact_address_space); + if (_compaction_spec.get_values().compact()) { + return compact_worst_values(_compaction_spec.get_values(), compaction_strategy); } return std::unique_ptr<IEnumStore::EnumIndexRemapper>(); } template <typename EntryT> std::unique_ptr<IEnumStore::EnumIndexRemapper> -EnumStoreT<EntryT>::compact_worst_values(bool compact_memory, bool compact_address_space) +EnumStoreT<EntryT>::compact_worst_values(CompactionSpec compaction_spec, const 
CompactionStrategy& compaction_strategy) { - return _store.compact_worst(compact_memory, compact_address_space); + return _store.compact_worst(compaction_spec, compaction_strategy); } template <typename EntryT> @@ -254,16 +240,12 @@ EnumStoreT<EntryT>::consider_compact_dictionary(const CompactionStrategy& compac if (_dict->has_held_buffers()) { return false; } - if (compaction_strategy.should_compact_memory(_cached_dictionary_btree_usage.usedBytes(), - _cached_dictionary_btree_usage.deadBytes())) - { - _dict->compact_worst(true, false); + if (_compaction_spec.btree_dictionary()) { + _dict->compact_worst(true, false, compaction_strategy); return true; } - if (compaction_strategy.should_compact_memory(_cached_dictionary_hash_usage.usedBytes(), - _cached_dictionary_hash_usage.deadBytes())) - { - _dict->compact_worst(false, true); + if (_compaction_spec.hash_dictionary()) { + _dict->compact_worst(false, true, compaction_strategy); return true; } return false; diff --git a/searchlib/src/vespa/searchlib/attribute/i_enum_store.h b/searchlib/src/vespa/searchlib/attribute/i_enum_store.h index 1f3165828bc..e3782514530 100644 --- a/searchlib/src/vespa/searchlib/attribute/i_enum_store.h +++ b/searchlib/src/vespa/searchlib/attribute/i_enum_store.h @@ -6,10 +6,18 @@ #include "enum_store_types.h" #include <vespa/vespalib/datastore/entryref.h> #include <vespa/vespalib/datastore/unique_store_enumerator.h> -#include <vespa/vespalib/util/memoryusage.h> + +namespace vespalib { + +class AddressSpace; +class MemoryUsage; + +} namespace vespalib::datastore { +class CompactionSpec; +class CompactionStrategy; class DataStoreBase; template <typename> class UniqueStoreRemapper; @@ -19,7 +27,6 @@ template <typename> class UniqueStoreRemapper; namespace search { class BufferWriter; -class CompactionStrategy; class IEnumStoreDictionary; /** @@ -30,6 +37,8 @@ public: using Index = enumstore::Index; using InternalIndex = enumstore::InternalIndex; using IndexVector = enumstore::IndexVector; + 
using CompactionSpec = vespalib::datastore::CompactionSpec; + using CompactionStrategy = vespalib::datastore::CompactionStrategy; using EnumHandle = enumstore::EnumHandle; using EnumVector = enumstore::EnumVector; using EnumIndexRemapper = vespalib::datastore::UniqueStoreRemapper<InternalIndex>; @@ -49,10 +58,11 @@ public: virtual const IEnumStoreDictionary& get_dictionary() const = 0; virtual uint32_t get_num_uniques() const = 0; virtual vespalib::MemoryUsage get_values_memory_usage() const = 0; + virtual vespalib::AddressSpace get_values_address_space_usage() const = 0; virtual vespalib::MemoryUsage get_dictionary_memory_usage() const = 0; - virtual vespalib::MemoryUsage update_stat() = 0; + virtual vespalib::MemoryUsage update_stat(const CompactionStrategy& compaction_strategy) = 0; virtual std::unique_ptr<EnumIndexRemapper> consider_compact_values(const CompactionStrategy& compaction_strategy) = 0; - virtual std::unique_ptr<EnumIndexRemapper> compact_worst_values(bool compact_memory, bool compact_address_space) = 0; + virtual std::unique_ptr<EnumIndexRemapper> compact_worst_values(CompactionSpec compaction_spec, const CompactionStrategy& compaction_strategy) = 0; virtual bool consider_compact_dictionary(const CompactionStrategy& compaction_strategy) = 0; virtual uint64_t get_compaction_count() const = 0; // Should only be used by unit tests. 
diff --git a/searchlib/src/vespa/searchlib/attribute/i_enum_store_dictionary.h b/searchlib/src/vespa/searchlib/attribute/i_enum_store_dictionary.h index a8cf6881b86..a9716ec5d05 100644 --- a/searchlib/src/vespa/searchlib/attribute/i_enum_store_dictionary.h +++ b/searchlib/src/vespa/searchlib/attribute/i_enum_store_dictionary.h @@ -30,6 +30,7 @@ class IEnumStoreDictionary : public vespalib::datastore::IUniqueStoreDictionary public: using EntryRef = vespalib::datastore::EntryRef; using EntryComparator = vespalib::datastore::EntryComparator; + using EntryRefFilter = vespalib::datastore::EntryRefFilter; using EnumVector = IEnumStore::EnumVector; using Index = IEnumStore::Index; using IndexList = IEnumStore::IndexList; @@ -52,7 +53,25 @@ public: virtual Index remap_index(Index idx) = 0; virtual void clear_all_posting_lists(std::function<void(EntryRef)> clearer) = 0; virtual void update_posting_list(Index idx, const EntryComparator& cmp, std::function<EntryRef(EntryRef)> updater) = 0; + /* + * Scan dictionary and call normalize function for each value. If + * returned value is different then write back the modified value to + * the dictionary. Only used by unit tests. + */ virtual bool normalize_posting_lists(std::function<EntryRef(EntryRef)> normalize) = 0; + /* + * Scan dictionary and call normalize function for batches of values + * that pass the filter. Write back modified values to the dictionary. + * Used by compaction of posting lists when moving short arrays, + * bitvectors or btree roots. + */ + virtual bool normalize_posting_lists(std::function<void(std::vector<EntryRef>&)> normalize, const EntryRefFilter& filter) = 0; + /* + * Scan dictionary and call callback function for batches of values + * that pass the filter. Used by compaction of posting lists when + * moving btree nodes. 
+ */ + virtual void foreach_posting_list(std::function<void(const std::vector<EntryRef>&)> callback, const EntryRefFilter& filter) = 0; virtual const EnumPostingTree& get_posting_dictionary() const = 0; }; diff --git a/searchlib/src/vespa/searchlib/attribute/ipostinglistattributebase.h b/searchlib/src/vespa/searchlib/attribute/ipostinglistattributebase.h index 135870e29a5..20cec9a31c2 100644 --- a/searchlib/src/vespa/searchlib/attribute/ipostinglistattributebase.h +++ b/searchlib/src/vespa/searchlib/attribute/ipostinglistattributebase.h @@ -4,7 +4,7 @@ #include <vespa/searchcommon/attribute/iattributevector.h> -namespace search { class CompactionStrategy; } +namespace vespalib::datastore { class CompactionStrategy; } namespace vespalib { class MemoryUsage; } @@ -13,16 +13,9 @@ namespace search::attribute { class IPostingListAttributeBase { public: - virtual - ~IPostingListAttributeBase() - { - } - - virtual void - clearPostings(IAttributeVector::EnumHandle eidx, - uint32_t fromLid, - uint32_t toLid) = 0; - + using CompactionStrategy = vespalib::datastore::CompactionStrategy; + virtual ~IPostingListAttributeBase() = default; + virtual void clearPostings(IAttributeVector::EnumHandle eidx, uint32_t fromLid, uint32_t toLid) = 0; virtual void forwardedShrinkLidSpace(uint32_t newSize) = 0; virtual vespalib::MemoryUsage getMemoryUsage() const = 0; virtual bool consider_compact_worst_btree_nodes(const CompactionStrategy& compaction_strategy) = 0; diff --git a/searchlib/src/vespa/searchlib/attribute/multi_value_mapping.h b/searchlib/src/vespa/searchlib/attribute/multi_value_mapping.h index 9720e88543d..81abaa05a45 100644 --- a/searchlib/src/vespa/searchlib/attribute/multi_value_mapping.h +++ b/searchlib/src/vespa/searchlib/attribute/multi_value_mapping.h @@ -44,7 +44,7 @@ public: void doneLoadFromMultiValue() { _store.setInitializing(false); } - void compactWorst(bool compactMemory, bool compactAddressSpace) override; + void compactWorst(CompactionSpec compactionSpec, const 
CompactionStrategy& compaction_strategy) override; vespalib::AddressSpace getAddressSpaceUsage() const override; vespalib::MemoryUsage getArrayStoreMemoryUsage() const override; diff --git a/searchlib/src/vespa/searchlib/attribute/multi_value_mapping.hpp b/searchlib/src/vespa/searchlib/attribute/multi_value_mapping.hpp index 25065a200e9..fb81a60cb13 100644 --- a/searchlib/src/vespa/searchlib/attribute/multi_value_mapping.hpp +++ b/searchlib/src/vespa/searchlib/attribute/multi_value_mapping.hpp @@ -53,9 +53,9 @@ MultiValueMapping<EntryT,RefT>::replace(uint32_t docId, ConstArrayRef values) template <typename EntryT, typename RefT> void -MultiValueMapping<EntryT,RefT>::compactWorst(bool compactMemory, bool compactAddressSpace) +MultiValueMapping<EntryT,RefT>::compactWorst(CompactionSpec compaction_spec, const CompactionStrategy& compaction_strategy) { - vespalib::datastore::ICompactionContext::UP compactionContext(_store.compactWorst(compactMemory, compactAddressSpace)); + vespalib::datastore::ICompactionContext::UP compactionContext(_store.compactWorst(compaction_spec, compaction_strategy)); if (compactionContext) { compactionContext->compact(vespalib::ArrayRef<EntryRef>(&_indices[0], _indices.size())); } diff --git a/searchlib/src/vespa/searchlib/attribute/multi_value_mapping_base.cpp b/searchlib/src/vespa/searchlib/attribute/multi_value_mapping_base.cpp index 2edc30cc2c4..b0d50c129c6 100644 --- a/searchlib/src/vespa/searchlib/attribute/multi_value_mapping_base.cpp +++ b/searchlib/src/vespa/searchlib/attribute/multi_value_mapping_base.cpp @@ -1,17 +1,19 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
#include "multi_value_mapping_base.h" -#include <vespa/searchcommon/common/compaction_strategy.h> +#include <vespa/vespalib/datastore/compaction_spec.h> +#include <vespa/vespalib/datastore/compaction_strategy.h> #include <cassert> namespace search::attribute { +using vespalib::datastore::CompactionStrategy; + MultiValueMappingBase::MultiValueMappingBase(const vespalib::GrowStrategy &gs, vespalib::GenerationHolder &genHolder) : _indices(gs, genHolder), _totalValues(0u), - _cachedArrayStoreMemoryUsage(), - _cachedArrayStoreAddressSpaceUsage(0, 0, (1ull << 32)) + _compaction_spec() { } @@ -65,11 +67,12 @@ MultiValueMappingBase::getMemoryUsage() const } vespalib::MemoryUsage -MultiValueMappingBase::updateStat() +MultiValueMappingBase::updateStat(const CompactionStrategy& compaction_strategy) { - _cachedArrayStoreAddressSpaceUsage = getAddressSpaceUsage(); - vespalib::MemoryUsage retval = getArrayStoreMemoryUsage(); - _cachedArrayStoreMemoryUsage = retval; + auto array_store_address_space_usage = getAddressSpaceUsage(); + auto array_store_memory_usage = getArrayStoreMemoryUsage(); + _compaction_spec = compaction_strategy.should_compact(array_store_memory_usage, array_store_address_space_usage); + auto retval = array_store_memory_usage; retval.merge(_indices.getMemoryUsage()); return retval; } @@ -77,14 +80,8 @@ MultiValueMappingBase::updateStat() bool MultiValueMappingBase::considerCompact(const CompactionStrategy &compactionStrategy) { - size_t usedBytes = _cachedArrayStoreMemoryUsage.usedBytes(); - size_t deadBytes = _cachedArrayStoreMemoryUsage.deadBytes(); - size_t usedArrays = _cachedArrayStoreAddressSpaceUsage.used(); - size_t deadArrays = _cachedArrayStoreAddressSpaceUsage.dead(); - bool compactMemory = compactionStrategy.should_compact_memory(usedBytes, deadBytes); - bool compactAddressSpace = compactionStrategy.should_compact_address_space(usedArrays, deadArrays); - if (compactMemory || compactAddressSpace) { - compactWorst(compactMemory, compactAddressSpace); 
+ if (_compaction_spec.compact()) { + compactWorst(_compaction_spec, compactionStrategy); return true; } return false; diff --git a/searchlib/src/vespa/searchlib/attribute/multi_value_mapping_base.h b/searchlib/src/vespa/searchlib/attribute/multi_value_mapping_base.h index 952e9dbbe56..f27a9f1667c 100644 --- a/searchlib/src/vespa/searchlib/attribute/multi_value_mapping_base.h +++ b/searchlib/src/vespa/searchlib/attribute/multi_value_mapping_base.h @@ -2,12 +2,16 @@ #pragma once +#include <vespa/vespalib/datastore/compaction_spec.h> #include <vespa/vespalib/datastore/entryref.h> #include <vespa/vespalib/util/address_space.h> #include <vespa/vespalib/util/rcuvector.h> #include <functional> -namespace search { class CompactionStrategy; } +namespace vespalib::datastore { +class CompactionSpec; +class CompactionStrategy; +} namespace search::attribute { @@ -17,14 +21,15 @@ namespace search::attribute { class MultiValueMappingBase { public: + using CompactionSpec = vespalib::datastore::CompactionSpec; + using CompactionStrategy = vespalib::datastore::CompactionStrategy; using EntryRef = vespalib::datastore::EntryRef; using RefVector = vespalib::RcuVectorBase<EntryRef>; protected: RefVector _indices; size_t _totalValues; - vespalib::MemoryUsage _cachedArrayStoreMemoryUsage; - vespalib::AddressSpace _cachedArrayStoreAddressSpaceUsage; + CompactionSpec _compaction_spec; MultiValueMappingBase(const vespalib::GrowStrategy &gs, vespalib::GenerationHolder &genHolder); virtual ~MultiValueMappingBase(); @@ -38,7 +43,7 @@ public: virtual vespalib::MemoryUsage getArrayStoreMemoryUsage() const = 0; virtual vespalib::AddressSpace getAddressSpaceUsage() const = 0; vespalib::MemoryUsage getMemoryUsage() const; - vespalib::MemoryUsage updateStat(); + vespalib::MemoryUsage updateStat(const CompactionStrategy& compaction_strategy); size_t getTotalValueCnt() const { return _totalValues; } RefCopyVector getRefCopy(uint32_t size) const; @@ -51,7 +56,7 @@ public: uint32_t getNumKeys() const { 
return _indices.size(); } uint32_t getCapacityKeys() const { return _indices.capacity(); } - virtual void compactWorst(bool compatMemory, bool compactAddressSpace) = 0; + virtual void compactWorst(CompactionSpec compaction_spec, const CompactionStrategy& compaction_strategy) = 0; bool considerCompact(const CompactionStrategy &compactionStrategy); }; diff --git a/searchlib/src/vespa/searchlib/attribute/multienumattribute.cpp b/searchlib/src/vespa/searchlib/attribute/multienumattribute.cpp index b114a355bb4..8790bdd9885 100644 --- a/searchlib/src/vespa/searchlib/attribute/multienumattribute.cpp +++ b/searchlib/src/vespa/searchlib/attribute/multienumattribute.cpp @@ -30,13 +30,17 @@ remap_enum_store_refs(const EnumIndexRemapper& remapper, AttributeVector& v, att v.logEnumStoreEvent("compactfixup", "drain"); { AttributeVector::EnumModifier enum_guard(v.getEnumModifier()); + auto& filter = remapper.get_entry_ref_filter(); v.logEnumStoreEvent("compactfixup", "start"); for (uint32_t doc = 0; doc < v.getNumDocs(); ++doc) { vespalib::ConstArrayRef<WeightedIndex> indicesRef(multi_value_mapping.get(doc)); WeightedIndexVector indices(indicesRef.cbegin(), indicesRef.cend()); for (uint32_t i = 0; i < indices.size(); ++i) { - EnumIndex oldIndex = indices[i].value(); - indices[i] = WeightedIndex(remapper.remap(oldIndex), indices[i].weight()); + EnumIndex ref = indices[i].value(); + if (ref.valid() && filter.has(ref)) { + ref = remapper.remap(ref); + } + indices[i] = WeightedIndex(ref, indices[i].weight()); } std::atomic_thread_fence(std::memory_order_release); multi_value_mapping.replace(doc, indices); diff --git a/searchlib/src/vespa/searchlib/attribute/multienumattribute.hpp b/searchlib/src/vespa/searchlib/attribute/multienumattribute.hpp index acd03a37497..251bbd7c8a7 100644 --- a/searchlib/src/vespa/searchlib/attribute/multienumattribute.hpp +++ b/searchlib/src/vespa/searchlib/attribute/multienumattribute.hpp @@ -207,8 +207,9 @@ MultiValueEnumAttribute<B, M>::onUpdateStat() { 
// update statistics vespalib::MemoryUsage total; - total.merge(this->_enumStore.update_stat()); - total.merge(this->_mvMapping.updateStat()); + auto& compaction_strategy = this->getConfig().getCompactionStrategy(); + total.merge(this->_enumStore.update_stat(compaction_strategy)); + total.merge(this->_mvMapping.updateStat(compaction_strategy)); total.merge(this->getChangeVectorMemoryUsage()); mergeMemoryStats(total); this->updateStatistics(this->_mvMapping.getTotalValueCnt(), this->_enumStore.get_num_uniques(), total.allocatedBytes(), diff --git a/searchlib/src/vespa/searchlib/attribute/multinumericattribute.hpp b/searchlib/src/vespa/searchlib/attribute/multinumericattribute.hpp index 454eddeb6d4..10f837ec1ab 100644 --- a/searchlib/src/vespa/searchlib/attribute/multinumericattribute.hpp +++ b/searchlib/src/vespa/searchlib/attribute/multinumericattribute.hpp @@ -76,7 +76,8 @@ MultiValueNumericAttribute<B, M>::onCommit() template <typename B, typename M> void MultiValueNumericAttribute<B, M>::onUpdateStat() { - vespalib::MemoryUsage usage = this->_mvMapping.updateStat(); + auto& compaction_strategy = this->getConfig().getCompactionStrategy(); + vespalib::MemoryUsage usage = this->_mvMapping.updateStat(compaction_strategy); usage.merge(this->getChangeVectorMemoryUsage()); this->updateStatistics(this->_mvMapping.getTotalValueCnt(), this->_mvMapping.getTotalValueCnt(), usage.allocatedBytes(), usage.usedBytes(), usage.deadBytes(), usage.allocatedBytesOnHold()); diff --git a/searchlib/src/vespa/searchlib/attribute/multinumericpostattribute.hpp b/searchlib/src/vespa/searchlib/attribute/multinumericpostattribute.hpp index a655c30bc37..051a22bd5e8 100644 --- a/searchlib/src/vespa/searchlib/attribute/multinumericpostattribute.hpp +++ b/searchlib/src/vespa/searchlib/attribute/multinumericpostattribute.hpp @@ -18,7 +18,8 @@ template <typename B, typename M> void MultiValueNumericPostingAttribute<B, M>::mergeMemoryStats(vespalib::MemoryUsage & total) { - 
total.merge(this->getPostingList().update_stat()); + auto& compaction_strategy = this->getConfig().getCompactionStrategy(); + total.merge(this->getPostingList().update_stat(compaction_strategy)); } template <typename B, typename M> diff --git a/searchlib/src/vespa/searchlib/attribute/multistringpostattribute.hpp b/searchlib/src/vespa/searchlib/attribute/multistringpostattribute.hpp index 2abe5894163..2bb4d2ada60 100644 --- a/searchlib/src/vespa/searchlib/attribute/multistringpostattribute.hpp +++ b/searchlib/src/vespa/searchlib/attribute/multistringpostattribute.hpp @@ -63,7 +63,8 @@ template <typename B, typename T> void MultiValueStringPostingAttributeT<B, T>::mergeMemoryStats(vespalib::MemoryUsage &total) { - total.merge(this->_postingList.update_stat()); + auto& compaction_strategy = this->getConfig().getCompactionStrategy(); + total.merge(this->_postingList.update_stat(compaction_strategy)); } template <typename B, typename T> diff --git a/searchlib/src/vespa/searchlib/attribute/posting_store_compaction_spec.h b/searchlib/src/vespa/searchlib/attribute/posting_store_compaction_spec.h new file mode 100644 index 00000000000..50b5402056f --- /dev/null +++ b/searchlib/src/vespa/searchlib/attribute/posting_store_compaction_spec.h @@ -0,0 +1,28 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+ +#pragma once + +namespace search::attribute { + +/* + * Class describing how to compact a posting store + */ +class PostingStoreCompactionSpec { + bool _btree_nodes; // btree nodes + bool _store; // short arrays, b-tree roots, bitvectors +public: + PostingStoreCompactionSpec() noexcept + : _btree_nodes(false), + _store(false) + { + } + PostingStoreCompactionSpec(bool btree_nodes_, bool store_) noexcept + : _btree_nodes(btree_nodes_), + _store(store_) + { + } + bool btree_nodes() const noexcept { return _btree_nodes; } + bool store() const noexcept { return _store; } +}; + +} diff --git a/searchlib/src/vespa/searchlib/attribute/postingstore.cpp b/searchlib/src/vespa/searchlib/attribute/postingstore.cpp index 3451c2b0456..df016b050af 100644 --- a/searchlib/src/vespa/searchlib/attribute/postingstore.cpp +++ b/searchlib/src/vespa/searchlib/attribute/postingstore.cpp @@ -7,11 +7,14 @@ #include <vespa/vespalib/btree/btreeiterator.hpp> #include <vespa/vespalib/btree/btreerootbase.cpp> #include <vespa/vespalib/datastore/datastore.hpp> +#include <vespa/vespalib/datastore/compaction_spec.h> +#include <vespa/vespalib/datastore/entry_ref_filter.h> #include <vespa/vespalib/datastore/buffer_type.hpp> namespace search::attribute { using vespalib::btree::BTreeNoLeafData; +using vespalib::datastore::EntryRefFilter; // #define FORCE_BITVECTORS @@ -33,8 +36,7 @@ PostingStoreBase2::PostingStoreBase2(IEnumStoreDictionary& dictionary, Status &s _dictionary(dictionary), _status(status), _bvExtraBytes(0), - _cached_allocator_memory_usage(), - _cached_store_memory_usage() + _compaction_spec() { } @@ -127,45 +129,47 @@ PostingStore<DataT>::removeSparseBitVectors() } } if (needscan) { - res = _dictionary.normalize_posting_lists([this](EntryRef posting_idx) -> EntryRef - { return consider_remove_sparse_bitvector(posting_idx); }); + EntryRefFilter filter(RefType::numBuffers(), RefType::offset_bits); + filter.add_buffers(_bvType.get_active_buffers()); + res = 
_dictionary.normalize_posting_lists([this](std::vector<EntryRef>& refs) + { consider_remove_sparse_bitvector(refs); }, + filter); } return res; } template <typename DataT> -typename PostingStore<DataT>::EntryRef -PostingStore<DataT>::consider_remove_sparse_bitvector(EntryRef ref) +void +PostingStore<DataT>::consider_remove_sparse_bitvector(std::vector<EntryRef>& refs) { - if (!ref.valid() || !isBitVector(getTypeId(EntryRef(ref)))) { - return ref; - } - RefType iRef(ref); - uint32_t typeId = getTypeId(iRef); - assert(isBitVector(typeId)); - assert(_bvs.find(ref.ref() )!= _bvs.end()); - BitVectorEntry *bve = getWBitVectorEntry(iRef); - BitVector &bv = *bve->_bv.get(); - uint32_t docFreq = bv.countTrueBits(); - if (bve->_tree.valid()) { - RefType iRef2(bve->_tree); - assert(isBTree(iRef2)); - const BTreeType *tree = getTreeEntry(iRef2); - assert(tree->size(_allocator) == docFreq); - (void) tree; - } - if (docFreq < _minBvDocFreq) { - dropBitVector(ref); - if (ref.valid()) { + for (auto& ref : refs) { + RefType iRef(ref); + assert(iRef.valid()); + uint32_t typeId = getTypeId(iRef); + assert(isBitVector(typeId)); + assert(_bvs.find(iRef.ref()) != _bvs.end()); + BitVectorEntry *bve = getWBitVectorEntry(iRef); + BitVector &bv = *bve->_bv.get(); + uint32_t docFreq = bv.countTrueBits(); + if (bve->_tree.valid()) { + RefType iRef2(bve->_tree); + assert(isBTree(iRef2)); + const BTreeType *tree = getTreeEntry(iRef2); + assert(tree->size(_allocator) == docFreq); + (void) tree; + } + if (docFreq < _minBvDocFreq) { + dropBitVector(ref); iRef = ref; - typeId = getTypeId(iRef); - if (isBTree(typeId)) { - BTreeType *tree = getWTreeEntry(iRef); - normalizeTree(ref, tree, false); + if (iRef.valid()) { + typeId = getTypeId(iRef); + if (isBTree(typeId)) { + BTreeType *tree = getWTreeEntry(iRef); + normalizeTree(ref, tree, false); + } } } } - return ref; } template <typename DataT> @@ -632,13 +636,14 @@ PostingStore<DataT>::getMemoryUsage() const template <typename DataT> 
vespalib::MemoryUsage -PostingStore<DataT>::update_stat() +PostingStore<DataT>::update_stat(const CompactionStrategy& compaction_strategy) { vespalib::MemoryUsage usage; - _cached_allocator_memory_usage = _allocator.getMemoryUsage(); - _cached_store_memory_usage = _store.getMemoryUsage(); - usage.merge(_cached_allocator_memory_usage); - usage.merge(_cached_store_memory_usage); + auto btree_nodes_memory_usage = _allocator.getMemoryUsage(); + auto store_memory_usage = _store.getMemoryUsage(); + _compaction_spec = PostingStoreCompactionSpec(compaction_strategy.should_compact_memory(btree_nodes_memory_usage), compaction_strategy.should_compact_memory(store_memory_usage)); + usage.merge(btree_nodes_memory_usage); + usage.merge(store_memory_usage); uint64_t bvExtraBytes = _bvExtraBytes; usage.incUsedBytes(bvExtraBytes); usage.incAllocatedBytes(bvExtraBytes); @@ -647,96 +652,114 @@ PostingStore<DataT>::update_stat() template <typename DataT> void -PostingStore<DataT>::move_btree_nodes(EntryRef ref) +PostingStore<DataT>::move_btree_nodes(const std::vector<EntryRef>& refs) { - if (ref.valid()) { + for (auto ref : refs) { RefType iRef(ref); + assert(iRef.valid()); uint32_t typeId = getTypeId(iRef); uint32_t clusterSize = getClusterSize(typeId); - if (clusterSize == 0) { - if (isBitVector(typeId)) { - BitVectorEntry *bve = getWBitVectorEntry(iRef); - RefType iRef2(bve->_tree); - if (iRef2.valid()) { - assert(isBTree(iRef2)); - BTreeType *tree = getWTreeEntry(iRef2); - tree->move_nodes(_allocator); - } - } else { - BTreeType *tree = getWTreeEntry(iRef); + assert(clusterSize == 0); + if (isBitVector(typeId)) { + BitVectorEntry *bve = getWBitVectorEntry(iRef); + RefType iRef2(bve->_tree); + if (iRef2.valid()) { + assert(isBTree(iRef2)); + BTreeType *tree = getWTreeEntry(iRef2); tree->move_nodes(_allocator); } + } else { + assert(isBTree(typeId)); + BTreeType *tree = getWTreeEntry(iRef); + tree->move_nodes(_allocator); } } } template <typename DataT> -typename 
PostingStore<DataT>::EntryRef -PostingStore<DataT>::move(EntryRef ref) +void +PostingStore<DataT>::move(std::vector<EntryRef>& refs) { - if (!ref.valid()) { - return EntryRef(); - } - RefType iRef(ref); - uint32_t typeId = getTypeId(iRef); - uint32_t clusterSize = getClusterSize(typeId); - if (clusterSize == 0) { - if (isBitVector(typeId)) { - BitVectorEntry *bve = getWBitVectorEntry(iRef); - RefType iRef2(bve->_tree); - if (iRef2.valid()) { - assert(isBTree(iRef2)); - if (_store.getCompacting(iRef2)) { - BTreeType *tree = getWTreeEntry(iRef2); - auto ref_and_ptr = allocBTreeCopy(*tree); - tree->prepare_hold(); - bve->_tree = ref_and_ptr.ref; + for (auto& ref : refs) { + RefType iRef(ref); + assert(iRef.valid()); + uint32_t typeId = getTypeId(iRef); + uint32_t clusterSize = getClusterSize(typeId); + if (clusterSize == 0) { + if (isBitVector(typeId)) { + BitVectorEntry *bve = getWBitVectorEntry(iRef); + RefType iRef2(bve->_tree); + if (iRef2.valid()) { + assert(isBTree(iRef2)); + if (_store.getCompacting(iRef2)) { + BTreeType *tree = getWTreeEntry(iRef2); + auto ref_and_ptr = allocBTreeCopy(*tree); + tree->prepare_hold(); + // Note: Needs review when porting to other platforms + // Assumes that other CPUs observes stores from this CPU in order + std::atomic_thread_fence(std::memory_order_release); + bve->_tree = ref_and_ptr.ref; + } } + if (_store.getCompacting(iRef)) { + auto new_ref = allocBitVectorCopy(*bve).ref; + _bvs.erase(iRef.ref()); + _bvs.insert(new_ref.ref()); + ref = new_ref; + } + } else { + assert(isBTree(typeId)); + assert(_store.getCompacting(iRef)); + BTreeType *tree = getWTreeEntry(iRef); + auto ref_and_ptr = allocBTreeCopy(*tree); + tree->prepare_hold(); + ref = ref_and_ptr.ref; } - if (!_store.getCompacting(ref)) { - return ref; - } - auto new_ref = allocBitVectorCopy(*bve).ref; - _bvs.erase(ref.ref()); - _bvs.insert(new_ref.ref()); - return new_ref; } else { - if (!_store.getCompacting(ref)) { - return ref; - } - BTreeType *tree = 
getWTreeEntry(iRef); - auto ref_and_ptr = allocBTreeCopy(*tree); - tree->prepare_hold(); - return ref_and_ptr.ref; + assert(_store.getCompacting(iRef)); + const KeyDataType *shortArray = getKeyDataEntry(iRef, clusterSize); + ref = allocKeyDataCopy(shortArray, clusterSize).ref; } } - if (!_store.getCompacting(ref)) { - return ref; - } - const KeyDataType *shortArray = getKeyDataEntry(iRef, clusterSize); - return allocKeyDataCopy(shortArray, clusterSize).ref; } template <typename DataT> void -PostingStore<DataT>::compact_worst_btree_nodes() +PostingStore<DataT>::compact_worst_btree_nodes(const CompactionStrategy& compaction_strategy) { - auto to_hold = this->start_compact_worst_btree_nodes(); - _dictionary.normalize_posting_lists([this](EntryRef posting_idx) -> EntryRef - { - move_btree_nodes(posting_idx); - return posting_idx; - }); + auto to_hold = this->start_compact_worst_btree_nodes(compaction_strategy); + EntryRefFilter filter(RefType::numBuffers(), RefType::offset_bits); + // Only look at buffers containing bitvectors and btree roots + filter.add_buffers(this->_treeType.get_active_buffers()); + filter.add_buffers(_bvType.get_active_buffers()); + _dictionary.foreach_posting_list([this](const std::vector<EntryRef>& refs) + { move_btree_nodes(refs); }, filter); this->finish_compact_worst_btree_nodes(to_hold); } template <typename DataT> void -PostingStore<DataT>::compact_worst_buffers() +PostingStore<DataT>::compact_worst_buffers(CompactionSpec compaction_spec, const CompactionStrategy& compaction_strategy) { - auto to_hold = this->start_compact_worst_buffers(); - _dictionary.normalize_posting_lists([this](EntryRef posting_idx) -> EntryRef - { return move(posting_idx); }); + + auto to_hold = this->start_compact_worst_buffers(compaction_spec, compaction_strategy); + bool compact_btree_roots = false; + EntryRefFilter filter(RefType::numBuffers(), RefType::offset_bits); + filter.add_buffers(to_hold); + // Start with looking at buffers being compacted + for (uint32_t 
buffer_id : to_hold) { + if (isBTree(_store.getBufferState(buffer_id).getTypeId())) { + compact_btree_roots = true; + } + } + if (compact_btree_roots) { + // If we are compacting btree roots then we also have to look at bitvector + // buffers + filter.add_buffers(_bvType.get_active_buffers()); + } + _dictionary.normalize_posting_lists([this](std::vector<EntryRef>& refs) + { return move(refs); }, + filter); this->finishCompact(to_hold); } @@ -747,8 +770,8 @@ PostingStore<DataT>::consider_compact_worst_btree_nodes(const CompactionStrategy if (_allocator.getNodeStore().has_held_buffers()) { return false; } - if (compaction_strategy.should_compact_memory(_cached_allocator_memory_usage.usedBytes(), _cached_allocator_memory_usage.deadBytes())) { - compact_worst_btree_nodes(); + if (_compaction_spec.btree_nodes()) { + compact_worst_btree_nodes(compaction_strategy); return true; } return false; @@ -761,8 +784,9 @@ PostingStore<DataT>::consider_compact_worst_buffers(const CompactionStrategy& co if (_store.has_held_buffers()) { return false; } - if (compaction_strategy.should_compact_memory(_cached_store_memory_usage.usedBytes(), _cached_store_memory_usage.deadBytes())) { - compact_worst_buffers(); + if (_compaction_spec.store()) { + CompactionSpec compaction_spec(true, false); + compact_worst_buffers(compaction_spec, compaction_strategy); return true; } return false; diff --git a/searchlib/src/vespa/searchlib/attribute/postingstore.h b/searchlib/src/vespa/searchlib/attribute/postingstore.h index a0f0be1c430..949a355bc9d 100644 --- a/searchlib/src/vespa/searchlib/attribute/postingstore.h +++ b/searchlib/src/vespa/searchlib/attribute/postingstore.h @@ -4,6 +4,7 @@ #include "enum_store_dictionary.h" #include "postinglisttraits.h" +#include "posting_store_compaction_spec.h" #include <set> namespace search { @@ -47,8 +48,7 @@ protected: IEnumStoreDictionary& _dictionary; Status &_status; uint64_t _bvExtraBytes; - vespalib::MemoryUsage _cached_allocator_memory_usage; - 
vespalib::MemoryUsage _cached_store_memory_usage; + PostingStoreCompactionSpec _compaction_spec; static constexpr uint32_t BUFFERTYPE_BITVECTOR = 9u; @@ -77,6 +77,8 @@ public: typedef typename Parent::AggregatedType AggregatedType; typedef typename Parent::BTreeTypeRefPair BTreeTypeRefPair; typedef typename Parent::Builder Builder; + using CompactionSpec = vespalib::datastore::CompactionSpec; + using CompactionStrategy = vespalib::datastore::CompactionStrategy; typedef vespalib::datastore::EntryRef EntryRef; typedef std::less<uint32_t> CompareT; using Parent::applyNewArray; @@ -89,6 +91,7 @@ public: using Parent::getWTreeEntry; using Parent::getTreeEntry; using Parent::getKeyDataEntry; + using Parent::isBTree; using Parent::clusterLimit; using Parent::allocBTree; using Parent::allocBTreeCopy; @@ -105,10 +108,8 @@ public: ~PostingStore(); bool removeSparseBitVectors() override; - EntryRef consider_remove_sparse_bitvector(EntryRef ref); + void consider_remove_sparse_bitvector(std::vector<EntryRef> &refs); static bool isBitVector(uint32_t typeId) { return typeId == BUFFERTYPE_BITVECTOR; } - static bool isBTree(uint32_t typeId) { return typeId == BUFFERTYPE_BTREE; } - bool isBTree(RefType ref) const { return isBTree(getTypeId(ref)); } void applyNew(EntryRef &ref, AddIter a, AddIter ae); @@ -186,13 +187,13 @@ public: static inline DataT bitVectorWeight(); vespalib::MemoryUsage getMemoryUsage() const; - vespalib::MemoryUsage update_stat(); + vespalib::MemoryUsage update_stat(const CompactionStrategy& compaction_strategy); - void move_btree_nodes(EntryRef ref); - EntryRef move(EntryRef ref); + void move_btree_nodes(const std::vector<EntryRef> &refs); + void move(std::vector<EntryRef>& refs); - void compact_worst_btree_nodes(); - void compact_worst_buffers(); + void compact_worst_btree_nodes(const CompactionStrategy& compaction_strategy); + void compact_worst_buffers(CompactionSpec compaction_spec, const CompactionStrategy& compaction_strategy); bool 
consider_compact_worst_btree_nodes(const CompactionStrategy& compaction_strategy); bool consider_compact_worst_buffers(const CompactionStrategy& compaction_strategy); private: diff --git a/searchlib/src/vespa/searchlib/attribute/predicate_attribute.cpp b/searchlib/src/vespa/searchlib/attribute/predicate_attribute.cpp index d9024af724b..6268a6da701 100644 --- a/searchlib/src/vespa/searchlib/attribute/predicate_attribute.cpp +++ b/searchlib/src/vespa/searchlib/attribute/predicate_attribute.cpp @@ -194,7 +194,7 @@ PredicateAttribute::onLoad(vespalib::Executor *) buffer.moveFreeToData(size); const GenericHeader &header = loaded_buffer->getHeader(); - auto attributeHeader = attribute::AttributeHeader::extractTags(header); + auto attributeHeader = attribute::AttributeHeader::extractTags(header, getBaseFileName()); uint32_t version = attributeHeader.getVersion(); setCreateSerialNum(attributeHeader.getCreateSerialNum()); diff --git a/searchlib/src/vespa/searchlib/attribute/reference_attribute.cpp b/searchlib/src/vespa/searchlib/attribute/reference_attribute.cpp index eb822313d61..4212a4ad247 100644 --- a/searchlib/src/vespa/searchlib/attribute/reference_attribute.cpp +++ b/searchlib/src/vespa/searchlib/attribute/reference_attribute.cpp @@ -24,6 +24,7 @@ namespace search::attribute { using document::DocumentId; using document::GlobalId; using document::IdParseException; +using vespalib::datastore::CompactionSpec; namespace { @@ -42,8 +43,7 @@ ReferenceAttribute::ReferenceAttribute(const vespalib::stringref baseFileName, : NotImplementedAttribute(baseFileName, cfg), _store(), _indices(getGenerationHolder()), - _cached_unique_store_values_memory_usage(), - _cached_unique_store_dictionary_memory_usage(), + _compaction_spec(), _gidToLidMapperFactory(), _referenceMappings(getGenerationHolder(), getCommittedDocIdLimitRef()) { @@ -191,11 +191,13 @@ ReferenceAttribute::onCommit() void ReferenceAttribute::onUpdateStat() { + auto& compaction_strategy = 
getConfig().getCompactionStrategy(); vespalib::MemoryUsage total = _store.get_values_memory_usage(); - _cached_unique_store_values_memory_usage = total; auto& dictionary = _store.get_dictionary(); - _cached_unique_store_dictionary_memory_usage = dictionary.get_memory_usage(); - total.merge(_cached_unique_store_dictionary_memory_usage); + auto dictionary_memory_usage = dictionary.get_memory_usage(); + _compaction_spec = ReferenceAttributeCompactionSpec(compaction_strategy.should_compact_memory(total), + compaction_strategy.should_compact_memory(dictionary_memory_usage)); + total.merge(dictionary_memory_usage); total.mergeGenerationHeldBytes(getGenerationHolder().getHeldBytes()); total.merge(_indices.getMemoryUsage()); total.merge(_referenceMappings.getMemoryUsage()); @@ -291,20 +293,18 @@ ReferenceAttribute::getReference(DocId doc) const bool ReferenceAttribute::consider_compact_values(const CompactionStrategy &compactionStrategy) { - size_t used_bytes = _cached_unique_store_values_memory_usage.usedBytes(); - size_t dead_bytes = _cached_unique_store_values_memory_usage.deadBytes(); - bool compact_memory = compactionStrategy.should_compact_memory(used_bytes, dead_bytes); - if (compact_memory) { - compact_worst_values(); + if (_compaction_spec.values()) { + compact_worst_values(compactionStrategy); return true; } return false; } void -ReferenceAttribute::compact_worst_values() +ReferenceAttribute::compact_worst_values(const CompactionStrategy& compaction_strategy) { - auto remapper(_store.compact_worst(true, true)); + CompactionSpec compaction_spec(true, true); + auto remapper(_store.compact_worst(compaction_spec, compaction_strategy)); if (remapper) { remapper->remap(vespalib::ArrayRef<EntryRef>(&_indices[0], _indices.size())); remapper->done(); @@ -318,10 +318,8 @@ ReferenceAttribute::consider_compact_dictionary(const CompactionStrategy &compac if (dictionary.has_held_buffers()) { return false; } - if 
(compaction_strategy.should_compact_memory(_cached_unique_store_dictionary_memory_usage.usedBytes(), - _cached_unique_store_dictionary_memory_usage.deadBytes())) - { - dictionary.compact_worst(true, true); + if (_compaction_spec.dictionary()) { + dictionary.compact_worst(true, true, compaction_strategy); return true; } return false; diff --git a/searchlib/src/vespa/searchlib/attribute/reference_attribute.h b/searchlib/src/vespa/searchlib/attribute/reference_attribute.h index 4016230ef89..f985c799c07 100644 --- a/searchlib/src/vespa/searchlib/attribute/reference_attribute.h +++ b/searchlib/src/vespa/searchlib/attribute/reference_attribute.h @@ -4,6 +4,7 @@ #include "not_implemented_attribute.h" #include "reference.h" +#include "reference_attribute_compaction_spec.h" #include "reference_mappings.h" #include <vespa/vespalib/datastore/unique_store.h> #include <vespa/vespalib/util/rcuvector.h> @@ -25,6 +26,7 @@ namespace search::attribute { class ReferenceAttribute : public NotImplementedAttribute { public: + using CompactionStrategy = vespalib::datastore::CompactionStrategy; using EntryRef = vespalib::datastore::EntryRef; using GlobalId = document::GlobalId; using ReferenceStore = vespalib::datastore::UniqueStore<Reference>; @@ -42,8 +44,7 @@ public: private: ReferenceStore _store; ReferenceStoreIndices _indices; - vespalib::MemoryUsage _cached_unique_store_values_memory_usage; - vespalib::MemoryUsage _cached_unique_store_dictionary_memory_usage; + ReferenceAttributeCompactionSpec _compaction_spec; std::shared_ptr<IGidToLidMapperFactory> _gidToLidMapperFactory; ReferenceMappings _referenceMappings; @@ -57,7 +58,7 @@ private: uint64_t getUniqueValueCount() const override; bool consider_compact_values(const CompactionStrategy &compactionStrategy); - void compact_worst_values(); + void compact_worst_values(const CompactionStrategy& compaction_strategy); bool consider_compact_dictionary(const CompactionStrategy& compaction_strategy); IndicesCopyVector 
getIndicesCopy(uint32_t size) const; void removeReverseMapping(EntryRef oldRef, uint32_t lid); diff --git a/searchlib/src/vespa/searchlib/attribute/reference_attribute_compaction_spec.h b/searchlib/src/vespa/searchlib/attribute/reference_attribute_compaction_spec.h new file mode 100644 index 00000000000..dda44fdcd96 --- /dev/null +++ b/searchlib/src/vespa/searchlib/attribute/reference_attribute_compaction_spec.h @@ -0,0 +1,28 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. + +#pragma once + +namespace search::attribute { + +/* + * Class describing how to compact a reference attribute + */ +class ReferenceAttributeCompactionSpec { + bool _values; + bool _dictionary; +public: + ReferenceAttributeCompactionSpec() noexcept + : _values(false), + _dictionary(false) + { + } + ReferenceAttributeCompactionSpec(bool values_, bool dictionary_) noexcept + : _values(values_), + _dictionary(dictionary_) + { + } + bool values() const noexcept { return _values; } + bool dictionary() const noexcept { return _dictionary; } +}; + +} diff --git a/searchlib/src/vespa/searchlib/attribute/singleenumattribute.cpp b/searchlib/src/vespa/searchlib/attribute/singleenumattribute.cpp index 4323e57f6b1..18805a7b20f 100644 --- a/searchlib/src/vespa/searchlib/attribute/singleenumattribute.cpp +++ b/searchlib/src/vespa/searchlib/attribute/singleenumattribute.cpp @@ -49,13 +49,16 @@ SingleValueEnumAttributeBase::remap_enum_store_refs(const EnumIndexRemapper& rem { // update _enumIndices with new EnumIndex values after enum store has been compacted. 
v.logEnumStoreEvent("reenumerate", "reserved"); - auto new_indexes = std::make_unique<vespalib::Array<EnumIndex>>(); - new_indexes->reserve(_enumIndices.capacity()); + vespalib::Array<EnumIndex> new_indexes; + new_indexes.reserve(_enumIndices.capacity()); v.logEnumStoreEvent("reenumerate", "start"); + auto& filter = remapper.get_entry_ref_filter(); for (uint32_t i = 0; i < _enumIndices.size(); ++i) { - EnumIndex old_index = _enumIndices[i]; - EnumIndex new_index = remapper.remap(old_index); - new_indexes->push_back_fast(new_index); + EnumIndex ref = _enumIndices[i]; + if (ref.valid() && filter.has(ref)) { + ref = remapper.remap(ref); + } + new_indexes.push_back_fast(ref); } v.logEnumStoreEvent("compactfixup", "drain"); { diff --git a/searchlib/src/vespa/searchlib/attribute/singleenumattribute.hpp b/searchlib/src/vespa/searchlib/attribute/singleenumattribute.hpp index 398625891b6..dde853cbc90 100644 --- a/searchlib/src/vespa/searchlib/attribute/singleenumattribute.hpp +++ b/searchlib/src/vespa/searchlib/attribute/singleenumattribute.hpp @@ -125,8 +125,9 @@ SingleValueEnumAttribute<B>::onUpdateStat() { // update statistics vespalib::MemoryUsage total = _enumIndices.getMemoryUsage(); + auto& compaction_strategy = this->getConfig().getCompactionStrategy(); total.mergeGenerationHeldBytes(getGenerationHolder().getHeldBytes()); - total.merge(this->_enumStore.update_stat()); + total.merge(this->_enumStore.update_stat(compaction_strategy)); total.merge(this->getChangeVectorMemoryUsage()); mergeMemoryStats(total); this->updateStatistics(_enumIndices.size(), this->_enumStore.get_num_uniques(), total.allocatedBytes(), diff --git a/searchlib/src/vespa/searchlib/attribute/singlenumericpostattribute.hpp b/searchlib/src/vespa/searchlib/attribute/singlenumericpostattribute.hpp index e56bd5aacb1..1083d0f4cb8 100644 --- a/searchlib/src/vespa/searchlib/attribute/singlenumericpostattribute.hpp +++ b/searchlib/src/vespa/searchlib/attribute/singlenumericpostattribute.hpp @@ -36,7 +36,8 
@@ template <typename B> void SingleValueNumericPostingAttribute<B>::mergeMemoryStats(vespalib::MemoryUsage & total) { - total.merge(this->_postingList.update_stat()); + auto& compaction_strategy = this->getConfig().getCompactionStrategy(); + total.merge(this->_postingList.update_stat(compaction_strategy)); } template <typename B> diff --git a/searchlib/src/vespa/searchlib/attribute/singlestringpostattribute.hpp b/searchlib/src/vespa/searchlib/attribute/singlestringpostattribute.hpp index af31295d083..e77c59e915d 100644 --- a/searchlib/src/vespa/searchlib/attribute/singlestringpostattribute.hpp +++ b/searchlib/src/vespa/searchlib/attribute/singlestringpostattribute.hpp @@ -34,7 +34,8 @@ template <typename B> void SingleValueStringPostingAttributeT<B>::mergeMemoryStats(vespalib::MemoryUsage & total) { - total.merge(this->_postingList.update_stat()); + auto& compaction_strategy = this->getConfig().getCompactionStrategy(); + total.merge(this->_postingList.update_stat(compaction_strategy)); } template <typename B> diff --git a/searchlib/src/vespa/searchlib/common/i_compactable_lid_space.h b/searchlib/src/vespa/searchlib/common/i_compactable_lid_space.h index bb404f27709..cea251272dc 100644 --- a/searchlib/src/vespa/searchlib/common/i_compactable_lid_space.h +++ b/searchlib/src/vespa/searchlib/common/i_compactable_lid_space.h @@ -11,7 +11,7 @@ namespace search::common { * Interface for a component that has a lid space that can be compacted and shrunk. */ struct ICompactableLidSpace { - virtual ~ICompactableLidSpace() {} + virtual ~ICompactableLidSpace() = default; /** * Compacts the lid space down to the wanted given doc id limit. 
diff --git a/searchlib/src/vespa/searchlib/docstore/compacter.cpp b/searchlib/src/vespa/searchlib/docstore/compacter.cpp index 38f3fbef0b0..26fb79f8a4e 100644 --- a/searchlib/src/vespa/searchlib/docstore/compacter.cpp +++ b/searchlib/src/vespa/searchlib/docstore/compacter.cpp @@ -26,7 +26,7 @@ BucketCompacter::BucketCompacter(size_t maxSignificantBucketBits, const Compress _bucketizer(bucketizer), _writeCount(0), _maxBucketGuardDuration(vespalib::duration::zero()), - _lastSample(), + _lastSample(vespalib::steady_clock::now()), _lock(), _backingMemory(Alloc::alloc(0x40000000), &_lock), _tmpStore(), diff --git a/searchlib/src/vespa/searchlib/docstore/documentstore.cpp b/searchlib/src/vespa/searchlib/docstore/documentstore.cpp index 7aaee7180df..b4ff050c0f6 100644 --- a/searchlib/src/vespa/searchlib/docstore/documentstore.cpp +++ b/searchlib/src/vespa/searchlib/docstore/documentstore.cpp @@ -112,7 +112,6 @@ public: } -using VisitCache = docstore::VisitCache; using docstore::Value; bool @@ -239,7 +238,14 @@ DocumentStore::remove(uint64_t syncToken, DocumentIdT lid) } void -DocumentStore::compact(uint64_t syncToken) +DocumentStore::compactBloat(uint64_t syncToken) +{ + (void) syncToken; + // Most implementations does not offer compact. +} + +void +DocumentStore::compactSpread(uint64_t syncToken) { (void) syncToken; // Most implementations does not offer compact. 
diff --git a/searchlib/src/vespa/searchlib/docstore/documentstore.h b/searchlib/src/vespa/searchlib/docstore/documentstore.h index b6021d34bef..6402c16cd5e 100644 --- a/searchlib/src/vespa/searchlib/docstore/documentstore.h +++ b/searchlib/src/vespa/searchlib/docstore/documentstore.h @@ -72,7 +72,8 @@ public: void remove(uint64_t syncToken, DocumentIdT lid) override; void flush(uint64_t syncToken) override; uint64_t initFlush(uint64_t synctoken) override; - void compact(uint64_t syncToken) override; + void compactBloat(uint64_t syncToken) override; + void compactSpread(uint64_t syncToken) override; uint64_t lastSyncToken() const override; uint64_t tentativeLastSyncToken() const override; vespalib::system_time getLastFlushTime() const override; @@ -80,7 +81,7 @@ public: size_t memoryUsed() const override { return _backingStore.memoryUsed(); } size_t getDiskFootprint() const override { return _backingStore.getDiskFootprint(); } size_t getDiskBloat() const override { return _backingStore.getDiskBloat(); } - size_t getMaxCompactGain() const override { return _backingStore.getMaxCompactGain(); } + size_t getMaxSpreadAsBloat() const override { return _backingStore.getMaxSpreadAsBloat(); } CacheStats getCacheStats() const override; size_t memoryMeta() const override { return _backingStore.memoryMeta(); } const vespalib::string & getBaseDir() const override { return _backingStore.getBaseDir(); } diff --git a/searchlib/src/vespa/searchlib/docstore/idatastore.h b/searchlib/src/vespa/searchlib/docstore/idatastore.h index b18bb0a3827..fc0eae1d15e 100644 --- a/searchlib/src/vespa/searchlib/docstore/idatastore.h +++ b/searchlib/src/vespa/searchlib/docstore/idatastore.h @@ -17,14 +17,14 @@ class IBufferVisitor; class IDataStoreVisitor { public: - virtual ~IDataStoreVisitor() { } + virtual ~IDataStoreVisitor() = default; virtual void visit(uint32_t lid, const void *buffer, size_t sz) = 0; }; class IDataStoreVisitorProgress { public: - virtual ~IDataStoreVisitorProgress() { } + 
virtual ~IDataStoreVisitorProgress() = default; virtual void updateProgress(double progress) = 0; }; @@ -46,11 +46,7 @@ public: * @param dirName The directory that will contain the data file. **/ IDataStore(const vespalib::string & dirName); - - /** - * Allow inhertitance. - **/ - virtual ~IDataStore(); + ~IDataStore() override; /** * Read data from the data store into a buffer. @@ -125,7 +121,7 @@ public: * to avoid misuse we let the report a more conservative number here if necessary. * @return diskspace to be gained. */ - virtual size_t getMaxCompactGain() const { return getDiskBloat(); } + virtual size_t getMaxSpreadAsBloat() const = 0; /** diff --git a/searchlib/src/vespa/searchlib/docstore/idocumentstore.cpp b/searchlib/src/vespa/searchlib/docstore/idocumentstore.cpp index e1558f2238b..4f9b91f3e15 100644 --- a/searchlib/src/vespa/searchlib/docstore/idocumentstore.cpp +++ b/searchlib/src/vespa/searchlib/docstore/idocumentstore.cpp @@ -5,10 +5,6 @@ namespace search { -IDocumentStore::IDocumentStore() = default; - -IDocumentStore::~IDocumentStore() = default; - void IDocumentStore::visit(const LidVector & lids, const document::DocumentTypeRepo &repo, IDocumentVisitor & visitor) const { for (uint32_t lid : lids) { visitor.visit(lid, read(lid, repo)); diff --git a/searchlib/src/vespa/searchlib/docstore/idocumentstore.h b/searchlib/src/vespa/searchlib/docstore/idocumentstore.h index 2a7864a6f47..d84a5ad7e7e 100644 --- a/searchlib/src/vespa/searchlib/docstore/idocumentstore.h +++ b/searchlib/src/vespa/searchlib/docstore/idocumentstore.h @@ -22,7 +22,7 @@ class IDocumentStoreReadVisitor { public: using DocumentSP = std::shared_ptr<document::Document>; - virtual ~IDocumentStoreReadVisitor() { } + virtual ~IDocumentStoreReadVisitor() = default; virtual void visit(uint32_t lid, const DocumentSP &doc) = 0; virtual void visit(uint32_t lid) = 0; }; @@ -31,14 +31,14 @@ class IDocumentStoreRewriteVisitor { public: using DocumentSP = std::shared_ptr<document::Document>; - 
virtual ~IDocumentStoreRewriteVisitor() { } + virtual ~IDocumentStoreRewriteVisitor() = default; virtual void visit(uint32_t lid, const DocumentSP &doc) = 0; }; class IDocumentStoreVisitorProgress { public: - virtual ~IDocumentStoreVisitorProgress() { } + virtual ~IDocumentStoreVisitorProgress() = default; virtual void updateProgress(double progress) = 0; }; @@ -47,7 +47,7 @@ class IDocumentVisitor { public: using DocumentUP = std::unique_ptr<document::Document>; - virtual ~IDocumentVisitor() { } + virtual ~IDocumentVisitor() = default; virtual void visit(uint32_t lid, DocumentUP doc) = 0; virtual bool allowVisitCaching() const = 0; private: @@ -68,17 +68,6 @@ public: using LidVector = std::vector<uint32_t>; using DocumentUP = std::unique_ptr<document::Document>; - - /** - * Construct a document store. - * - * @throws vespalib::IoException if the file is corrupt or other IO problems occur. - * @param docMan The document type manager to use when deserializing. - * @param baseDir The path to a directory where the implementaion specific files will reside. - **/ - IDocumentStore(); - virtual ~IDocumentStore(); - /** * Make a Document from a stored serialized data blob. * @param lid The local ID associated with the document. @@ -111,7 +100,8 @@ public: /** * If possible compact the disk. **/ - virtual void compact(uint64_t syncToken) = 0; + virtual void compactBloat(uint64_t syncToken) = 0; + virtual void compactSpread(uint64_t syncToken) = 0; /** * The sync token used for the last successful flush() operation, @@ -164,12 +154,11 @@ public: virtual size_t getDiskBloat() const = 0; /** - * Calculates how much diskspace can be compacted during a flush. - * default is to return th ebloat limit, but as some targets have some internal limits - * to avoid misuse we let the report a more conservative number here if necessary. - * @return diskspace to be gained. + * Calculates the gain from keeping buckets close. 
It is converted to diskbloat + * so it can be prioritized accordingly. + * @return spread as disk bloat. */ - virtual size_t getMaxCompactGain() const { return getDiskBloat(); } + virtual size_t getMaxSpreadAsBloat() const = 0; /** * Returns statistics about the cache. diff --git a/searchlib/src/vespa/searchlib/docstore/logdatastore.cpp b/searchlib/src/vespa/searchlib/docstore/logdatastore.cpp index fd25dd56235..6a9ae40cc93 100644 --- a/searchlib/src/vespa/searchlib/docstore/logdatastore.cpp +++ b/searchlib/src/vespa/searchlib/docstore/logdatastore.cpp @@ -36,7 +36,6 @@ using namespace std::literals; LogDataStore::Config::Config() : _maxFileSize(DEFAULT_MAX_FILESIZE), - _maxDiskBloatFactor(0.2), _maxBucketSpread(2.5), _minFileSizeFactor(0.2), _maxNumLids(DEFAULT_MAX_LIDS_PER_FILE), @@ -48,7 +47,6 @@ LogDataStore::Config::Config() bool LogDataStore::Config::operator == (const Config & rhs) const { return (_maxBucketSpread == rhs._maxBucketSpread) && - (_maxDiskBloatFactor == rhs._maxDiskBloatFactor) && (_maxFileSize == rhs._maxFileSize) && (_minFileSizeFactor == rhs._minFileSizeFactor) && (_skipCrcOnRead == rhs._skipCrcOnRead) && @@ -294,46 +292,14 @@ vespalib::string bloatMsg(size_t bloat, size_t usage) { } -void -LogDataStore::compact(uint64_t syncToken) -{ - uint64_t usage = getDiskFootprint(); - uint64_t bloat = getDiskBloat(); - LOG(debug, "%s", bloatMsg(bloat, usage).c_str()); - const bool doCompact = (_fileChunks.size() > 1); - if (doCompact) { - LOG(info, "%s. Will compact", bloatMsg(bloat, usage).c_str()); - compactWorst(_config.getMaxDiskBloatFactor(), _config.getMaxBucketSpread(), isTotalDiskBloatExceeded(usage, bloat)); - } - flushActiveAndWait(syncToken); - if (doCompact) { - usage = getDiskFootprint(); - bloat = getDiskBloat(); - LOG(info, "Done compacting. 
%s", bloatMsg(bloat, usage).c_str()); - } -} - -bool -LogDataStore::isTotalDiskBloatExceeded(size_t diskFootPrint, size_t bloat) const { - const size_t maxConfiguredDiskBloat = diskFootPrint * _config.getMaxDiskBloatFactor(); - return bloat > maxConfiguredDiskBloat; -} - size_t -LogDataStore::getMaxCompactGain() const +LogDataStore::getMaxSpreadAsBloat() const { - size_t bloat = getDiskBloat(); const size_t diskFootPrint = getDiskFootprint(); - if ( ! isTotalDiskBloatExceeded(diskFootPrint, bloat) ) { - bloat = 0; - } - const double maxSpread = getMaxBucketSpread(); - size_t spreadAsBloat = diskFootPrint * (1.0 - 1.0/maxSpread); - if ( maxSpread < _config.getMaxBucketSpread()) { - spreadAsBloat = 0; - } - return (bloat + spreadAsBloat); + return (maxSpread > _config.getMaxBucketSpread()) + ? diskFootPrint * (1.0 - 1.0/maxSpread) + : 0; } void @@ -380,40 +346,34 @@ LogDataStore::getMaxBucketSpread() const } std::pair<bool, LogDataStore::FileId> -LogDataStore::findNextToCompact(double bloatLimit, double spreadLimit, bool prioritizeDiskBloat) +LogDataStore::findNextToCompact(bool dueToBloat) { typedef std::multimap<double, FileId, std::greater<double>> CostMap; - CostMap worstBloat; - CostMap worstSpread; + CostMap worst; MonitorGuard guard(_updateLock); for (size_t i(0); i < _fileChunks.size(); i++) { const auto & fc(_fileChunks[i]); if (fc && fc->frozen() && (_currentlyCompacting.find(fc->getNameId()) == _currentlyCompacting.end())) { uint64_t usage = fc->getDiskFootprint(); - uint64_t bloat = fc->getDiskBloat(); - if (_bucketizer) { - worstSpread.emplace(fc->getBucketSpread(), FileId(i)); - } - if (usage > 0) { - double tmp(double(bloat)/usage); - worstBloat.emplace(tmp, FileId(i)); + if ( ! 
dueToBloat && _bucketizer) { + worst.emplace(fc->getBucketSpread(), FileId(i)); + } else if (dueToBloat && usage > 0) { + double tmp(double(fc->getDiskBloat())/usage); + worst.emplace(tmp, FileId(i)); } } } if (LOG_WOULD_LOG(debug)) { - for (const auto & it : worstBloat) { + for (const auto & it : worst) { const FileChunk & fc = *_fileChunks[it.second.getId()]; LOG(debug, "File '%s' has bloat '%2.2f' and bucket-spread '%1.4f numChunks=%d , numBuckets=%ld, numUniqueBuckets=%ld", fc.getName().c_str(), it.first * 100, fc.getBucketSpread(), fc.getNumChunks(), fc.getNumBuckets(), fc.getNumUniqueBuckets()); } } std::pair<bool, FileId> retval(false, FileId(-1)); - if ( ! worstBloat.empty() && (worstBloat.begin()->first > bloatLimit) && prioritizeDiskBloat) { - retval.first = true; - retval.second = worstBloat.begin()->second; - } else if ( ! worstSpread.empty() && (worstSpread.begin()->first > spreadLimit)) { + if ( ! worst.empty()) { retval.first = true; - retval.second = worstSpread.begin()->second; + retval.second = worst.begin()->second; } if (retval.first) { _currentlyCompacting.insert(_fileChunks[retval.second.getId()]->getNameId()); @@ -422,10 +382,24 @@ LogDataStore::findNextToCompact(double bloatLimit, double spreadLimit, bool prio } void -LogDataStore::compactWorst(double bloatLimit, double spreadLimit, bool prioritizeDiskBloat) { - auto worst = findNextToCompact(bloatLimit, spreadLimit, prioritizeDiskBloat); - if (worst.first) { - compactFile(worst.second); +LogDataStore::compactWorst(uint64_t syncToken, bool compactDiskBloat) { + uint64_t usage = getDiskFootprint(); + uint64_t bloat = getDiskBloat(); + const char * reason = compactDiskBloat ? 
"bloat" : "spread"; + LOG(debug, "%s", bloatMsg(bloat, usage).c_str()); + const bool doCompact = (_fileChunks.size() > 1); + if (doCompact) { + LOG(debug, "Will compact due to %s: %s", reason, bloatMsg(bloat, usage).c_str()); + auto worst = findNextToCompact(compactDiskBloat); + if (worst.first) { + compactFile(worst.second); + } + flushActiveAndWait(syncToken); + usage = getDiskFootprint(); + bloat = getDiskBloat(); + LOG(info, "Done compacting due to %s: %s", reason, bloatMsg(bloat, usage).c_str()); + } else { + flushActiveAndWait(syncToken); } } @@ -1001,7 +975,7 @@ LogDataStore::computeNumberOfSignificantBucketIdBits(const IBucketizer & bucketi while ((msb > 0) && (msbHistogram[msb - 1] == 0)) { msb--; } - LOG(info, "computeNumberOfSignificantBucketIdBits(file=%d) = %ld = %ld took %1.3f", fileId.getId(), msb, msbHistogram[msb-1], timer.min_time()); + LOG(debug, "computeNumberOfSignificantBucketIdBits(file=%d) = %ld = %ld took %1.3f", fileId.getId(), msb, msbHistogram[msb-1], timer.min_time()); return msb; } diff --git a/searchlib/src/vespa/searchlib/docstore/logdatastore.h b/searchlib/src/vespa/searchlib/docstore/logdatastore.h index 0e11b88a178..62f87076759 100644 --- a/searchlib/src/vespa/searchlib/docstore/logdatastore.h +++ b/searchlib/src/vespa/searchlib/docstore/logdatastore.h @@ -41,7 +41,6 @@ public: Config & setMaxFileSize(size_t v) { _maxFileSize = v; return *this; } Config & setMaxNumLids(size_t v) { _maxNumLids = v; return *this; } - Config & setMaxDiskBloatFactor(double v) { _maxDiskBloatFactor = v; return *this; } Config & setMaxBucketSpread(double v) { _maxBucketSpread = v; return *this; } Config & setMinFileSizeFactor(double v) { _minFileSizeFactor = v; return *this; } @@ -49,7 +48,6 @@ public: Config & setFileConfig(WriteableFileChunk::Config v) { _fileConfig = v; return *this; } size_t getMaxFileSize() const { return _maxFileSize; } - double getMaxDiskBloatFactor() const { return _maxDiskBloatFactor; } double getMaxBucketSpread() const { 
return _maxBucketSpread; } double getMinFileSizeFactor() const { return _minFileSizeFactor; } uint32_t getMaxNumLids() const { return _maxNumLids; } @@ -63,7 +61,6 @@ public: bool operator == (const Config &) const; private: size_t _maxFileSize; - double _maxDiskBloatFactor; double _maxBucketSpread; double _minFileSizeFactor; uint32_t _maxNumLids; @@ -109,12 +106,10 @@ public: size_t getDiskFootprint() const override; size_t getDiskHeaderFootprint() const override; size_t getDiskBloat() const override; - size_t getMaxCompactGain() const override; + size_t getMaxSpreadAsBloat() const override; - /** - * Will compact the docsummary up to a lower limit of 5% bloat. - */ - void compact(uint64_t syncToken); + void compactBloat(uint64_t syncToken) { compactWorst(syncToken, true); } + void compactSpread(uint64_t syncToken) { compactWorst(syncToken, false);} const Config & getConfig() const { return _config; } Config & getConfig() { return _config; } @@ -183,10 +178,9 @@ private: class WrapVisitorProgress; class FileChunkHolder; - // Implements ISetLid API void setLid(const ISetLid::unique_lock & guard, uint32_t lid, const LidInfo & lm) override; - void compactWorst(double bloatLimit, double spreadLimit, bool prioritizeDiskBloat); + void compactWorst(uint64_t syncToken, bool compactDiskBloat); void compactFile(FileId chunkId); typedef vespalib::RcuVector<uint64_t> LidInfoVector; @@ -202,8 +196,6 @@ private: NameIdSet eraseIncompleteCompactedFiles(NameIdSet partList); void internalFlushAll(); - bool isTotalDiskBloatExceeded(size_t diskFootPrint, size_t bloat) const; - NameIdSet scanDir(const vespalib::string &dir, const vespalib::string &suffix); FileId allocateFileId(const MonitorGuard & guard); void setNewFileChunk(const MonitorGuard & guard, FileChunk::UP fileChunk); @@ -248,7 +240,7 @@ private: return (_fileChunks.empty() ? 
0 : _fileChunks.back()->getLastPersistedSerialNum()); } bool shouldCompactToActiveFile(size_t compactedSize) const; - std::pair<bool, FileId> findNextToCompact(double bloatLimit, double spreadLimit, bool prioritizeDiskBloat); + std::pair<bool, FileId> findNextToCompact(bool compactDiskBloat); void incGeneration(); bool canShrinkLidSpace(const MonitorGuard &guard) const; diff --git a/searchlib/src/vespa/searchlib/docstore/logdocumentstore.h b/searchlib/src/vespa/searchlib/docstore/logdocumentstore.h index de36155bedb..2931f8bce2d 100644 --- a/searchlib/src/vespa/searchlib/docstore/logdocumentstore.h +++ b/searchlib/src/vespa/searchlib/docstore/logdocumentstore.h @@ -51,7 +51,8 @@ public: ~LogDocumentStore() override; void reconfigure(const Config & config); private: - void compact(uint64_t syncToken) override { _backingStore.compact(syncToken); } + void compactBloat(uint64_t syncToken) override { _backingStore.compactBloat(syncToken); } + void compactSpread(uint64_t syncToken) override { _backingStore.compactSpread(syncToken); } LogDataStore _backingStore; }; diff --git a/searchlib/src/vespa/searchlib/tensor/dense_tensor_attribute.cpp b/searchlib/src/vespa/searchlib/tensor/dense_tensor_attribute.cpp index 0f4326aac40..113883a307f 100644 --- a/searchlib/src/vespa/searchlib/tensor/dense_tensor_attribute.cpp +++ b/searchlib/src/vespa/searchlib/tensor/dense_tensor_attribute.cpp @@ -75,7 +75,7 @@ BlobSequenceReader::BlobSequenceReader(AttributeVector& attr, bool has_index) : ReaderBase(attr), _use_index_file(has_index && has_index_file(attr) && can_use_index_save_file(attr.getConfig(), - search::attribute::AttributeHeader::extractTags(getDatHeader()))), + search::attribute::AttributeHeader::extractTags(getDatHeader(), attr.getBaseFileName()))), _index_file(_use_index_file ? 
attribute::LoadUtils::openFile(attr, DenseTensorAttributeSaver::index_file_suffix()) : std::unique_ptr<Fast_BufferedFile>()) @@ -132,7 +132,7 @@ DenseTensorAttribute::update_stat() { vespalib::MemoryUsage result = TensorAttribute::update_stat(); if (_index) { - result.merge(_index->memory_usage()); + result.merge(_index->update_stat(getConfig().getCompactionStrategy())); } return result; } diff --git a/searchlib/src/vespa/searchlib/tensor/dense_tensor_store.cpp b/searchlib/src/vespa/searchlib/tensor/dense_tensor_store.cpp index d3c2998333a..ed3fb737b7d 100644 --- a/searchlib/src/vespa/searchlib/tensor/dense_tensor_store.cpp +++ b/searchlib/src/vespa/searchlib/tensor/dense_tensor_store.cpp @@ -17,6 +17,8 @@ namespace { constexpr size_t MIN_BUFFER_ARRAYS = 1024; constexpr size_t DENSE_TENSOR_ALIGNMENT = 32; +constexpr size_t DENSE_TENSOR_ALIGNMENT_SMALL = 16; +constexpr size_t DENSE_TENSOR_ALIGNMENT_MIN = 8; size_t my_align(size_t size, size_t alignment) { size += alignment - 1; @@ -27,17 +29,20 @@ size_t my_align(size_t size, size_t alignment) { DenseTensorStore::TensorSizeCalc::TensorSizeCalc(const ValueType &type) : _numCells(1u), - _cell_type(type.cell_type()) + _cell_type(type.cell_type()), + _aligned_size(0u) { for (const auto &dim: type.dimensions()) { _numCells *= dim.size; } -} - -size_t -DenseTensorStore::TensorSizeCalc::alignedSize() const -{ - return my_align(bufSize(), DENSE_TENSOR_ALIGNMENT); + auto buf_size = bufSize(); + size_t alignment = DENSE_TENSOR_ALIGNMENT; + if (buf_size <= DENSE_TENSOR_ALIGNMENT_MIN) { + alignment = DENSE_TENSOR_ALIGNMENT_MIN; + } else if (buf_size <= DENSE_TENSOR_ALIGNMENT_SMALL) { + alignment = DENSE_TENSOR_ALIGNMENT_SMALL; + } + _aligned_size = my_align(buf_size, alignment); } DenseTensorStore::BufferType::BufferType(const TensorSizeCalc &tensorSizeCalc, std::unique_ptr<vespalib::alloc::MemoryAllocator> allocator) @@ -79,12 +84,6 @@ DenseTensorStore::~DenseTensorStore() _store.dropBuffers(); } -const void * 
-DenseTensorStore::getRawBuffer(RefType ref) const -{ - return _store.getEntryArray<char>(ref, _bufferType.getArraySize()); -} - namespace { void clearPadAreaAfterBuffer(char *buffer, size_t bufSize, size_t alignedBufSize) { @@ -136,15 +135,6 @@ DenseTensorStore::getTensor(EntryRef ref) const return std::make_unique<vespalib::eval::DenseValueView>(_type, cells_ref); } -vespalib::eval::TypedCells -DenseTensorStore::get_typed_cells(EntryRef ref) const -{ - if (!ref.valid()) { - return vespalib::eval::TypedCells(&_emptySpace[0], _type.cell_type(), getNumCells()); - } - return vespalib::eval::TypedCells(getRawBuffer(ref), _type.cell_type(), getNumCells()); -} - template <class TensorType> TensorStore::EntryRef DenseTensorStore::setDenseTensor(const TensorType &tensor) diff --git a/searchlib/src/vespa/searchlib/tensor/dense_tensor_store.h b/searchlib/src/vespa/searchlib/tensor/dense_tensor_store.h index 3b7cb71863e..47932fbff7e 100644 --- a/searchlib/src/vespa/searchlib/tensor/dense_tensor_store.h +++ b/searchlib/src/vespa/searchlib/tensor/dense_tensor_store.h @@ -25,12 +25,13 @@ public: { size_t _numCells; // product of dimension sizes vespalib::eval::CellType _cell_type; + size_t _aligned_size; TensorSizeCalc(const ValueType &type); size_t bufSize() const { return vespalib::eval::CellTypeUtils::mem_size(_cell_type, _numCells); } - size_t alignedSize() const; + size_t alignedSize() const noexcept { return _aligned_size; } }; class BufferType : public vespalib::datastore::BufferType<char> @@ -50,12 +51,9 @@ private: ValueType _type; // type of dense tensor std::vector<char> _emptySpace; - size_t unboundCells(const void *buffer) const; - template <class TensorType> TensorStore::EntryRef setDenseTensor(const TensorType &tensor); - public: DenseTensorStore(const ValueType &type, std::unique_ptr<vespalib::alloc::MemoryAllocator> allocator); ~DenseTensorStore() override; @@ -63,12 +61,17 @@ public: const ValueType &type() const { return _type; } size_t getNumCells() const { 
return _tensorSizeCalc._numCells; } size_t getBufSize() const { return _tensorSizeCalc.bufSize(); } - const void *getRawBuffer(RefType ref) const; + const void *getRawBuffer(RefType ref) const { + return _store.getEntryArray<char>(ref, _bufferType.getArraySize()); + } vespalib::datastore::Handle<char> allocRawBuffer(); void holdTensor(EntryRef ref) override; EntryRef move(EntryRef ref) override; std::unique_ptr<vespalib::eval::Value> getTensor(EntryRef ref) const; - vespalib::eval::TypedCells get_typed_cells(EntryRef ref) const; + vespalib::eval::TypedCells get_typed_cells(EntryRef ref) const { + return vespalib::eval::TypedCells(ref.valid() ? getRawBuffer(ref) : &_emptySpace[0], + _type.cell_type(), getNumCells()); + } EntryRef setTensor(const vespalib::eval::Value &tensor); // The following method is meant to be used only for unit tests. uint32_t getArraySize() const { return _bufferType.getArraySize(); } diff --git a/searchlib/src/vespa/searchlib/tensor/hamming_distance.cpp b/searchlib/src/vespa/searchlib/tensor/hamming_distance.cpp index 7f9f20e07c4..43596478a6f 100644 --- a/searchlib/src/vespa/searchlib/tensor/hamming_distance.cpp +++ b/searchlib/src/vespa/searchlib/tensor/hamming_distance.cpp @@ -43,4 +43,13 @@ HammingDistance::calc(const vespalib::eval::TypedCells& lhs, } } +double +HammingDistance::calc_with_limit(const vespalib::eval::TypedCells& lhs, + const vespalib::eval::TypedCells& rhs, + double) const +{ + // consider optimizing: + return calc(lhs, rhs); +} + } diff --git a/searchlib/src/vespa/searchlib/tensor/hamming_distance.h b/searchlib/src/vespa/searchlib/tensor/hamming_distance.h index f0b7b159b90..c64fc5b532d 100644 --- a/searchlib/src/vespa/searchlib/tensor/hamming_distance.h +++ b/searchlib/src/vespa/searchlib/tensor/hamming_distance.h @@ -15,7 +15,7 @@ namespace search::tensor { * or (for int8 cells, aka binary data only) * "number of bits that are different" */ -class HammingDistance : public DistanceFunction { +class HammingDistance final 
: public DistanceFunction { public: HammingDistance(vespalib::eval::CellType expected) : DistanceFunction(expected) {} double calc(const vespalib::eval::TypedCells& lhs, const vespalib::eval::TypedCells& rhs) const override; @@ -26,13 +26,7 @@ public: double score = 1.0 / (1.0 + distance); return score; } - double calc_with_limit(const vespalib::eval::TypedCells& lhs, - const vespalib::eval::TypedCells& rhs, - double) const override - { - // consider optimizing: - return calc(lhs, rhs); - } + double calc_with_limit(const vespalib::eval::TypedCells& lhs, const vespalib::eval::TypedCells& rhs, double) const override; }; } diff --git a/searchlib/src/vespa/searchlib/tensor/hnsw_index.cpp b/searchlib/src/vespa/searchlib/tensor/hnsw_index.cpp index 2889dc425db..c99e059815b 100644 --- a/searchlib/src/vespa/searchlib/tensor/hnsw_index.cpp +++ b/searchlib/src/vespa/searchlib/tensor/hnsw_index.cpp @@ -8,7 +8,6 @@ #include "hnsw_index_saver.h" #include "random_level_generator.h" #include "reusable_set_visited_tracker.h" -#include <vespa/searchcommon/common/compaction_strategy.h> #include <vespa/searchlib/attribute/address_space_components.h> #include <vespa/searchlib/attribute/address_space_usage.h> #include <vespa/searchlib/util/fileutil.h> @@ -16,6 +15,7 @@ #include <vespa/vespalib/data/slime/cursor.h> #include <vespa/vespalib/data/slime/inserter.h> #include <vespa/vespalib/datastore/array_store.hpp> +#include <vespa/vespalib/datastore/compaction_strategy.h> #include <vespa/vespalib/util/memory_allocator.h> #include <vespa/vespalib/util/rcuvector.hpp> #include <vespa/vespalib/util/size_literals.h> @@ -30,6 +30,7 @@ namespace search::tensor { using search::AddressSpaceComponents; using search::StateExplorerUtils; +using vespalib::datastore::CompactionStrategy; using vespalib::datastore::EntryRef; namespace { @@ -337,10 +338,7 @@ HnswIndex::HnswIndex(const DocVectorAccess& vectors, DistanceFunction::UP distan _level_generator(std::move(level_generator)), _cfg(cfg), 
_visited_set_pool(), - _cached_level_arrays_memory_usage(), - _cached_level_arrays_address_space_usage(0, 0, (1ull << 32)), - _cached_link_arrays_memory_usage(), - _cached_link_arrays_address_space_usage(0, 0, (1ull << 32)) + _compaction_spec() { assert(_distance_func); } @@ -531,18 +529,18 @@ HnswIndex::trim_hold_lists(generation_t first_used_gen) } void -HnswIndex::compact_level_arrays(bool compact_memory, bool compact_address_space) +HnswIndex::compact_level_arrays(CompactionSpec compaction_spec, const CompactionStrategy& compaction_strategy) { - auto context = _graph.nodes.compactWorst(compact_memory, compact_address_space); + auto context = _graph.nodes.compactWorst(compaction_spec, compaction_strategy); uint32_t doc_id_limit = _graph.node_refs.size(); vespalib::ArrayRef<AtomicEntryRef> refs(&_graph.node_refs[0], doc_id_limit); context->compact(refs); } void -HnswIndex::compact_link_arrays(bool compact_memory, bool compact_address_space) +HnswIndex::compact_link_arrays(CompactionSpec compaction_spec, const CompactionStrategy& compaction_strategy) { - auto context = _graph.links.compactWorst(compact_memory, compact_address_space); + auto context = _graph.links.compactWorst(compaction_spec, compaction_strategy); uint32_t doc_id_limit = _graph.node_refs.size(); for (uint32_t doc_id = 1; doc_id < doc_id_limit; ++doc_id) { EntryRef level_ref = _graph.node_refs[doc_id].load_relaxed(); @@ -553,40 +551,24 @@ HnswIndex::compact_link_arrays(bool compact_memory, bool compact_address_space) } } -namespace { - bool -consider_compact_arrays(const CompactionStrategy& compaction_strategy, vespalib::MemoryUsage& memory_usage, vespalib::AddressSpace& address_space_usage, std::function<void(bool,bool)> compact_arrays) -{ - size_t used_bytes = memory_usage.usedBytes(); - size_t dead_bytes = memory_usage.deadBytes(); - bool compact_memory = compaction_strategy.should_compact_memory(used_bytes, dead_bytes); - size_t used_address_space = address_space_usage.used(); - size_t 
dead_address_space = address_space_usage.dead(); - bool compact_address_space = compaction_strategy.should_compact_address_space(used_address_space, dead_address_space); - if (compact_memory || compact_address_space) { - compact_arrays(compact_memory, compact_address_space); +HnswIndex::consider_compact_level_arrays(const CompactionStrategy& compaction_strategy) +{ + if (_compaction_spec.level_arrays().compact()) { + compact_level_arrays(_compaction_spec.level_arrays(), compaction_strategy); return true; } return false; } -} - -bool -HnswIndex::consider_compact_level_arrays(const CompactionStrategy& compaction_strategy) -{ - return consider_compact_arrays(compaction_strategy, _cached_level_arrays_memory_usage, _cached_level_arrays_address_space_usage, - [this](bool compact_memory, bool compact_address_space) - { compact_level_arrays(compact_memory, compact_address_space); }); -} - bool HnswIndex::consider_compact_link_arrays(const CompactionStrategy& compaction_strategy) { - return consider_compact_arrays(compaction_strategy, _cached_link_arrays_memory_usage, _cached_link_arrays_address_space_usage, - [this](bool compact_memory, bool compact_address_space) - { compact_link_arrays(compact_memory, compact_address_space); }); + if (_compaction_spec.link_arrays().compact()) { + compact_link_arrays(_compaction_spec.link_arrays(), compaction_strategy); + return true; + } + return false; } bool @@ -603,16 +585,18 @@ HnswIndex::consider_compact(const CompactionStrategy& compaction_strategy) } vespalib::MemoryUsage -HnswIndex::update_stat() +HnswIndex::update_stat(const CompactionStrategy& compaction_strategy) { vespalib::MemoryUsage result; result.merge(_graph.node_refs.getMemoryUsage()); - _cached_level_arrays_memory_usage = _graph.nodes.getMemoryUsage(); - _cached_level_arrays_address_space_usage = _graph.nodes.addressSpaceUsage(); - result.merge(_cached_level_arrays_memory_usage); - _cached_link_arrays_memory_usage = _graph.links.getMemoryUsage(); - 
_cached_link_arrays_address_space_usage = _graph.links.addressSpaceUsage(); - result.merge(_cached_link_arrays_memory_usage); + auto level_arrays_memory_usage = _graph.nodes.getMemoryUsage(); + auto level_arrays_address_space_usage = _graph.nodes.addressSpaceUsage(); + result.merge(level_arrays_memory_usage); + auto link_arrays_memory_usage = _graph.links.getMemoryUsage(); + auto link_arrays_address_space_usage = _graph.links.addressSpaceUsage(); + _compaction_spec = HnswIndexCompactionSpec(compaction_strategy.should_compact(level_arrays_memory_usage, level_arrays_address_space_usage), + compaction_strategy.should_compact(link_arrays_memory_usage, link_arrays_address_space_usage)); + result.merge(link_arrays_memory_usage); result.merge(_visited_set_pool.memory_usage()); return result; } diff --git a/searchlib/src/vespa/searchlib/tensor/hnsw_index.h b/searchlib/src/vespa/searchlib/tensor/hnsw_index.h index d8f3c4c97fa..f607af587b5 100644 --- a/searchlib/src/vespa/searchlib/tensor/hnsw_index.h +++ b/searchlib/src/vespa/searchlib/tensor/hnsw_index.h @@ -13,6 +13,7 @@ #include <vespa/searchlib/common/bitvector.h> #include <vespa/vespalib/datastore/array_store.h> #include <vespa/vespalib/datastore/atomic_entry_ref.h> +#include <vespa/vespalib/datastore/compaction_spec.h> #include <vespa/vespalib/datastore/entryref.h> #include <vespa/vespalib/util/rcuvector.h> #include <vespa/vespalib/util/reusable_set_pool.h> @@ -61,6 +62,25 @@ public: bool heuristic_select_neighbors() const { return _heuristic_select_neighbors; } }; + class HnswIndexCompactionSpec { + CompactionSpec _level_arrays; + CompactionSpec _link_arrays; + + public: + HnswIndexCompactionSpec() + : _level_arrays(), + _link_arrays() + { + } + HnswIndexCompactionSpec(CompactionSpec level_arrays_, CompactionSpec link_arrays_) + : _level_arrays(level_arrays_), + _link_arrays(link_arrays_) + { + } + CompactionSpec level_arrays() const noexcept { return _level_arrays; } + CompactionSpec link_arrays() const noexcept { 
return _link_arrays; } + }; + protected: using AtomicEntryRef = HnswGraph::AtomicEntryRef; using NodeStore = HnswGraph::NodeStore; @@ -80,10 +100,7 @@ protected: RandomLevelGenerator::UP _level_generator; Config _cfg; mutable vespalib::ReusableSetPool _visited_set_pool; - vespalib::MemoryUsage _cached_level_arrays_memory_usage; - vespalib::AddressSpace _cached_level_arrays_address_space_usage; - vespalib::MemoryUsage _cached_link_arrays_memory_usage; - vespalib::AddressSpace _cached_link_arrays_address_space_usage; + HnswIndexCompactionSpec _compaction_spec; uint32_t max_links_for_level(uint32_t level) const; void add_link_to(uint32_t docid, uint32_t level, const LinkArrayRef& old_links, uint32_t new_link) { @@ -171,12 +188,12 @@ public: void remove_document(uint32_t docid) override; void transfer_hold_lists(generation_t current_gen) override; void trim_hold_lists(generation_t first_used_gen) override; - void compact_level_arrays(bool compact_memory, bool compact_addreess_space); - void compact_link_arrays(bool compact_memory, bool compact_address_space); + void compact_level_arrays(CompactionSpec compaction_spec, const CompactionStrategy& compaction_strategy); + void compact_link_arrays(CompactionSpec compaction_spec, const CompactionStrategy& compaction_strategy); bool consider_compact_level_arrays(const CompactionStrategy& compaction_strategy); bool consider_compact_link_arrays(const CompactionStrategy& compaction_strategy); bool consider_compact(const CompactionStrategy& compaction_strategy) override; - vespalib::MemoryUsage update_stat() override; + vespalib::MemoryUsage update_stat(const CompactionStrategy& compaction_strategy) override; vespalib::MemoryUsage memory_usage() const override; void populate_address_space_usage(search::AddressSpaceUsage& usage) const override; void get_state(const vespalib::slime::Inserter& inserter) const override; diff --git a/searchlib/src/vespa/searchlib/tensor/nearest_neighbor_index.h 
b/searchlib/src/vespa/searchlib/tensor/nearest_neighbor_index.h index 411d09cd2d3..530d3e1036d 100644 --- a/searchlib/src/vespa/searchlib/tensor/nearest_neighbor_index.h +++ b/searchlib/src/vespa/searchlib/tensor/nearest_neighbor_index.h @@ -12,6 +12,10 @@ class FastOS_FileInterface; +namespace vespalib::datastore { +class CompactionSpec; +class CompactionStrategy; +} namespace vespalib::slime { struct Inserter; } namespace search::fileutil { class LoadedBuffer; } @@ -19,7 +23,6 @@ namespace search::fileutil { class LoadedBuffer; } namespace search { class AddressSpaceUsage; class BitVector; -class CompactionStrategy; } namespace search::tensor { @@ -32,6 +35,8 @@ class NearestNeighborIndexSaver; */ class NearestNeighborIndex { public: + using CompactionSpec = vespalib::datastore::CompactionSpec; + using CompactionStrategy = vespalib::datastore::CompactionStrategy; using generation_t = vespalib::GenerationHandler::generation_t; struct Neighbor { uint32_t docid; @@ -67,7 +72,7 @@ public: virtual void transfer_hold_lists(generation_t current_gen) = 0; virtual void trim_hold_lists(generation_t first_used_gen) = 0; virtual bool consider_compact(const CompactionStrategy& compaction_strategy) = 0; - virtual vespalib::MemoryUsage update_stat() = 0; + virtual vespalib::MemoryUsage update_stat(const CompactionStrategy& compaction_strategy) = 0; virtual vespalib::MemoryUsage memory_usage() const = 0; virtual void populate_address_space_usage(search::AddressSpaceUsage& usage) const = 0; virtual void get_state(const vespalib::slime::Inserter& inserter) const = 0; diff --git a/searchlib/src/vespa/searchlib/tensor/tensor_attribute.cpp b/searchlib/src/vespa/searchlib/tensor/tensor_attribute.cpp index a19541072da..5bd14d2c234 100644 --- a/searchlib/src/vespa/searchlib/tensor/tensor_attribute.cpp +++ b/searchlib/src/vespa/searchlib/tensor/tensor_attribute.cpp @@ -89,9 +89,7 @@ TensorAttribute::onCommit() incGeneration(); if (getFirstUsedGeneration() > _compactGeneration) { // No 
data held from previous compact operation - size_t used = _cached_tensor_store_memory_usage.usedBytes(); - size_t dead = _cached_tensor_store_memory_usage.deadBytes(); - if (getConfig().getCompactionStrategy().should_compact_memory(used, dead)) { + if (getConfig().getCompactionStrategy().should_compact_memory(_cached_tensor_store_memory_usage)) { compactWorst(); } } diff --git a/searchlib/src/vespa/searchlib/transactionlog/domain.cpp b/searchlib/src/vespa/searchlib/transactionlog/domain.cpp index 24943b53e6d..96b94955570 100644 --- a/searchlib/src/vespa/searchlib/transactionlog/domain.cpp +++ b/searchlib/src/vespa/searchlib/transactionlog/domain.cpp @@ -12,6 +12,7 @@ #include <algorithm> #include <thread> #include <cassert> +#include <future> #include <vespa/log/log.h> #include <vespa/vespalib/util/threadstackexecutor.h> @@ -56,11 +57,13 @@ Domain::Domain(const string &domainName, const string & baseDir, Executor & exec _fileHeaderContext(fileHeaderContext), _markedDeleted(false) { - int retval(0); - if ((retval = makeDirectory(_baseDir.c_str())) != 0) { + assert(_config.getEncoding().getCompression() != Encoding::Compression::none); + int retval = makeDirectory(_baseDir.c_str()); + if (retval != 0) { throw runtime_error(fmt("Failed creating basedirectory %s r(%d), e(%d)", _baseDir.c_str(), retval, errno)); } - if ((retval = makeDirectory(dir().c_str())) != 0) { + retval = makeDirectory(dir().c_str()); + if (retval != 0) { throw runtime_error(fmt("Failed creating domaindir %s r(%d), e(%d)", dir().c_str(), retval, errno)); } SerialNumList partIdVector = scanDir(); @@ -76,8 +79,7 @@ Domain::Domain(const string &domainName, const string & baseDir, Executor & exec } pending.waitForZeroRefCount(); if (_parts.empty() || _parts.crbegin()->second->isClosed()) { - _parts[lastPart] = std::make_shared<DomainPart>(_name, dir(), lastPart, _config.getEncoding(), - _config.getCompressionlevel(), _fileHeaderContext, false); + _parts[lastPart] = std::make_shared<DomainPart>(_name, 
dir(), lastPart, _fileHeaderContext, false); vespalib::File::sync(dir()); } _lastSerial = end(); @@ -86,13 +88,13 @@ Domain::Domain(const string &domainName, const string & baseDir, Executor & exec Domain & Domain::setConfig(const DomainConfig & cfg) { _config = cfg; + assert(_config.getEncoding().getCompression() != Encoding::Compression::none); return *this; } void Domain::addPart(SerialNum partId, bool isLastPart) { - auto dp = std::make_shared<DomainPart>(_name, dir(), partId, _config.getEncoding(), - _config.getCompressionlevel(), _fileHeaderContext, isLastPart); + auto dp = std::make_shared<DomainPart>(_name, dir(), partId, _fileHeaderContext, isLastPart); if (dp->size() == 0) { // Only last domain part is allowed to be truncated down to // empty size. @@ -331,8 +333,7 @@ Domain::optionallyRotateFile(SerialNum serialNum) { triggerSyncNow({}); waitPendingSync(_syncMonitor, _syncCond, _pendingSync); dp->close(); - dp = std::make_shared<DomainPart>(_name, dir(), serialNum, _config.getEncoding(), - _config.getCompressionlevel(), _fileHeaderContext, false); + dp = std::make_shared<DomainPart>(_name, dir(), serialNum, _fileHeaderContext, false); { std::lock_guard guard(_lock); _parts[serialNum] = dp; @@ -394,25 +395,32 @@ Domain::grabCurrentChunk(const UniqueLock & guard) { void Domain::commitChunk(std::unique_ptr<CommitChunk> chunk, const UniqueLock & chunkOrderGuard) { assert(chunkOrderGuard.mutex() == &_currentChunkMonitor && chunkOrderGuard.owns_lock()); - _singleCommitter->execute( makeLambdaTask([this, chunk = std::move(chunk)]() mutable { - doCommit(std::move(chunk)); + if (chunk->getPacket().empty()) return; + std::promise<SerializedChunk> promise; + std::future<SerializedChunk> future = promise.get_future(); + _executor.execute(makeLambdaTask([promise=std::move(promise), chunk = std::move(chunk), + encoding=_config.getEncoding(), compressionLevel=_config.getCompressionlevel()]() mutable { + promise.set_value(SerializedChunk(std::move(chunk), encoding, 
compressionLevel)); + })); + _singleCommitter->execute( makeLambdaTask([this, future = std::move(future)]() mutable { + doCommit(future.get()); })); } + + void -Domain::doCommit(std::unique_ptr<CommitChunk> chunk) { - const Packet & packet = chunk->getPacket(); - if (packet.empty()) return; +Domain::doCommit(const SerializedChunk & serialized) { - SerialNum firstSerial = packet.range().from(); - DomainPart::SP dp = optionallyRotateFile(firstSerial); - dp->commit(firstSerial, packet); + SerialNumRange range = serialized.range(); + DomainPart::SP dp = optionallyRotateFile(range.from()); + dp->commit(serialized); if (_config.getFSyncOnCommit()) { dp->sync(); } cleanSessions(); LOG(debug, "Releasing %zu acks and %zu entries and %zu bytes.", - chunk->getNumCallBacks(), chunk->getPacket().size(), chunk->sizeBytes()); + serialized.commitChunk().getNumCallBacks(), serialized.getNumEntries(), serialized.getData().size()); } bool diff --git a/searchlib/src/vespa/searchlib/transactionlog/domain.h b/searchlib/src/vespa/searchlib/transactionlog/domain.h index 2e912ad6201..eb3d0b6b10b 100644 --- a/searchlib/src/vespa/searchlib/transactionlog/domain.h +++ b/searchlib/src/vespa/searchlib/transactionlog/domain.h @@ -64,7 +64,7 @@ private: std::unique_ptr<CommitChunk> grabCurrentChunk(const UniqueLock & guard); void commitChunk(std::unique_ptr<CommitChunk> chunk, const UniqueLock & chunkOrderGuard); - void doCommit(std::unique_ptr<CommitChunk> chunk); + void doCommit(const SerializedChunk & serialized); SerialNum begin(const UniqueLock & guard) const; SerialNum end(const UniqueLock & guard) const; size_t byteSize(const UniqueLock & guard) const; diff --git a/searchlib/src/vespa/searchlib/transactionlog/domainpart.cpp b/searchlib/src/vespa/searchlib/transactionlog/domainpart.cpp index 3dad67df177..2ca2f15545d 100644 --- a/searchlib/src/vespa/searchlib/transactionlog/domainpart.cpp +++ b/searchlib/src/vespa/searchlib/transactionlog/domainpart.cpp @@ -247,11 +247,9 @@ 
DomainPart::buildPacketMapping(bool allowTruncate) return currPos; } -DomainPart::DomainPart(const string & name, const string & baseDir, SerialNum s, Encoding encoding, - uint8_t compressionLevel, const FileHeaderContext &fileHeaderContext, bool allowTruncate) - : _encoding(encoding), - _compressionLevel(compressionLevel), - _lock(), +DomainPart::DomainPart(const string & name, const string & baseDir, SerialNum s, + const FileHeaderContext &fileHeaderContext, bool allowTruncate) + : _lock(), _fileLock(), _range(s), _sz(0), @@ -379,35 +377,21 @@ DomainPart::erase(SerialNum to) } void -DomainPart::commit(SerialNum firstSerial, const Packet &packet) +DomainPart::commit(const SerializedChunk & serialized) { + SerialNumRange range = serialized.range(); + int64_t firstPos(byteSize()); - nbostream_longlivedbuf h(packet.getHandle().data(), packet.getHandle().size()); + assert(_range.to() < range.to()); + _sz += serialized.getNumEntries(); + _range.to(range.to()); if (_range.from() == 0) { - _range.from(firstSerial); - } - IChunk::UP chunk = IChunk::create(_encoding, _compressionLevel); - for (size_t i(0); h.size() > 0; i++) { - //LOG(spam, - //"Pos(%d) Len(%d), Lim(%d), Remaining(%d)", - //h.getPos(), h.getLength(), h.getLimit(), h.getRemaining()); - Packet::Entry entry; - entry.deserialize(h); - if (_range.to() < entry.serial()) { - chunk->add(entry); - assert(_encoding.getCompression() != Encoding::Compression::none); - _sz++; - _range.to(entry.serial()); - } else { - throw runtime_error(fmt("Incoming serial number(%" PRIu64 ") must be bigger than the last one (%" PRIu64 ").", - entry.serial(), _range.to())); - } - } - if ( ! 
chunk->getEntries().empty()) { - write(*_transLog, *chunk); + _range.from(range.from()); } + + write(*_transLog, range, serialized.getData()); std::lock_guard guard(_lock); - _skipList.emplace_back(firstSerial, firstPos); + _skipList.emplace_back(range.from(), firstPos); } void @@ -442,26 +426,15 @@ DomainPart::visit(FastOS_FileInterface &file, SerialNumRange &r, Packet &packet) } void -DomainPart::write(FastOS_FileInterface &file, const IChunk & chunk) +DomainPart::write(FastOS_FileInterface &file, SerialNumRange range, vespalib::ConstBufferRef buf) { - nbostream os; - size_t begin = os.wp(); - os << _encoding.getRaw(); // Placeholder for encoding - os << uint32_t(0); // Placeholder for size - Encoding realEncoding = chunk.encode(os); - size_t end = os.wp(); - os.wp(0); - os << realEncoding.getRaw(); //Patching real encoding - os << uint32_t(end - (begin + sizeof(uint32_t) + sizeof(uint8_t))); // Patching actual size. - os.wp(end); std::lock_guard guard(_writeLock); - if ( ! file.CheckedWrite(os.data(), os.size()) ) { - throw runtime_error(handleWriteError("Failed writing the entry.", file, byteSize(), chunk.range(), os.size())); + if ( ! 
file.CheckedWrite(buf.data(), buf.size()) ) { + throw runtime_error(handleWriteError("Failed writing the entry.", file, byteSize(), range, buf.size())); } - LOG(debug, "Wrote chunk with %zu entries and %zu bytes, range[%" PRIu64 ", %" PRIu64 "] encoding(wanted=%x, real=%x)", - chunk.getEntries().size(), os.size(), chunk.range().from(), chunk.range().to(), _encoding.getRaw(), realEncoding.getRaw()); - _writtenSerial = chunk.range().to(); - _byteSize.fetch_add(os.size(), std::memory_order_release); + LOG(debug, "Wrote chunk with and %zu bytes, range[%" PRIu64 ", %" PRIu64 "]", buf.size(), range.from(), range.to()); + _writtenSerial = range.to(); + _byteSize.fetch_add(buf.size(), std::memory_order_release); } bool diff --git a/searchlib/src/vespa/searchlib/transactionlog/domainpart.h b/searchlib/src/vespa/searchlib/transactionlog/domainpart.h index 9ab0db54391..ea5290c433b 100644 --- a/searchlib/src/vespa/searchlib/transactionlog/domainpart.h +++ b/searchlib/src/vespa/searchlib/transactionlog/domainpart.h @@ -19,13 +19,13 @@ public: using SP = std::shared_ptr<DomainPart>; DomainPart(const DomainPart &) = delete; DomainPart& operator=(const DomainPart &) = delete; - DomainPart(const vespalib::string &name, const vespalib::string &baseDir, SerialNum s, Encoding defaultEncoding, - uint8_t compressionLevel, const common::FileHeaderContext &FileHeaderContext, bool allowTruncate); + DomainPart(const vespalib::string &name, const vespalib::string &baseDir, SerialNum s, + const common::FileHeaderContext &FileHeaderContext, bool allowTruncate); ~DomainPart(); const vespalib::string &fileName() const { return _fileName; } - void commit(SerialNum firstSerial, const Packet &packet); + void commit(const SerializedChunk & serialized); bool erase(SerialNum to); bool visit(FastOS_FileInterface &file, SerialNumRange &r, Packet &packet); bool close(); @@ -49,7 +49,7 @@ private: static Packet readPacket(FastOS_FileInterface & file, SerialNumRange wanted, size_t targetSize, bool 
allowTruncate); static bool read(FastOS_FileInterface &file, IChunk::UP & chunk, Alloc &buf, bool allowTruncate); - void write(FastOS_FileInterface &file, const IChunk & entry); + void write(FastOS_FileInterface &file, SerialNumRange range, vespalib::ConstBufferRef buf); void writeHeader(const common::FileHeaderContext &fileHeaderContext); class SkipInfo @@ -69,8 +69,6 @@ private: SerialNum _id; uint64_t _pos; }; - const Encoding _encoding; - const uint8_t _compressionLevel; std::mutex _lock; std::mutex _fileLock; SerialNumRange _range; diff --git a/searchlib/src/vespa/searchlib/transactionlog/ichunk.cpp b/searchlib/src/vespa/searchlib/transactionlog/ichunk.cpp index ee1631ea8c2..99370d263ec 100644 --- a/searchlib/src/vespa/searchlib/transactionlog/ichunk.cpp +++ b/searchlib/src/vespa/searchlib/transactionlog/ichunk.cpp @@ -8,6 +8,9 @@ #include <cassert> #include <ostream> +#include <vespa/log/log.h> +LOG_SETUP(".searchlib.transactionlog.ichunk"); + using std::make_unique; using vespalib::make_string_short::fmt; using vespalib::nbostream_longlivedbuf; @@ -115,4 +118,48 @@ std::ostream & operator << (std::ostream & os, Encoding e) { return os << "crc=" << e.getCrc() << " compression=" << e.getCompression(); } + +void +encode(vespalib::nbostream & os, const IChunk & chunk, Encoding encoding) { + size_t begin = os.wp(); + os << encoding.getRaw(); // Placeholder for encoding + os << uint32_t(0); // Placeholder for size + Encoding realEncoding = chunk.encode(os); + size_t end = os.wp(); + os.wp(0); + os << realEncoding.getRaw(); //Patching real encoding + os << uint32_t(end - (begin + sizeof(uint32_t) + sizeof(uint8_t))); // Patching actual size. 
+ os.wp(end); + SerialNumRange range = chunk.range(); + LOG(spam, "Encoded chunk with %zu entries and %zu bytes, range[%" PRIu64 ", %" PRIu64 "] encoding(wanted=%x, real=%x)", + chunk.getEntries().size(), os.size(), range.from(), range.to(), encoding.getRaw(), realEncoding.getRaw()); +} + +SerializedChunk::SerializedChunk(std::unique_ptr<CommitChunk> commitChunk, Encoding encoding, uint8_t compressionLevel) + : _commitChunk(std::move(commitChunk)), + _os(), + _range(_commitChunk->getPacket().range()), + _numEntries(_commitChunk->getPacket().size()) +{ + const Packet & packet = _commitChunk->getPacket(); + nbostream_longlivedbuf h(packet.getHandle().data(), packet.getHandle().size()); + + IChunk::UP chunk = IChunk::create(encoding, compressionLevel); + SerialNum prev = 0; + for (size_t i(0); h.size() > 0; i++) { + //LOG(spam, + //"Pos(%d) Len(%d), Lim(%d), Remaining(%d)", + //h.getPos(), h.getLength(), h.getLimit(), h.getRemaining()); + Packet::Entry entry; + entry.deserialize(h); + assert (prev < entry.serial()); + chunk->add(entry); + prev = entry.serial(); + } + assert(! chunk->getEntries().empty()); + encode(_os, *chunk, encoding); +} +vespalib::ConstBufferRef SerializedChunk::getData() const { + return vespalib::ConstBufferRef(_os.data(), _os.size()); +} } diff --git a/searchlib/src/vespa/searchlib/transactionlog/ichunk.h b/searchlib/src/vespa/searchlib/transactionlog/ichunk.h index 02bd0ce9426..e5daeb810f4 100644 --- a/searchlib/src/vespa/searchlib/transactionlog/ichunk.h +++ b/searchlib/src/vespa/searchlib/transactionlog/ichunk.h @@ -33,6 +33,29 @@ private: std::ostream & operator << (std::ostream & os, Encoding e); /** + * Represents a completely encoded chunk with a buffer ready to be persisted, + * and the range and number of entries it covers. 
+ */ +class SerializedChunk { +public: + SerializedChunk(std::unique_ptr<CommitChunk> chunk, Encoding encoding, uint8_t compressionLevel); + SerializedChunk(SerializedChunk &&) = default; + SerializedChunk & operator=(SerializedChunk &&) = default; + SerializedChunk(const SerializedChunk &) = delete; + SerializedChunk & operator=(const SerializedChunk &) = delete; + vespalib::ConstBufferRef getData() const; + SerialNumRange range() const { return _range; } + size_t getNumEntries() const { return _numEntries; } + const CommitChunk & commitChunk() const { return *_commitChunk; } +private: + // CommitChunk is required to ensure we do not reply until committed to the TLS. + std::unique_ptr<CommitChunk> _commitChunk; + vespalib::nbostream _os; + SerialNumRange _range; + size_t _numEntries; +}; + +/** * Interface for different chunk formats. * Format specifies both crc type, and compression type. */ diff --git a/searchlib/src/vespa/searchlib/transactionlog/translogserver.cpp b/searchlib/src/vespa/searchlib/transactionlog/translogserver.cpp index ce190d2c093..db2cf2a255d 100644 --- a/searchlib/src/vespa/searchlib/transactionlog/translogserver.cpp +++ b/searchlib/src/vespa/searchlib/transactionlog/translogserver.cpp @@ -578,9 +578,10 @@ TransLogServer::domainCommit(FRT_RPCRequest *req) try { vespalib::Gate gate; { + auto onDone = make_shared<vespalib::GateCallback>(gate); // Need to scope in order to drain out all the callbacks. 
- domain->append(packet, make_shared<vespalib::GateCallback>(gate)); - auto keep = domain->startCommit(make_shared<vespalib::IgnoreCallback>()); + domain->append(packet, onDone); + auto keep = domain->startCommit(onDone); } gate.await(); ret.AddInt32(0); diff --git a/security-utils/src/main/java/com/yahoo/security/tls/policy/GlobPattern.java b/security-utils/src/main/java/com/yahoo/security/tls/policy/GlobPattern.java index 30d4186f8a5..46a38a77844 100644 --- a/security-utils/src/main/java/com/yahoo/security/tls/policy/GlobPattern.java +++ b/security-utils/src/main/java/com/yahoo/security/tls/policy/GlobPattern.java @@ -15,10 +15,10 @@ class GlobPattern { private final char[] boundaries; private final Pattern regexPattern; - GlobPattern(String pattern, char[] boundaries) { + GlobPattern(String pattern, char[] boundaries, boolean enableSingleCharWildcard) { this.pattern = pattern; this.boundaries = boundaries; - this.regexPattern = toRegexPattern(pattern, boundaries); + this.regexPattern = toRegexPattern(pattern, boundaries, enableSingleCharWildcard); } boolean matches(String value) { return regexPattern.matcher(value).matches(); } @@ -27,12 +27,12 @@ class GlobPattern { Pattern regexPattern() { return regexPattern; } char[] boundaries() { return boundaries; } - private static Pattern toRegexPattern(String pattern, char[] boundaries) { + private static Pattern toRegexPattern(String pattern, char[] boundaries, boolean enableSingleCharWildcard) { StringBuilder builder = new StringBuilder("^"); StringBuilder precedingCharactersToQuote = new StringBuilder(); char[] chars = pattern.toCharArray(); for (char c : chars) { - if (c == '?' || c == '*') { + if ((enableSingleCharWildcard && c == '?') || c == '*') { builder.append(quotePrecedingLiteralsAndReset(precedingCharactersToQuote)); // Note: we explicitly stop matching at a separator boundary. // This is to make matching less vulnerable to dirty tricks (e.g dot as boundary for hostnames). 
diff --git a/security-utils/src/main/java/com/yahoo/security/tls/policy/HostGlobPattern.java b/security-utils/src/main/java/com/yahoo/security/tls/policy/HostGlobPattern.java index d59052a48ef..cb9ba13cae4 100644 --- a/security-utils/src/main/java/com/yahoo/security/tls/policy/HostGlobPattern.java +++ b/security-utils/src/main/java/com/yahoo/security/tls/policy/HostGlobPattern.java @@ -11,7 +11,7 @@ class HostGlobPattern implements RequiredPeerCredential.Pattern { private final GlobPattern globPattern; HostGlobPattern(String pattern) { - this.globPattern = new GlobPattern(pattern, new char[] {'.'}); + this.globPattern = new GlobPattern(pattern, new char[] {'.'}, true); } @Override diff --git a/security-utils/src/main/java/com/yahoo/security/tls/policy/UriGlobPattern.java b/security-utils/src/main/java/com/yahoo/security/tls/policy/UriGlobPattern.java index 006ca83a403..b2cc0688bb9 100644 --- a/security-utils/src/main/java/com/yahoo/security/tls/policy/UriGlobPattern.java +++ b/security-utils/src/main/java/com/yahoo/security/tls/policy/UriGlobPattern.java @@ -13,7 +13,7 @@ class UriGlobPattern implements RequiredPeerCredential.Pattern { private final GlobPattern globPattern; UriGlobPattern(String globPattern) { - this.globPattern = new GlobPattern(globPattern, new char[] {'/'}); + this.globPattern = new GlobPattern(globPattern, new char[] {'/'}, false); } @Override public String asString() { return globPattern.asString(); } diff --git a/security-utils/src/test/java/com/yahoo/security/tls/policy/GlobPatternTest.java b/security-utils/src/test/java/com/yahoo/security/tls/policy/GlobPatternTest.java index b7f4b6b9c46..4350aa2b0a9 100644 --- a/security-utils/src/test/java/com/yahoo/security/tls/policy/GlobPatternTest.java +++ b/security-utils/src/test/java/com/yahoo/security/tls/policy/GlobPatternTest.java @@ -100,7 +100,7 @@ class GlobPatternTest { } private static GlobPattern globPattern(String pattern, String boundaries) { - return new GlobPattern(pattern, 
boundaries.toCharArray()); + return new GlobPattern(pattern, boundaries.toCharArray(), true); } } diff --git a/security-utils/src/test/java/com/yahoo/security/tls/policy/UriGlobPatternTest.java b/security-utils/src/test/java/com/yahoo/security/tls/policy/UriGlobPatternTest.java index d598fbe1b84..c60c782da14 100644 --- a/security-utils/src/test/java/com/yahoo/security/tls/policy/UriGlobPatternTest.java +++ b/security-utils/src/test/java/com/yahoo/security/tls/policy/UriGlobPatternTest.java @@ -20,6 +20,8 @@ class UriGlobPatternTest { assertMatches("scheme://*/segment1/segment2", "scheme://hostname/segment1/segment2"); assertMatches("scheme://*.name/", "scheme://host.name/"); assertNotMatches("scheme://*", "scheme://hostname/"); + assertMatches("scheme://hostname/mypath?query=value", "scheme://hostname/mypath?query=value"); + assertNotMatches("scheme://hostname/?", "scheme://hostname/p"); } private void assertMatches(String pattern, String value) { diff --git a/slobrok/src/vespa/slobrok/server/slobrokserver.cpp b/slobrok/src/vespa/slobrok/server/slobrokserver.cpp index b962ecf611e..5601336fdfd 100644 --- a/slobrok/src/vespa/slobrok/server/slobrokserver.cpp +++ b/slobrok/src/vespa/slobrok/server/slobrokserver.cpp @@ -2,21 +2,20 @@ #include "slobrokserver.h" -#include <vespa/log/log.h> -LOG_SETUP(".slobrok.server"); - namespace slobrok { +VESPA_THREAD_STACK_TAG(slobrok_server_thread); + SlobrokServer::SlobrokServer(ConfigShim &shim) : _env(shim), - _thread(*this) + _thread(*this, slobrok_server_thread) { _thread.start(); } SlobrokServer::SlobrokServer(uint32_t port) : _env(ConfigShim(port)), - _thread(*this) + _thread(*this, slobrok_server_thread) { _thread.start(); } diff --git a/staging_vespalib/src/tests/singleexecutor/singleexecutor_test.cpp b/staging_vespalib/src/tests/singleexecutor/singleexecutor_test.cpp index 732ab122546..dd71380f64a 100644 --- a/staging_vespalib/src/tests/singleexecutor/singleexecutor_test.cpp +++ 
b/staging_vespalib/src/tests/singleexecutor/singleexecutor_test.cpp @@ -35,13 +35,18 @@ void verifyResizeTaskLimit(bool up) { std::condition_variable cond; std::atomic<uint64_t> started(0); std::atomic<uint64_t> allowed(0); - SingleExecutor executor(sequenced_executor, 10); + constexpr uint32_t INITIAL = 20; + const uint32_t INITIAL_2inN = roundUp2inN(INITIAL); + double waterMarkRatio = 0.5; + SingleExecutor executor(sequenced_executor, INITIAL, INITIAL*waterMarkRatio, 10ms); + EXPECT_EQUAL(INITIAL_2inN, executor.getTaskLimit()); + EXPECT_EQUAL(uint32_t(INITIAL_2inN*waterMarkRatio), executor.get_watermark()); - uint32_t targetTaskLimit = up ? 20 : 5; + uint32_t targetTaskLimit = up ? 40 : 5; uint32_t roundedTaskLimit = roundUp2inN(targetTaskLimit); - EXPECT_NOT_EQUAL(16u, roundedTaskLimit); + EXPECT_NOT_EQUAL(INITIAL_2inN, roundedTaskLimit); - for (uint64_t i(0); i < 10; i++) { + for (uint64_t i(0); i < INITIAL; i++) { executor.execute(makeLambdaTask([&lock, &cond, &started, &allowed] { started++; std::unique_lock guard(lock); @@ -53,15 +58,16 @@ void verifyResizeTaskLimit(bool up) { while (started < 1); EXPECT_EQUAL(1u, started); executor.setTaskLimit(targetTaskLimit); - EXPECT_EQUAL(16u, executor.getTaskLimit()); + EXPECT_EQUAL(INITIAL_2inN, executor.getTaskLimit()); + EXPECT_EQUAL(INITIAL_2inN*waterMarkRatio, executor.get_watermark()); allowed = 5; while (started < 6); EXPECT_EQUAL(6u, started); - EXPECT_EQUAL(16u, executor.getTaskLimit()); - allowed = 10; - while (started < 10); - EXPECT_EQUAL(10u, started); - EXPECT_EQUAL(16u, executor.getTaskLimit()); + EXPECT_EQUAL(INITIAL_2inN, executor.getTaskLimit()); + allowed = INITIAL; + while (started < INITIAL); + EXPECT_EQUAL(INITIAL, started); + EXPECT_EQUAL(INITIAL_2inN, executor.getTaskLimit()); executor.execute(makeLambdaTask([&lock, &cond, &started, &allowed] { started++; std::unique_lock guard(lock); @@ -69,11 +75,13 @@ void verifyResizeTaskLimit(bool up) { cond.wait_for(guard, 1ms); } })); - while (started < 
11); - EXPECT_EQUAL(11u, started); + while (started < INITIAL + 1); + EXPECT_EQUAL(INITIAL + 1, started); EXPECT_EQUAL(roundedTaskLimit, executor.getTaskLimit()); - allowed = 11; + EXPECT_EQUAL(roundedTaskLimit*waterMarkRatio, executor.get_watermark()); + allowed = INITIAL + 1; } + TEST("test that resizing up and down works") { TEST_DO(verifyResizeTaskLimit(true)); TEST_DO(verifyResizeTaskLimit(false)); diff --git a/staging_vespalib/src/vespa/vespalib/util/sequencedtaskexecutor.cpp b/staging_vespalib/src/vespa/vespalib/util/sequencedtaskexecutor.cpp index db27c13463f..76b0235301b 100644 --- a/staging_vespalib/src/vespa/vespalib/util/sequencedtaskexecutor.cpp +++ b/staging_vespalib/src/vespa/vespalib/util/sequencedtaskexecutor.cpp @@ -66,7 +66,7 @@ SequencedTaskExecutor::create(Runnable::init_fun_t func, uint32_t threads, uint3 executors.reserve(threads); for (uint32_t id = 0; id < threads; ++id) { if (optimize == OptimizeFor::THROUGHPUT) { - uint32_t watermark = kindOfWatermark == 0 ? taskLimit / 2 : kindOfWatermark; + uint32_t watermark = (kindOfWatermark == 0) ? taskLimit / 10 : kindOfWatermark; executors.push_back(std::make_unique<SingleExecutor>(func, taskLimit, watermark, 100ms)); } else { executors.push_back(std::make_unique<BlockingThreadStackExecutor>(1, stackSize, taskLimit, func)); diff --git a/staging_vespalib/src/vespa/vespalib/util/singleexecutor.cpp b/staging_vespalib/src/vespa/vespalib/util/singleexecutor.cpp index a2962c6ea84..a99bce0a705 100644 --- a/staging_vespalib/src/vespa/vespalib/util/singleexecutor.cpp +++ b/staging_vespalib/src/vespa/vespalib/util/singleexecutor.cpp @@ -11,14 +11,15 @@ SingleExecutor::SingleExecutor(init_fun_t func, uint32_t taskLimit) { } SingleExecutor::SingleExecutor(init_fun_t func, uint32_t taskLimit, uint32_t watermark, duration reactionTime) - : _taskLimit(vespalib::roundUp2inN(taskLimit)), + : _watermarkRatio(watermark < taskLimit ? 
double(watermark) / taskLimit : 1.0), + _taskLimit(vespalib::roundUp2inN(taskLimit)), _wantedTaskLimit(_taskLimit.load()), _rp(0), _tasks(std::make_unique<Task::UP[]>(_taskLimit)), _mutex(), _consumerCondition(), _producerCondition(), - _thread(*this), + _thread(*this, func), _idleTracker(steady_clock::now()), _threadIdleTracker(), _wakeupCount(0), @@ -27,11 +28,10 @@ SingleExecutor::SingleExecutor(init_fun_t func, uint32_t taskLimit, uint32_t wat _wakeupConsumerAt(0), _producerNeedWakeupAt(0), _wp(0), - _watermark(std::min(_taskLimit.load(), watermark)), + _watermark(_taskLimit.load()*_watermarkRatio), _reactionTime(reactionTime), _closed(false) { - (void) func; //TODO implement similar to ThreadStackExecutor assert(taskLimit >= watermark); _thread.start(); } @@ -75,7 +75,7 @@ SingleExecutor::execute(Task::UP task) { void SingleExecutor::setTaskLimit(uint32_t taskLimit) { - _wantedTaskLimit = std::max(vespalib::roundUp2inN(taskLimit), size_t(_watermark)); + _wantedTaskLimit = vespalib::roundUp2inN(taskLimit); } void @@ -117,7 +117,7 @@ SingleExecutor::run() { while (!_thread.stopped()) { drain_tasks(); _producerCondition.notify_all(); - _wakeupConsumerAt.store(_wp.load(std::memory_order_relaxed) + _watermark, std::memory_order_relaxed); + _wakeupConsumerAt.store(_wp.load(std::memory_order_relaxed) + get_watermark(), std::memory_order_relaxed); Lock lock(_mutex); if (numTasks() <= 0) { steady_time now = steady_clock::now(); @@ -159,10 +159,11 @@ SingleExecutor::wait_for_room(Lock & lock) { drain(lock); _tasks = std::make_unique<Task::UP[]>(_wantedTaskLimit); _taskLimit = _wantedTaskLimit.load(); + _watermark = _taskLimit * _watermarkRatio; } _queueSize.add(numTasks()); while (numTasks() >= _taskLimit.load(std::memory_order_relaxed)) { - sleepProducer(lock, _reactionTime, wp - _watermark); + sleepProducer(lock, _reactionTime, wp - get_watermark()); } } diff --git a/staging_vespalib/src/vespa/vespalib/util/singleexecutor.h 
b/staging_vespalib/src/vespa/vespalib/util/singleexecutor.h index 7d868322558..e76e3f17a41 100644 --- a/staging_vespalib/src/vespa/vespalib/util/singleexecutor.h +++ b/staging_vespalib/src/vespa/vespalib/util/singleexecutor.h @@ -28,7 +28,7 @@ public: void wakeup() override; size_t getNumThreads() const override; uint32_t getTaskLimit() const override { return _taskLimit.load(std::memory_order_relaxed); } - uint32_t get_watermark() const { return _watermark; } + uint32_t get_watermark() const { return _watermark.load(std::memory_order_relaxed); } duration get_reaction_time() const { return _reactionTime; } ExecutorStats getStats() override; SingleExecutor & shutdown() override; @@ -47,6 +47,7 @@ private: uint64_t numTasks() const { return _wp.load(std::memory_order_relaxed) - _rp.load(std::memory_order_acquire); } + const double _watermarkRatio; std::atomic<uint32_t> _taskLimit; std::atomic<uint32_t> _wantedTaskLimit; std::atomic<uint64_t> _rp; @@ -63,7 +64,7 @@ private: std::atomic<uint64_t> _wakeupConsumerAt; std::atomic<uint64_t> _producerNeedWakeupAt; std::atomic<uint64_t> _wp; - const uint32_t _watermark; + std::atomic<uint32_t> _watermark; const duration _reactionTime; bool _closed; }; diff --git a/standalone-container/src/main/sh/standalone-container.sh b/standalone-container/src/main/sh/standalone-container.sh index f3048690eec..b34535c6867 100755 --- a/standalone-container/src/main/sh/standalone-container.sh +++ b/standalone-container/src/main/sh/standalone-container.sh @@ -175,7 +175,6 @@ StartCommand() { --add-opens=java.base/java.nio=ALL-UNNAMED \ --add-opens=java.base/jdk.internal.loader=ALL-UNNAMED \ --add-opens=java.base/sun.security.ssl=ALL-UNNAMED \ - --add-opens=java.base/sun.security.util=ALL-UNNAMED \ -Djava.library.path="$VESPA_HOME/lib64" \ -Djava.awt.headless=true \ -Dsun.rmi.dgc.client.gcInterval=3600000 \ diff --git a/storage/src/tests/distributor/getoperationtest.cpp b/storage/src/tests/distributor/getoperationtest.cpp index 
dfe4f09de3f..9fecb005659 100644 --- a/storage/src/tests/distributor/getoperationtest.cpp +++ b/storage/src/tests/distributor/getoperationtest.cpp @@ -267,6 +267,26 @@ TEST_F(GetOperationTest, send_to_all_invalid_nodes_when_inconsistent) { EXPECT_EQ("newauthor", getLastReplyAuthor()); } +// GetOperation document-level consistency checks are used by the multi-phase update +// logic to see if we can fall back to a fast path even though not all replicas are in sync. +// Empty replicas are not considered part of the send-set, so only looking at replies from +// replicas _sent_ to will not detect this case. +// If we haphazardly treat an empty replicas as implicitly being in sync we risk triggering +// undetectable inconsistencies at the document level. This can happen if we send create-if-missing +// updates to an empty replica as well as a non-empty replica, and the document exists in the +// latter replica. The document would then be implicitly created on the empty replica with the +// same timestamp as that of the non-empty one, even though their contents would almost +// certainly differ. 
+TEST_F(GetOperationTest, get_not_sent_to_empty_replicas_but_bucket_tagged_as_inconsistent) { + setClusterState("distributor:1 storage:4"); + addNodesToBucketDB(bucketId, "2=0/0/0,3=1/2/3"); + sendGet(); + ASSERT_EQ("Get => 3", _sender.getCommands(true)); + ASSERT_NO_FATAL_FAILURE(sendReply(0, api::ReturnCode::OK, "newauthor", 2)); + EXPECT_FALSE(op->any_replicas_failed()); + EXPECT_FALSE(last_reply_had_consistent_replicas()); +} + TEST_F(GetOperationTest, inconsistent_split) { setClusterState("distributor:1 storage:4"); diff --git a/storage/src/tests/distributor/putoperationtest.cpp b/storage/src/tests/distributor/putoperationtest.cpp index a047fb7d79c..b02395717e0 100644 --- a/storage/src/tests/distributor/putoperationtest.cpp +++ b/storage/src/tests/distributor/putoperationtest.cpp @@ -51,9 +51,8 @@ public: document::BucketId createAndSendSampleDocument(vespalib::duration timeout); void sendReply(int idx = -1, - api::ReturnCode::Result result - = api::ReturnCode::OK, - api::BucketInfo info = api::BucketInfo(1,2,3,4,5)) + api::ReturnCode::Result result = api::ReturnCode::OK, + api::BucketInfo info = api::BucketInfo(1,2,3,4,5)) { ASSERT_FALSE(_sender.commands().empty()); if (idx == -1) { @@ -152,6 +151,33 @@ TEST_F(PutOperationTest, bucket_database_gets_special_entry_when_CreateBucket_se ASSERT_EQ("Create bucket => 0,Put => 0", _sender.getCommands(true)); } +TEST_F(PutOperationTest, failed_CreateBucket_removes_replica_from_db_and_sends_RequestBucketInfo) { + setup_stripe(2, 2, "distributor:1 storage:2"); + + auto doc = createDummyDocument("test", "test"); + sendPut(createPut(doc)); + + ASSERT_EQ("Create bucket => 1,Create bucket => 0,Put => 1,Put => 0", _sender.getCommands(true)); + + // Simulate timeouts on node 1. Replica existence is in a Schrödinger's cat state until we send + // a RequestBucketInfo to the node and open the box to find out for sure. 
+ sendReply(0, api::ReturnCode::TIMEOUT, api::BucketInfo()); // CreateBucket + sendReply(2, api::ReturnCode::TIMEOUT, api::BucketInfo()); // Put + // Pretend everything went fine on node 0 + sendReply(1); // CreateBucket + sendReply(3); // Put + + ASSERT_EQ("BucketId(0x4000000000008f09) : " + "node(idx=0,crc=0x1,docs=2/4,bytes=3/5,trusted=true,active=false,ready=false)", + dumpBucket(operation_context().make_split_bit_constrained_bucket_id(doc->getId()))); + + // TODO remove revert concept; does not make sense with Proton (since it's not a multi-version store and + // therefore does not have anything to revert back to) and is config-disabled by default for this provider. + ASSERT_EQ("RequestBucketInfoCommand(1 buckets, super bucket BucketId(0x4000000000008f09). ) => 1," + "Revert(BucketId(0x4000000000008f09)) => 0", + _sender.getCommands(true, true, 4)); +} + TEST_F(PutOperationTest, send_inline_split_before_put_if_bucket_too_large) { setup_stripe(1, 1, "storage:1 distributor:1"); auto cfg = make_config(); diff --git a/storage/src/tests/persistence/filestorage/operationabortingtest.cpp b/storage/src/tests/persistence/filestorage/operationabortingtest.cpp index a3f0182ba30..1752de5fb80 100644 --- a/storage/src/tests/persistence/filestorage/operationabortingtest.cpp +++ b/storage/src/tests/persistence/filestorage/operationabortingtest.cpp @@ -22,6 +22,8 @@ namespace storage { namespace { +VESPA_THREAD_STACK_TAG(test_thread); + // Exploit the fact that PersistenceProviderWrapper already provides a forwarding // implementation of all SPI calls, so we can selectively override. 
class BlockingMockProvider : public PersistenceProviderWrapper @@ -294,7 +296,7 @@ TEST_F(OperationAbortingTest, wait_for_current_operation_completion_for_aborted_ auto abortCmd = makeAbortCmd(abortSet); SendTask sendTask(abortCmd, *_queueBarrier, c.top); - vespalib::Thread thread(sendTask); + vespalib::Thread thread(sendTask, test_thread); thread.start(); LOG(debug, "waiting for threads to reach barriers"); diff --git a/storage/src/vespa/storage/distributor/operations/external/getoperation.cpp b/storage/src/vespa/storage/distributor/operations/external/getoperation.cpp index 06872cadde6..868de8d0ae2 100644 --- a/storage/src/vespa/storage/distributor/operations/external/getoperation.cpp +++ b/storage/src/vespa/storage/distributor/operations/external/getoperation.cpp @@ -267,6 +267,11 @@ GetOperation::assignTargetNodeGroups(const BucketDatabase::ReadGuard& read_guard _responses[GroupId(e.getBucketId(), copy.getChecksum(), copy.getNode())].emplace_back(copy); } else if (!copy.empty()) { _responses[GroupId(e.getBucketId(), copy.getChecksum(), -1)].emplace_back(copy); + } else { // empty replica + // We must treat a bucket with empty replicas as inherently inconsistent. + // See GetOperationTest::get_not_sent_to_empty_replicas_but_bucket_tagged_as_inconsistent for + // rationale as to why this is the case. + _has_replica_inconsistency = true; } } } diff --git a/storage/src/vespa/storage/distributor/operations/external/updateoperation.cpp b/storage/src/vespa/storage/distributor/operations/external/updateoperation.cpp index a16eef0ab6f..5233e5678fa 100644 --- a/storage/src/vespa/storage/distributor/operations/external/updateoperation.cpp +++ b/storage/src/vespa/storage/distributor/operations/external/updateoperation.cpp @@ -90,6 +90,7 @@ UpdateOperation::onStart(DistributorStripeMessageSender& sender) // An UpdateOperation should only be started iff all replicas are consistent // with each other, so sampling a single replica should be equal to sampling them all. 
+ // FIXME this no longer holds when replicas are consistent at the _document_ level but not at the _bucket_ level. assert(_entries[0].getBucketInfo().getNodeCount() > 0); // Empty buckets are not allowed _infoAtSendTime = _entries[0].getBucketInfo().getNodeRef(0).getBucketInfo(); diff --git a/storage/src/vespa/storage/distributor/persistencemessagetracker.cpp b/storage/src/vespa/storage/distributor/persistencemessagetracker.cpp index 8cacbb0bf5a..45129f7be04 100644 --- a/storage/src/vespa/storage/distributor/persistencemessagetracker.cpp +++ b/storage/src/vespa/storage/distributor/persistencemessagetracker.cpp @@ -259,7 +259,14 @@ PersistenceMessageTrackerImpl::handleCreateBucketReply( && reply.getResult().getResult() != api::ReturnCode::EXISTS) { LOG(spam, "Create bucket reply failed, so deleting it from bucket db"); + // We don't know if the bucket exists at this point, so we remove it from the DB. + // If we get subsequent write load the bucket will be implicitly created again + // (which is an idempotent operation) and all is well. But since we don't know _if_ + // we'll get any further write load we send a RequestBucketInfo to bring the bucket + // back into the DB if it _was_ successfully created. We have to do the latter to + // avoid the risk of introducing an orphaned bucket replica on the content node. 
_op_ctx.remove_node_from_bucket_database(reply.getBucket(), node); + _op_ctx.recheck_bucket_info(node, reply.getBucket()); } } diff --git a/vbench/src/apps/vbench/vbench.cpp b/vbench/src/apps/vbench/vbench.cpp index 00499519dcc..edaa68b8838 100644 --- a/vbench/src/apps/vbench/vbench.cpp +++ b/vbench/src/apps/vbench/vbench.cpp @@ -8,6 +8,8 @@ using namespace vbench; +VESPA_THREAD_STACK_TAG(vbench_thread); + typedef vespalib::SignalHandler SIG; struct NotifyDone : public vespalib::Runnable { @@ -31,8 +33,7 @@ int run(const std::string &cfg_name) { return 1; } vespalib::Slime cfg; - vespalib::Memory mapped_cfg(cfg_file.get().data, - cfg_file.get().size); + vespalib::Memory mapped_cfg(cfg_file.get().data, cfg_file.get().size); if (!vespalib::slime::JsonFormat::decode(mapped_cfg, cfg)) { fprintf(stderr, "unable to parse config file: %s\n", cfg.toString().c_str()); @@ -43,7 +44,7 @@ int run(const std::string &cfg_name) { VBench vbench(cfg); NotifyDone notify(done); vespalib::RunnablePair runBoth(vbench, notify); - vespalib::Thread thread(runBoth); + vespalib::Thread thread(runBoth, vbench_thread); thread.start(); while (!SIG::INT.check() && !SIG::TERM.check() && !done.await(1s)) {} if (!done.await(vespalib::duration::zero())) { diff --git a/vbench/src/tests/dispatcher/dispatcher_test.cpp b/vbench/src/tests/dispatcher/dispatcher_test.cpp index b2c002e3e50..618940aab57 100644 --- a/vbench/src/tests/dispatcher/dispatcher_test.cpp +++ b/vbench/src/tests/dispatcher/dispatcher_test.cpp @@ -17,6 +17,9 @@ struct Fetcher : public vespalib::Runnable { void run() override { handler.handle(provider.provide()); } }; +VESPA_THREAD_STACK_TAG(fetcher1_thread); +VESPA_THREAD_STACK_TAG(fetcher2_thread); + TEST("dispatcher") { MyHandler dropped; MyHandler handler1; @@ -24,8 +27,8 @@ TEST("dispatcher") { Dispatcher<int> dispatcher(dropped); Fetcher fetcher1(dispatcher, handler1); Fetcher fetcher2(dispatcher, handler2); - vespalib::Thread thread1(fetcher1); - vespalib::Thread 
thread2(fetcher2); + vespalib::Thread thread1(fetcher1, fetcher1_thread); + vespalib::Thread thread2(fetcher2, fetcher2_thread); thread1.start(); EXPECT_TRUE(dispatcher.waitForThreads(1, 512)); thread2.start(); diff --git a/vbench/src/tests/handler_thread/handler_thread_test.cpp b/vbench/src/tests/handler_thread/handler_thread_test.cpp index fd7d630f705..97a12e82ac8 100644 --- a/vbench/src/tests/handler_thread/handler_thread_test.cpp +++ b/vbench/src/tests/handler_thread/handler_thread_test.cpp @@ -15,9 +15,11 @@ struct MyHandler : Handler<int> { MyHandler::~MyHandler() = default; +VESPA_THREAD_STACK_TAG(test_thread); + TEST("handler thread") { MyHandler handler; - HandlerThread<int> th(handler); + HandlerThread<int> th(handler, test_thread); th.handle(std::unique_ptr<int>(new int(1))); th.handle(std::unique_ptr<int>(new int(2))); th.handle(std::unique_ptr<int>(new int(3))); diff --git a/vbench/src/vbench/core/handler_thread.h b/vbench/src/vbench/core/handler_thread.h index b4aaf08eee8..402ecbeb0dc 100644 --- a/vbench/src/vbench/core/handler_thread.h +++ b/vbench/src/vbench/core/handler_thread.h @@ -33,7 +33,7 @@ private: void run() override; public: - HandlerThread(Handler<T> &next); + HandlerThread(Handler<T> &next, init_fun_t init_fun); ~HandlerThread(); void handle(std::unique_ptr<T> obj) override; void join() override; diff --git a/vbench/src/vbench/core/handler_thread.hpp b/vbench/src/vbench/core/handler_thread.hpp index 3d1dc423411..56cc0a7771d 100644 --- a/vbench/src/vbench/core/handler_thread.hpp +++ b/vbench/src/vbench/core/handler_thread.hpp @@ -23,12 +23,12 @@ HandlerThread<T>::run() } template <typename T> -HandlerThread<T>::HandlerThread(Handler<T> &next) +HandlerThread<T>::HandlerThread(Handler<T> &next, init_fun_t init_fun) : _lock(), _cond(), _queue(), _next(next), - _thread(*this), + _thread(*this, init_fun), _done(false) { _thread.start(); diff --git a/vbench/src/vbench/vbench/request_scheduler.cpp b/vbench/src/vbench/vbench/request_scheduler.cpp 
index 80aec6c308e..95d29181b1f 100644 --- a/vbench/src/vbench/vbench/request_scheduler.cpp +++ b/vbench/src/vbench/vbench/request_scheduler.cpp @@ -1,11 +1,13 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. #include "request_scheduler.h" - #include <vbench/core/timer.h> namespace vbench { +VESPA_THREAD_STACK_TAG(vbench_request_scheduler_thread); +VESPA_THREAD_STACK_TAG(vbench_handler_thread); + void RequestScheduler::run() { @@ -24,16 +26,16 @@ RequestScheduler::run() RequestScheduler::RequestScheduler(CryptoEngine::SP crypto, Handler<Request> &next, size_t numWorkers) : _timer(), - _proxy(next), + _proxy(next, vbench_handler_thread), _queue(10.0, 0.020), _droppedTagger(_proxy), _dispatcher(_droppedTagger), - _thread(*this), + _thread(*this, vbench_request_scheduler_thread), _connectionPool(std::move(crypto), _timer), _workers() { for (size_t i = 0; i < numWorkers; ++i) { - _workers.push_back(std::unique_ptr<Worker>(new Worker(_dispatcher, _proxy, _connectionPool, _timer))); + _workers.push_back(std::make_unique<Worker>(_dispatcher, _proxy, _connectionPool, _timer)); } _dispatcher.waitForThreads(numWorkers, 256); } diff --git a/vbench/src/vbench/vbench/vbench.cpp b/vbench/src/vbench/vbench/vbench.cpp index d636f7a1cd7..9a5adad262e 100644 --- a/vbench/src/vbench/vbench/vbench.cpp +++ b/vbench/src/vbench/vbench/vbench.cpp @@ -40,6 +40,8 @@ CryptoEngine::SP setup_crypto(const vespalib::slime::Inspector &tls) { } // namespace vbench::<unnamed> +VESPA_THREAD_STACK_TAG(vbench_inputchain_generator); + VBench::VBench(const vespalib::Slime &cfg) : _factory(), _analyzers(), @@ -76,7 +78,7 @@ VBench::VBench(const vespalib::Slime &cfg) } inputChain->generator = _factory.createGenerator(generator, *inputChain->taggers.back()); if (inputChain->generator.get() != 0) { - inputChain->thread.reset(new vespalib::Thread(*inputChain->generator)); + inputChain->thread.reset(new vespalib::Thread(*inputChain->generator, 
vbench_inputchain_generator)); _inputs.push_back(std::move(inputChain)); } } diff --git a/vbench/src/vbench/vbench/worker.cpp b/vbench/src/vbench/vbench/worker.cpp index a64956f710b..afccc7de39f 100644 --- a/vbench/src/vbench/vbench/worker.cpp +++ b/vbench/src/vbench/vbench/worker.cpp @@ -5,6 +5,8 @@ namespace vbench { +VESPA_THREAD_STACK_TAG(vbench_worker_thread); + void Worker::run() { @@ -22,7 +24,7 @@ Worker::run() Worker::Worker(Provider<Request> &provider, Handler<Request> &next, HttpConnectionPool &pool, Timer &timer) - : _thread(*this), + : _thread(*this, vbench_worker_thread), _provider(provider), _next(next), _pool(pool), diff --git a/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/tls/AthenzX509CertificateUtils.java b/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/tls/AthenzX509CertificateUtils.java index 5f75ace6ac5..4b54b392d12 100644 --- a/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/tls/AthenzX509CertificateUtils.java +++ b/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/tls/AthenzX509CertificateUtils.java @@ -1,14 +1,20 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.vespa.athenz.tls; +import com.yahoo.security.SubjectAlternativeName; +import com.yahoo.security.X509CertificateUtils; import com.yahoo.vespa.athenz.api.AthenzIdentity; import com.yahoo.vespa.athenz.api.AthenzRole; import com.yahoo.vespa.athenz.utils.AthenzIdentities; +import java.net.URI; import java.security.cert.X509Certificate; import java.util.List; +import java.util.Optional; +import static com.yahoo.security.SubjectAlternativeName.Type.DNS_NAME; import static com.yahoo.security.SubjectAlternativeName.Type.RFC822_NAME; +import static com.yahoo.security.SubjectAlternativeName.Type.UNIFORM_RESOURCE_IDENTIFIER; /** * Utility methods for Athenz issued x509 certificates @@ -40,4 +46,44 @@ public class AthenzX509CertificateUtils { return AthenzIdentities.from(email.substring(0, separator)); } + /** @return Athenz unique instance id from an Athenz X.509 certificate (specified in the Subject Alternative Name extension) */ + public static Optional<String> getInstanceId(X509Certificate cert) { + return getInstanceId(X509CertificateUtils.getSubjectAlternativeNames(cert)); + } + + /** @return Athenz unique instance id from the Subject Alternative Name extension */ + public static Optional<String> getInstanceId(List<SubjectAlternativeName> sans) { + // Prefer instance id from SAN URI over the legacy DNS entry + return getAthenzUniqueInstanceIdFromSanUri(sans) + .or(() -> getAthenzUniqueInstanceIdFromSanDns(sans)); + } + + private static Optional<String> getAthenzUniqueInstanceIdFromSanUri(List<SubjectAlternativeName> sans) { + String uriPrefix = "athenz://instanceid/"; + return sans.stream() + .filter(san -> { + if (san.getType() != UNIFORM_RESOURCE_IDENTIFIER) return false; + return san.getValue().startsWith(uriPrefix); + }) + .map(san -> { + String uriPath = URI.create(san.getValue()).getPath(); + return uriPath.substring(uriPath.lastIndexOf('/') + 1); // last path segment contains instance id + }) + .findFirst(); + } + + private static Optional<String> 
getAthenzUniqueInstanceIdFromSanDns(List<SubjectAlternativeName> sans) { + String dnsNameDelimiter = ".instanceid.athenz."; + return sans.stream() + .filter(san -> { + if (san.getType() != DNS_NAME) return false; + return san.getValue().contains(dnsNameDelimiter); + }) + .map(san -> { + String dnsName = san.getValue(); + return dnsName.substring(0, dnsName.indexOf(dnsNameDelimiter)); + }) + .findFirst(); + } + } diff --git a/vespa-feed-client/abi-spec.json b/vespa-feed-client-api/abi-spec.json index 7f78e81b447..cabe9afde20 100644 --- a/vespa-feed-client/abi-spec.json +++ b/vespa-feed-client-api/abi-spec.json @@ -1,20 +1,4 @@ { - "ai.vespa.feed.client.BenchmarkingCluster": { - "superClass": "java.lang.Object", - "interfaces": [ - "ai.vespa.feed.client.Cluster" - ], - "attributes": [ - "public" - ], - "methods": [ - "public void <init>(ai.vespa.feed.client.Cluster)", - "public void dispatch(ai.vespa.feed.client.HttpRequest, java.util.concurrent.CompletableFuture)", - "public ai.vespa.feed.client.OperationStats stats()", - "public void close()" - ], - "fields": [] - }, "ai.vespa.feed.client.DocumentId": { "superClass": "java.lang.Object", "interfaces": [], @@ -37,21 +21,6 @@ ], "fields": [] }, - "ai.vespa.feed.client.DynamicThrottler": { - "superClass": "ai.vespa.feed.client.StaticThrottler", - "interfaces": [], - "attributes": [ - "public" - ], - "methods": [ - "public void <init>(ai.vespa.feed.client.FeedClientBuilder)", - "public void sent(long, java.util.concurrent.CompletableFuture)", - "public void success()", - "public void throttled(long)", - "public long targetInflight()" - ], - "fields": [] - }, "ai.vespa.feed.client.FeedClient$CircuitBreaker$State": { "superClass": "java.lang.Enum", "interfaces": [], @@ -145,27 +114,30 @@ "superClass": "java.lang.Object", "interfaces": [], "attributes": [ - "public" + "public", + "interface", + "abstract" ], "methods": [ "public static ai.vespa.feed.client.FeedClientBuilder create(java.net.URI)", "public static 
ai.vespa.feed.client.FeedClientBuilder create(java.util.List)", - "public ai.vespa.feed.client.FeedClientBuilder setConnectionsPerEndpoint(int)", - "public ai.vespa.feed.client.FeedClientBuilder setMaxStreamPerConnection(int)", - "public ai.vespa.feed.client.FeedClientBuilder setSslContext(javax.net.ssl.SSLContext)", - "public ai.vespa.feed.client.FeedClientBuilder setHostnameVerifier(javax.net.ssl.HostnameVerifier)", - "public ai.vespa.feed.client.FeedClientBuilder noBenchmarking()", - "public ai.vespa.feed.client.FeedClientBuilder addRequestHeader(java.lang.String, java.lang.String)", - "public ai.vespa.feed.client.FeedClientBuilder addRequestHeader(java.lang.String, java.util.function.Supplier)", - "public ai.vespa.feed.client.FeedClientBuilder setRetryStrategy(ai.vespa.feed.client.FeedClient$RetryStrategy)", - "public ai.vespa.feed.client.FeedClientBuilder setCircuitBreaker(ai.vespa.feed.client.FeedClient$CircuitBreaker)", - "public ai.vespa.feed.client.FeedClientBuilder setCertificate(java.nio.file.Path, java.nio.file.Path)", - "public ai.vespa.feed.client.FeedClientBuilder setCertificate(java.util.Collection, java.security.PrivateKey)", - "public ai.vespa.feed.client.FeedClientBuilder setCertificate(java.security.cert.X509Certificate, java.security.PrivateKey)", - "public ai.vespa.feed.client.FeedClientBuilder setDryrun(boolean)", - "public ai.vespa.feed.client.FeedClientBuilder setCaCertificatesFile(java.nio.file.Path)", - "public ai.vespa.feed.client.FeedClientBuilder setCaCertificates(java.util.Collection)", - "public ai.vespa.feed.client.FeedClient build()" + "public abstract ai.vespa.feed.client.FeedClientBuilder setConnectionsPerEndpoint(int)", + "public abstract ai.vespa.feed.client.FeedClientBuilder setMaxStreamPerConnection(int)", + "public abstract ai.vespa.feed.client.FeedClientBuilder setSslContext(javax.net.ssl.SSLContext)", + "public abstract ai.vespa.feed.client.FeedClientBuilder setHostnameVerifier(javax.net.ssl.HostnameVerifier)", + "public 
abstract ai.vespa.feed.client.FeedClientBuilder noBenchmarking()", + "public abstract ai.vespa.feed.client.FeedClientBuilder addRequestHeader(java.lang.String, java.lang.String)", + "public abstract ai.vespa.feed.client.FeedClientBuilder addRequestHeader(java.lang.String, java.util.function.Supplier)", + "public abstract ai.vespa.feed.client.FeedClientBuilder setRetryStrategy(ai.vespa.feed.client.FeedClient$RetryStrategy)", + "public abstract ai.vespa.feed.client.FeedClientBuilder setCircuitBreaker(ai.vespa.feed.client.FeedClient$CircuitBreaker)", + "public abstract ai.vespa.feed.client.FeedClientBuilder setCertificate(java.nio.file.Path, java.nio.file.Path)", + "public abstract ai.vespa.feed.client.FeedClientBuilder setCertificate(java.util.Collection, java.security.PrivateKey)", + "public abstract ai.vespa.feed.client.FeedClientBuilder setCertificate(java.security.cert.X509Certificate, java.security.PrivateKey)", + "public abstract ai.vespa.feed.client.FeedClientBuilder setDryrun(boolean)", + "public abstract ai.vespa.feed.client.FeedClientBuilder setCaCertificatesFile(java.nio.file.Path)", + "public abstract ai.vespa.feed.client.FeedClientBuilder setCaCertificates(java.util.Collection)", + "public abstract ai.vespa.feed.client.FeedClientBuilder setEndpointUris(java.util.List)", + "public abstract ai.vespa.feed.client.FeedClient build()" ], "fields": [] }, @@ -186,21 +158,18 @@ ], "fields": [] }, - "ai.vespa.feed.client.GracePeriodCircuitBreaker": { + "ai.vespa.feed.client.HttpResponse": { "superClass": "java.lang.Object", - "interfaces": [ - "ai.vespa.feed.client.FeedClient$CircuitBreaker" - ], + "interfaces": [], "attributes": [ - "public" + "public", + "interface", + "abstract" ], "methods": [ - "public void <init>(java.time.Duration)", - "public void <init>(java.time.Duration, java.time.Duration)", - "public void success()", - "public void failure(ai.vespa.feed.client.HttpResponse)", - "public void failure(java.lang.Throwable)", - "public 
ai.vespa.feed.client.FeedClient$CircuitBreaker$State state()" + "public abstract int code()", + "public abstract byte[] body()", + "public static ai.vespa.feed.client.HttpResponse of(int, byte[])" ], "fields": [] }, @@ -333,14 +302,15 @@ "superClass": "java.lang.Object", "interfaces": [], "attributes": [ - "public" + "public", + "interface", + "abstract" ], "methods": [ - "public ai.vespa.feed.client.Result$Type type()", - "public ai.vespa.feed.client.DocumentId documentId()", - "public java.util.Optional resultMessage()", - "public java.util.Optional traceMessage()", - "public java.lang.String toString()" + "public abstract ai.vespa.feed.client.Result$Type type()", + "public abstract ai.vespa.feed.client.DocumentId documentId()", + "public abstract java.util.Optional resultMessage()", + "public abstract java.util.Optional traceMessage()" ], "fields": [] }, @@ -367,25 +337,5 @@ "public void <init>(ai.vespa.feed.client.DocumentId, java.lang.Throwable)" ], "fields": [] - }, - "ai.vespa.feed.client.StaticThrottler": { - "superClass": "java.lang.Object", - "interfaces": [ - "ai.vespa.feed.client.Throttler" - ], - "attributes": [ - "public" - ], - "methods": [ - "public void <init>(ai.vespa.feed.client.FeedClientBuilder)", - "public void sent(long, java.util.concurrent.CompletableFuture)", - "public void success()", - "public void throttled(long)", - "public long targetInflight()" - ], - "fields": [ - "protected final long maxInflight", - "protected final long minInflight" - ] } }
\ No newline at end of file diff --git a/vespa-feed-client-api/pom.xml b/vespa-feed-client-api/pom.xml new file mode 100644 index 00000000000..df5fd531f06 --- /dev/null +++ b/vespa-feed-client-api/pom.xml @@ -0,0 +1,57 @@ +<?xml version="1.0"?> +<!-- Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. --> +<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd"> + <modelVersion>4.0.0</modelVersion> + <parent> + <groupId>com.yahoo.vespa</groupId> + <artifactId>parent</artifactId> + <version>7-SNAPSHOT</version> + <relativePath>../parent/pom.xml</relativePath> + </parent> + <artifactId>vespa-feed-client-api</artifactId> + <packaging>jar</packaging> + <version>7-SNAPSHOT</version> + + <dependencies> + <!-- compile scope --> + <dependency> + <groupId>com.yahoo.vespa</groupId> + <artifactId>annotations</artifactId> + <version>${project.version}</version> + </dependency> + <dependency> + <groupId>com.fasterxml.jackson.core</groupId> + <artifactId>jackson-core</artifactId> + <scope>compile</scope> + </dependency> + + <!-- test scope --> + <dependency> + <groupId>org.junit.jupiter</groupId> + <artifactId>junit-jupiter</artifactId> + <scope>test</scope> + </dependency> + </dependencies> + + <build> + <plugins> + <plugin> + <groupId>org.apache.maven.plugins</groupId> + <artifactId>maven-compiler-plugin</artifactId> + <configuration> + <release>${vespaClients.jdk.releaseVersion}</release> + <showDeprecation>true</showDeprecation> + <compilerArgs> + <arg>-Xlint:all</arg> + <arg>-Xlint:-serial</arg> + <arg>-Werror</arg> + </compilerArgs> + </configuration> + </plugin> + <plugin> + <groupId>com.yahoo.vespa</groupId> + <artifactId>abi-check-plugin</artifactId> + </plugin> + </plugins> + </build> +</project> diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/DocumentId.java 
b/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/DocumentId.java index 5474bcfda01..5474bcfda01 100644 --- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/DocumentId.java +++ b/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/DocumentId.java diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/FeedClient.java b/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/FeedClient.java index d463c611d6a..d463c611d6a 100644 --- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/FeedClient.java +++ b/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/FeedClient.java diff --git a/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/FeedClientBuilder.java b/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/FeedClientBuilder.java new file mode 100644 index 00000000000..daf3f62dac1 --- /dev/null +++ b/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/FeedClientBuilder.java @@ -0,0 +1,128 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +package ai.vespa.feed.client; + +import javax.net.ssl.HostnameVerifier; +import javax.net.ssl.SSLContext; +import java.lang.reflect.Constructor; +import java.lang.reflect.InvocationTargetException; +import java.net.URI; +import java.nio.file.Path; +import java.security.PrivateKey; +import java.security.cert.X509Certificate; +import java.util.Collection; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; +import java.util.ServiceLoader; +import java.util.function.Supplier; + +/** + * Builder for creating a {@link FeedClient} instance. 
+ * + * @author bjorncs + * @author jonmv + */ +public interface FeedClientBuilder { + + /** Creates a builder for a single container endpoint **/ + static FeedClientBuilder create(URI endpoint) { return create(Collections.singletonList(endpoint)); } + + /** Creates a builder for multiple container endpoints **/ + static FeedClientBuilder create(List<URI> endpoints) { + Iterator<FeedClientBuilder> iterator = ServiceLoader.load(FeedClientBuilder.class).iterator(); + if (iterator.hasNext()) { + return iterator.next().setEndpointUris(endpoints); + } else { + try { + Class<?> aClass = Class.forName("ai.vespa.feed.client.impl.FeedClientBuilderImpl"); + for (Constructor<?> constructor : aClass.getConstructors()) { + if (constructor.getParameterTypes().length==0) { + return ((FeedClientBuilder)constructor.newInstance()).setEndpointUris(endpoints); + } + } + throw new RuntimeException("Could not find Feed client builder implementation"); + } catch (ClassNotFoundException | InvocationTargetException | InstantiationException | IllegalAccessException e) { + throw new RuntimeException(e); + } + } + } + + /** + * Sets the number of connections this client will use per endpoint. + * + * A reasonable value here is a value that lets all feed clients (if more than one) + * collectively have a number of connections which is a small multiple of the numbers + * of containers in the cluster to feed, so load can be balanced across these containers. + * In general, this value should be kept as low as possible, but poor connectivity + * between feeder and cluster may also warrant a higher number of connections. + */ + FeedClientBuilder setConnectionsPerEndpoint(int max); + + /** + * Sets the maximum number of streams per HTTP/2 connection for this client. + * + * This determines the maximum number of concurrent, inflight requests for this client, + * which is {@code maxConnections * maxStreamsPerConnection}. Prefer more streams over + * more connections, when possible. 
+ * The feed client automatically throttles load to achieve the best throughput, and the + * actual number of streams per connection is usually lower than the maximum. + */ + FeedClientBuilder setMaxStreamPerConnection(int max); + + /** Sets {@link SSLContext} instance. */ + FeedClientBuilder setSslContext(SSLContext context); + + /** Sets {@link HostnameVerifier} instance (e.g for disabling default SSL hostname verification). */ + FeedClientBuilder setHostnameVerifier(HostnameVerifier verifier); + + /** Turns off benchmarking. Attempting to get {@link FeedClient#stats()} will result in an exception. */ + FeedClientBuilder noBenchmarking(); + + /** Adds HTTP request header to all client requests. */ + FeedClientBuilder addRequestHeader(String name, String value); + + /** + * Adds HTTP request header to all client requests. Value {@link Supplier} is invoked for each HTTP request, + * i.e. value can be dynamically updated during a feed. + */ + FeedClientBuilder addRequestHeader(String name, Supplier<String> valueSupplier); + + /** + * Overrides default retry strategy. + * @see FeedClient.RetryStrategy + */ + FeedClientBuilder setRetryStrategy(FeedClient.RetryStrategy strategy); + + /** + * Overrides default circuit breaker. 
+ * @see FeedClient.CircuitBreaker + */ + FeedClientBuilder setCircuitBreaker(FeedClient.CircuitBreaker breaker); + + /** Sets path to client SSL certificate/key PEM files */ + FeedClientBuilder setCertificate(Path certificatePemFile, Path privateKeyPemFile); + + /** Sets client SSL certificates/key */ + FeedClientBuilder setCertificate(Collection<X509Certificate> certificate, PrivateKey privateKey); + + /** Sets client SSL certificate/key */ + FeedClientBuilder setCertificate(X509Certificate certificate, PrivateKey privateKey); + + FeedClientBuilder setDryrun(boolean enabled); + + /** + * Overrides JVM default SSL truststore + * @param caCertificatesFile Path to PEM encoded file containing trusted certificates + */ + FeedClientBuilder setCaCertificatesFile(Path caCertificatesFile); + + /** Overrides JVM default SSL truststore */ + FeedClientBuilder setCaCertificates(Collection<X509Certificate> caCertificates); + + /** Overrides endpoint URIs for this client */ + FeedClientBuilder setEndpointUris(List<URI> endpoints); + + /** Constructs instance of {@link FeedClient} from builder configuration */ + FeedClient build(); + +} diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/FeedException.java b/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/FeedException.java index 1936eb09418..1936eb09418 100644 --- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/FeedException.java +++ b/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/FeedException.java diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpResponse.java b/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/HttpResponse.java index 07fdb2d7257..62850fef32d 100644 --- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpResponse.java +++ b/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/HttpResponse.java @@ -1,7 +1,7 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package ai.vespa.feed.client; -interface HttpResponse { +public interface HttpResponse { int code(); byte[] body(); diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/JsonFeeder.java b/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/JsonFeeder.java index 2d7caea9f26..41b432449df 100644 --- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/JsonFeeder.java +++ b/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/JsonFeeder.java @@ -387,13 +387,13 @@ public class JsonFeeder implements Closeable { CompletableFuture<Result> next() throws IOException { JsonToken token = parser.nextToken(); - if (multipleOperations && ! arrayPrefixParsed && token == START_ARRAY) { + if (multipleOperations && ! arrayPrefixParsed && token == JsonToken.START_ARRAY) { arrayPrefixParsed = true; token = parser.nextToken(); } - if (token == END_ARRAY && multipleOperations) return null; + if (token == JsonToken.END_ARRAY && multipleOperations) return null; else if (token == null && ! 
arrayPrefixParsed) return null; - else if (token != START_OBJECT) throw parseException("Unexpected token '" + parser.currentToken() + "'"); + else if (token != JsonToken.START_OBJECT) throw parseException("Unexpected token '" + parser.currentToken() + "'"); long start = 0, end = -1; OperationType type = null; DocumentId id = null; @@ -459,8 +459,8 @@ public class JsonFeeder implements Closeable { private String readString() throws IOException { String value = parser.nextTextValue(); if (value == null) - throw new OperationParseException("Expected '" + VALUE_STRING + "' at offset " + parser.getTokenLocation().getByteOffset() + - ", but found '" + parser.currentToken() + "' (" + parser.getText() + ")"); + throw new OperationParseException("Expected '" + JsonToken.VALUE_STRING + "' at offset " + parser.getTokenLocation().getByteOffset() + + ", but found '" + parser.currentToken() + "' (" + parser.getText() + ")"); return value; } @@ -468,8 +468,8 @@ public class JsonFeeder implements Closeable { private boolean readBoolean() throws IOException { Boolean value = parser.nextBooleanValue(); if (value == null) - throw new OperationParseException("Expected '" + VALUE_FALSE + "' or '" + VALUE_TRUE + "' at offset " + parser.getTokenLocation().getByteOffset() + - ", but found '" + parser.currentToken() + "' (" + parser.getText() + ")"); + throw new OperationParseException("Expected '" + JsonToken.VALUE_FALSE + "' or '" + JsonToken.VALUE_TRUE + "' at offset " + parser.getTokenLocation().getByteOffset() + + ", but found '" + parser.currentToken() + "' (" + parser.getText() + ")"); return value; diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/OperationParameters.java b/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/OperationParameters.java index 0ec40e114df..0ec40e114df 100644 --- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/OperationParameters.java +++ b/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/OperationParameters.java diff 
--git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/OperationParseException.java b/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/OperationParseException.java index f60368dd67f..4404462be2e 100644 --- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/OperationParseException.java +++ b/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/OperationParseException.java @@ -1,6 +1,8 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package ai.vespa.feed.client; +import ai.vespa.feed.client.FeedException; + /** * Signals that supplied JSON for a document/operation is invalid * diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/OperationStats.java b/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/OperationStats.java index ab2faf245d8..ab2faf245d8 100644 --- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/OperationStats.java +++ b/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/OperationStats.java diff --git a/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/Result.java b/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/Result.java new file mode 100644 index 00000000000..fa114f6a183 --- /dev/null +++ b/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/Result.java @@ -0,0 +1,23 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +package ai.vespa.feed.client; + +import java.util.Optional; + +/** + * Result for a document operation which completed normally. 
+ * + * @author bjorncs + * @author jonmv + */ +public interface Result { + + enum Type { + success, + conditionNotMet + } + + Type type(); + DocumentId documentId(); + Optional<String> resultMessage(); + Optional<String> traceMessage(); +} diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/ResultException.java b/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/ResultException.java index d9eaff40d74..27803898c01 100644 --- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/ResultException.java +++ b/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/ResultException.java @@ -1,6 +1,10 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package ai.vespa.feed.client; +import ai.vespa.feed.client.DocumentId; +import ai.vespa.feed.client.FeedException; +import ai.vespa.feed.client.OperationParameters; + import java.util.Optional; /** diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/ResultParseException.java b/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/ResultParseException.java index 947ab9f0560..f149b13196b 100644 --- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/ResultParseException.java +++ b/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/ResultParseException.java @@ -1,6 +1,9 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package ai.vespa.feed.client; +import ai.vespa.feed.client.DocumentId; +import ai.vespa.feed.client.FeedException; + /** * Signals that the client was unable to obtain a proper response/result from container * diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/package-info.java b/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/package-info.java index daab16a9ff2..daab16a9ff2 100644 --- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/package-info.java +++ b/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/package-info.java diff --git a/vespa-feed-client/src/test/java/ai/vespa/feed/client/JsonFeederTest.java b/vespa-feed-client-api/src/test/java/ai/vespa/feed/client/JsonFeederTest.java index e4fb5cb5bef..d795678db39 100644 --- a/vespa-feed-client/src/test/java/ai/vespa/feed/client/JsonFeederTest.java +++ b/vespa-feed-client-api/src/test/java/ai/vespa/feed/client/JsonFeederTest.java @@ -1,6 +1,7 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package ai.vespa.feed.client; +import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; import java.io.ByteArrayInputStream; @@ -14,6 +15,7 @@ import java.util.Collection; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; import java.util.concurrent.atomic.AtomicBoolean; @@ -148,7 +150,7 @@ class JsonFeederTest { " }\n" + " }\n"; Result result = feeder.feedSingle(json).get(); - assertEquals(DocumentId.of("id:ns:type::abc1"), result.documentId()); + Assertions.assertEquals(DocumentId.of("id:ns:type::abc1"), result.documentId()); assertEquals(Result.Type.success, result.type()); assertEquals("success", result.resultMessage().get()); client.assertPutOperation("abc1", "{\"fields\":{\n \"lul\":\"lal\"\n }}"); @@ -188,7 +190,12 @@ class JsonFeederTest { public void close(boolean graceful) { } private CompletableFuture<Result> createSuccessResult(DocumentId documentId) { - return CompletableFuture.completedFuture(new Result(Result.Type.success, documentId, "success", null)); + return CompletableFuture.completedFuture(new Result(){ + @Override public Type type() { return Type.success; } + @Override public DocumentId documentId() { return documentId; } + @Override public Optional<String> resultMessage() { return Optional.of("success"); } + @Override public Optional<String> traceMessage() { return Optional.empty(); } + }); } void assertDocumentIds(Collection<DocumentId> keys, String... 
expectedUserSpecificIds) { diff --git a/vespa-feed-client/src/test/java/ai/vespa/feed/client/examples/JsonFileFeederExample.java b/vespa-feed-client-api/src/test/java/ai/vespa/feed/client/examples/JsonFileFeederExample.java index b951fb62fb5..b951fb62fb5 100644 --- a/vespa-feed-client/src/test/java/ai/vespa/feed/client/examples/JsonFileFeederExample.java +++ b/vespa-feed-client-api/src/test/java/ai/vespa/feed/client/examples/JsonFileFeederExample.java diff --git a/vespa-feed-client/src/test/java/ai/vespa/feed/client/examples/JsonStreamFeederExample.java b/vespa-feed-client-api/src/test/java/ai/vespa/feed/client/examples/JsonStreamFeederExample.java index 3d4ce150fcf..3d4ce150fcf 100644 --- a/vespa-feed-client/src/test/java/ai/vespa/feed/client/examples/JsonStreamFeederExample.java +++ b/vespa-feed-client-api/src/test/java/ai/vespa/feed/client/examples/JsonStreamFeederExample.java diff --git a/vespa-feed-client/src/test/java/ai/vespa/feed/client/examples/SimpleExample.java b/vespa-feed-client-api/src/test/java/ai/vespa/feed/client/examples/SimpleExample.java index 4e6473a6568..4e6473a6568 100644 --- a/vespa-feed-client/src/test/java/ai/vespa/feed/client/examples/SimpleExample.java +++ b/vespa-feed-client-api/src/test/java/ai/vespa/feed/client/examples/SimpleExample.java diff --git a/vespa-feed-client-cli/pom.xml b/vespa-feed-client-cli/pom.xml index aff625fe3a4..16d6f8827f2 100644 --- a/vespa-feed-client-cli/pom.xml +++ b/vespa-feed-client-cli/pom.xml @@ -74,7 +74,7 @@ <attach>false</attach> <archive> <manifest> - <mainClass>ai.vespa.feed.client.CliClient</mainClass> + <mainClass>ai.vespa.feed.client.impl.CliClient</mainClass> </manifest> </archive> <descriptorRefs> diff --git a/vespa-feed-client-cli/src/main/java/ai/vespa/feed/client/CliArguments.java b/vespa-feed-client-cli/src/main/java/ai/vespa/feed/client/impl/CliArguments.java index 0de81d2de36..2fc7e5af7b4 100644 --- a/vespa-feed-client-cli/src/main/java/ai/vespa/feed/client/CliArguments.java +++ 
b/vespa-feed-client-cli/src/main/java/ai/vespa/feed/client/impl/CliArguments.java @@ -1,5 +1,5 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -package ai.vespa.feed.client; +package ai.vespa.feed.client.impl; import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.CommandLineParser; diff --git a/vespa-feed-client-cli/src/main/java/ai/vespa/feed/client/CliClient.java b/vespa-feed-client-cli/src/main/java/ai/vespa/feed/client/impl/CliClient.java index e40b543f26a..7e036b8dec3 100644 --- a/vespa-feed-client-cli/src/main/java/ai/vespa/feed/client/CliClient.java +++ b/vespa-feed-client-cli/src/main/java/ai/vespa/feed/client/impl/CliClient.java @@ -1,7 +1,14 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -package ai.vespa.feed.client; +package ai.vespa.feed.client.impl; +import ai.vespa.feed.client.FeedClient; +import ai.vespa.feed.client.FeedClientBuilder; +import ai.vespa.feed.client.FeedException; +import ai.vespa.feed.client.JsonFeeder; import ai.vespa.feed.client.JsonFeeder.ResultCallback; +import ai.vespa.feed.client.OperationStats; +import ai.vespa.feed.client.Result; +import ai.vespa.feed.client.ResultException; import com.fasterxml.jackson.core.JsonFactory; import com.fasterxml.jackson.core.JsonGenerator; diff --git a/vespa-feed-client-cli/src/main/sh/vespa-feed-client-standalone.sh b/vespa-feed-client-cli/src/main/sh/vespa-feed-client-standalone.sh index b236a516691..c4e70c362b0 100755 --- a/vespa-feed-client-cli/src/main/sh/vespa-feed-client-standalone.sh +++ b/vespa-feed-client-cli/src/main/sh/vespa-feed-client-standalone.sh @@ -6,4 +6,4 @@ exec java \ -Xms128m -Xmx2048m \ --add-opens=java.base/sun.security.ssl=ALL-UNNAMED \ -Djava.util.logging.config.file=`dirname $0`/logging.properties \ --cp `dirname $0`/vespa-feed-client-cli-jar-with-dependencies.jar ai.vespa.feed.client.CliClient "$@" +-cp `dirname 
$0`/vespa-feed-client-cli-jar-with-dependencies.jar ai.vespa.feed.client.impl.CliClient "$@" diff --git a/vespa-feed-client-cli/src/main/sh/vespa-feed-client.sh b/vespa-feed-client-cli/src/main/sh/vespa-feed-client.sh index fbd172e7423..7dbdc056524 100755 --- a/vespa-feed-client-cli/src/main/sh/vespa-feed-client.sh +++ b/vespa-feed-client-cli/src/main/sh/vespa-feed-client.sh @@ -81,4 +81,4 @@ exec java \ -Xms128m -Xmx2048m $(getJavaOptionsIPV46) \ --add-opens=java.base/sun.security.ssl=ALL-UNNAMED \ -Djava.util.logging.config.file=${VESPA_HOME}/conf/vespa-feed-client/logging.properties \ --cp ${VESPA_HOME}/lib/jars/vespa-feed-client-cli-jar-with-dependencies.jar ai.vespa.feed.client.CliClient "$@" +-cp ${VESPA_HOME}/lib/jars/vespa-feed-client-cli-jar-with-dependencies.jar ai.vespa.feed.client.impl.CliClient "$@" diff --git a/vespa-feed-client-cli/src/test/java/ai/vespa/feed/client/CliArgumentsTest.java b/vespa-feed-client-cli/src/test/java/ai/vespa/feed/client/impl/CliArgumentsTest.java index 622956db530..19b93c3172b 100644 --- a/vespa-feed-client-cli/src/test/java/ai/vespa/feed/client/CliArgumentsTest.java +++ b/vespa-feed-client-cli/src/test/java/ai/vespa/feed/client/impl/CliArgumentsTest.java @@ -1,6 +1,7 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
-package ai.vespa.feed.client; +package ai.vespa.feed.client.impl; +import ai.vespa.feed.client.impl.CliArguments; import org.junit.jupiter.api.Test; import java.io.ByteArrayOutputStream; diff --git a/vespa-feed-client/pom.xml b/vespa-feed-client/pom.xml index 68c9e4b4b7c..a53e7f78b20 100644 --- a/vespa-feed-client/pom.xml +++ b/vespa-feed-client/pom.xml @@ -34,6 +34,11 @@ <artifactId>jackson-core</artifactId> <scope>compile</scope> </dependency> + <dependency> + <groupId>com.yahoo.vespa</groupId> + <artifactId>vespa-feed-client-api</artifactId> + <version>${project.version}</version> + </dependency> <!-- test scope --> <dependency> @@ -72,17 +77,13 @@ <executable>src/main/sh/vespa-version-generator.sh</executable> <arguments> <argument>${project.basedir}/../dist/vtag.map</argument> - <argument>${project.build.directory}/generated-sources/vespa-version/ai/vespa/feed/client/Vespa.java</argument> + <argument>${project.build.directory}/generated-sources/vespa-version/ai/vespa/feed/client/impl/Vespa.java</argument> </arguments> <sourceRoot>${project.build.directory}/generated-sources/vespa-version</sourceRoot> </configuration> </execution> </executions> </plugin> - <plugin> - <groupId>com.yahoo.vespa</groupId> - <artifactId>abi-check-plugin</artifactId> - </plugin> </plugins> </build> </project> diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/ApacheCluster.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/ApacheCluster.java index 52d7af2fb31..6dc9ec4efb1 100644 --- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/ApacheCluster.java +++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/ApacheCluster.java @@ -1,6 +1,7 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
-package ai.vespa.feed.client; +package ai.vespa.feed.client.impl; +import ai.vespa.feed.client.HttpResponse; import org.apache.hc.client5.http.async.methods.SimpleHttpRequest; import org.apache.hc.client5.http.async.methods.SimpleHttpResponse; import org.apache.hc.client5.http.config.RequestConfig; @@ -18,7 +19,6 @@ import org.apache.hc.core5.util.Timeout; import javax.net.ssl.SSLContext; import java.io.IOException; import java.net.URI; -import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Arrays; import java.util.List; @@ -43,7 +43,7 @@ class ApacheCluster implements Cluster { .setResponseTimeout(Timeout.ofMinutes(5)) .build(); - ApacheCluster(FeedClientBuilder builder) throws IOException { + ApacheCluster(FeedClientBuilderImpl builder) throws IOException { for (URI endpoint : builder.endpoints) for (int i = 0; i < builder.connectionsPerEndpoint; i++) endpoints.add(new Endpoint(createHttpClient(builder), endpoint)); @@ -114,7 +114,7 @@ class ApacheCluster implements Cluster { } - private static CloseableHttpAsyncClient createHttpClient(FeedClientBuilder builder) throws IOException { + private static CloseableHttpAsyncClient createHttpClient(FeedClientBuilderImpl builder) throws IOException { SSLContext sslContext = builder.constructSslContext(); String[] allowedCiphers = excludeH2Blacklisted(excludeWeak(sslContext.getSupportedSSLParameters().getCipherSuites())); if (allowedCiphers.length == 0) diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/BenchmarkingCluster.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/BenchmarkingCluster.java index 05ff6e99308..40049bad217 100644 --- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/BenchmarkingCluster.java +++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/BenchmarkingCluster.java @@ -1,5 +1,8 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
-package ai.vespa.feed.client; +package ai.vespa.feed.client.impl; + +import ai.vespa.feed.client.HttpResponse; +import ai.vespa.feed.client.OperationStats; import java.util.HashMap; import java.util.Map; diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/Cluster.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/Cluster.java index 57c028426fe..ee9188fdc2b 100644 --- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/Cluster.java +++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/Cluster.java @@ -1,8 +1,10 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -package ai.vespa.feed.client; +package ai.vespa.feed.client.impl; + +import ai.vespa.feed.client.HttpResponse; +import ai.vespa.feed.client.OperationStats; import java.io.Closeable; -import java.util.Collections; import java.util.concurrent.CompletableFuture; /** diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/DryrunCluster.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/DryrunCluster.java index 282e4e14285..96cf7998681 100644 --- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/DryrunCluster.java +++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/DryrunCluster.java @@ -1,5 +1,7 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
-package ai.vespa.feed.client; +package ai.vespa.feed.client.impl; + +import ai.vespa.feed.client.HttpResponse; import java.nio.charset.StandardCharsets; import java.time.Duration; diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/DynamicThrottler.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/DynamicThrottler.java index a379a8b066b..5969fe267c0 100644 --- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/DynamicThrottler.java +++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/DynamicThrottler.java @@ -1,7 +1,8 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -package ai.vespa.feed.client; +package ai.vespa.feed.client.impl; + +import ai.vespa.feed.client.HttpResponse; -import java.util.Arrays; import java.util.concurrent.CompletableFuture; import java.util.concurrent.atomic.AtomicLong; @@ -25,7 +26,7 @@ public class DynamicThrottler extends StaticThrottler { private long startNanos = System.nanoTime(); private long sent = 0; - public DynamicThrottler(FeedClientBuilder builder) { + public DynamicThrottler(FeedClientBuilderImpl builder) { super(builder); targetInflight = new AtomicLong(8 * minInflight); } diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/FeedClientBuilder.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/FeedClientBuilderImpl.java index 3b79d47b494..7dafeb0b541 100644 --- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/FeedClientBuilder.java +++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/FeedClientBuilderImpl.java @@ -1,5 +1,8 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
-package ai.vespa.feed.client; +package ai.vespa.feed.client.impl; + +import ai.vespa.feed.client.FeedClient; +import ai.vespa.feed.client.FeedClientBuilder; import javax.net.ssl.HostnameVerifier; import javax.net.ssl.SSLContext; @@ -16,6 +19,7 @@ import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.function.Supplier; import static java.util.Objects.requireNonNull; @@ -26,11 +30,11 @@ import static java.util.Objects.requireNonNull; * @author bjorncs * @author jonmv */ -public class FeedClientBuilder { +public class FeedClientBuilderImpl implements FeedClientBuilder { static final FeedClient.RetryStrategy defaultRetryStrategy = new FeedClient.RetryStrategy() { }; - final List<URI> endpoints; + List<URI> endpoints; final Map<String, Supplier<String>> requestHeaders = new HashMap<>(); SSLContext sslContext; HostnameVerifier hostnameVerifier; @@ -47,72 +51,65 @@ public class FeedClientBuilder { boolean benchmark = true; boolean dryrun = false; - /** Creates a builder for a single container endpoint **/ - public static FeedClientBuilder create(URI endpoint) { return new FeedClientBuilder(Collections.singletonList(endpoint)); } - /** Creates a builder for multiple container endpoints **/ - public static FeedClientBuilder create(List<URI> endpoints) { return new FeedClientBuilder(endpoints); } - private FeedClientBuilder(List<URI> endpoints) { + public FeedClientBuilderImpl() { + } + + FeedClientBuilderImpl(List<URI> endpoints) { + this(); + setEndpointUris(endpoints); + } + + @Override + public FeedClientBuilder setEndpointUris(List<URI> endpoints) { if (endpoints.isEmpty()) throw new IllegalArgumentException("At least one endpoint must be provided"); for (URI endpoint : endpoints) requireNonNull(endpoint.getHost()); - this.endpoints = new ArrayList<>(endpoints); + return this; } - /** - * Sets the number of connections this client will use per endpoint. 
- * - * A reasonable value here is a value that lets all feed clients (if more than one) - * collectively have a number of connections which is a small multiple of the numbers - * of containers in the cluster to feed, so load can be balanced across these containers. - * In general, this value should be kept as low as possible, but poor connectivity - * between feeder and cluster may also warrant a higher number of connections. - */ - public FeedClientBuilder setConnectionsPerEndpoint(int max) { + @Override + public FeedClientBuilderImpl setConnectionsPerEndpoint(int max) { if (max < 1) throw new IllegalArgumentException("Max connections must be at least 1, but was " + max); this.connectionsPerEndpoint = max; return this; } - /** - * Sets the maximum number of streams per HTTP/2 connection for this client. - * - * This determines the maximum number of concurrent, inflight requests for this client, - * which is {@code maxConnections * maxStreamsPerConnection}. Prefer more streams over - * more connections, when possible. - * The feed client automatically throttles load to achieve the best throughput, and the - * actual number of streams per connection is usually lower than the maximum. - */ - public FeedClientBuilder setMaxStreamPerConnection(int max) { + @Override + public FeedClientBuilderImpl setMaxStreamPerConnection(int max) { if (max < 1) throw new IllegalArgumentException("Max streams per connection must be at least 1, but was " + max); this.maxStreamsPerConnection = max; return this; } /** Sets {@link SSLContext} instance. */ - public FeedClientBuilder setSslContext(SSLContext context) { + @Override + public FeedClientBuilderImpl setSslContext(SSLContext context) { this.sslContext = requireNonNull(context); return this; } /** Sets {@link HostnameVerifier} instance (e.g for disabling default SSL hostname verification). 
*/ - public FeedClientBuilder setHostnameVerifier(HostnameVerifier verifier) { + @Override + public FeedClientBuilderImpl setHostnameVerifier(HostnameVerifier verifier) { this.hostnameVerifier = requireNonNull(verifier); return this; } /** Turns off benchmarking. Attempting to get {@link FeedClient#stats()} will result in an exception. */ - public FeedClientBuilder noBenchmarking() { + @Override + public FeedClientBuilderImpl noBenchmarking() { this.benchmark = false; return this; } /** Adds HTTP request header to all client requests. */ - public FeedClientBuilder addRequestHeader(String name, String value) { + @Override + public FeedClientBuilderImpl addRequestHeader(String name, String value) { return addRequestHeader(name, () -> requireNonNull(value)); } @@ -120,7 +117,8 @@ public class FeedClientBuilder { * Adds HTTP request header to all client requests. Value {@link Supplier} is invoked for each HTTP request, * i.e. value can be dynamically updated during a feed. */ - public FeedClientBuilder addRequestHeader(String name, Supplier<String> valueSupplier) { + @Override + public FeedClientBuilderImpl addRequestHeader(String name, Supplier<String> valueSupplier) { this.requestHeaders.put(requireNonNull(name), requireNonNull(valueSupplier)); return this; } @@ -129,7 +127,8 @@ public class FeedClientBuilder { * Overrides default retry strategy. * @see FeedClient.RetryStrategy */ - public FeedClientBuilder setRetryStrategy(FeedClient.RetryStrategy strategy) { + @Override + public FeedClientBuilderImpl setRetryStrategy(FeedClient.RetryStrategy strategy) { this.retryStrategy = requireNonNull(strategy); return this; } @@ -138,31 +137,36 @@ public class FeedClientBuilder { * Overrides default circuit breaker. 
* @see FeedClient.CircuitBreaker */ - public FeedClientBuilder setCircuitBreaker(FeedClient.CircuitBreaker breaker) { + @Override + public FeedClientBuilderImpl setCircuitBreaker(FeedClient.CircuitBreaker breaker) { this.circuitBreaker = requireNonNull(breaker); return this; } /** Sets path to client SSL certificate/key PEM files */ - public FeedClientBuilder setCertificate(Path certificatePemFile, Path privateKeyPemFile) { + @Override + public FeedClientBuilderImpl setCertificate(Path certificatePemFile, Path privateKeyPemFile) { this.certificateFile = certificatePemFile; this.privateKeyFile = privateKeyPemFile; return this; } /** Sets client SSL certificates/key */ - public FeedClientBuilder setCertificate(Collection<X509Certificate> certificate, PrivateKey privateKey) { + @Override + public FeedClientBuilderImpl setCertificate(Collection<X509Certificate> certificate, PrivateKey privateKey) { this.certificate = certificate; this.privateKey = privateKey; return this; } /** Sets client SSL certificate/key */ - public FeedClientBuilder setCertificate(X509Certificate certificate, PrivateKey privateKey) { + @Override + public FeedClientBuilderImpl setCertificate(X509Certificate certificate, PrivateKey privateKey) { return setCertificate(Collections.singletonList(certificate), privateKey); } - public FeedClientBuilder setDryrun(boolean enabled) { + @Override + public FeedClientBuilderImpl setDryrun(boolean enabled) { this.dryrun = enabled; return this; } @@ -171,18 +175,21 @@ public class FeedClientBuilder { * Overrides JVM default SSL truststore * @param caCertificatesFile Path to PEM encoded file containing trusted certificates */ - public FeedClientBuilder setCaCertificatesFile(Path caCertificatesFile) { + @Override + public FeedClientBuilderImpl setCaCertificatesFile(Path caCertificatesFile) { this.caCertificatesFile = caCertificatesFile; return this; } /** Overrides JVM default SSL truststore */ - public FeedClientBuilder 
setCaCertificates(Collection<X509Certificate> caCertificates) { + @Override + public FeedClientBuilderImpl setCaCertificates(Collection<X509Certificate> caCertificates) { this.caCertificates = caCertificates; return this; } /** Constructs instance of {@link ai.vespa.feed.client.FeedClient} from builder configuration */ + @Override public FeedClient build() { try { validateConfiguration(); @@ -209,6 +216,9 @@ public class FeedClientBuilder { } private void validateConfiguration() { + if (endpoints == null) { + throw new IllegalArgumentException("At least one endpoint must be provided"); + } if (sslContext != null && ( certificateFile != null || caCertificatesFile != null || privateKeyFile != null || certificate != null || caCertificates != null || privateKey != null)) { diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/GracePeriodCircuitBreaker.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/GracePeriodCircuitBreaker.java index cb5e35c79a5..b223fce7cab 100644 --- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/GracePeriodCircuitBreaker.java +++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/GracePeriodCircuitBreaker.java @@ -1,5 +1,8 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -package ai.vespa.feed.client; +package ai.vespa.feed.client.impl; + +import ai.vespa.feed.client.FeedClient; +import ai.vespa.feed.client.HttpResponse; import java.time.Duration; import java.util.concurrent.atomic.AtomicBoolean; diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpFeedClient.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/HttpFeedClient.java index eb818ba1d48..3fd44596d63 100644 --- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpFeedClient.java +++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/HttpFeedClient.java @@ -1,6 +1,15 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. 
See LICENSE in the project root. -package ai.vespa.feed.client; - +package ai.vespa.feed.client.impl; + +import ai.vespa.feed.client.DocumentId; +import ai.vespa.feed.client.FeedClient; +import ai.vespa.feed.client.FeedException; +import ai.vespa.feed.client.HttpResponse; +import ai.vespa.feed.client.OperationParameters; +import ai.vespa.feed.client.OperationStats; +import ai.vespa.feed.client.Result; +import ai.vespa.feed.client.ResultException; +import ai.vespa.feed.client.ResultParseException; import com.fasterxml.jackson.core.JsonFactory; import com.fasterxml.jackson.core.JsonParser; import com.fasterxml.jackson.core.JsonToken; @@ -33,11 +42,11 @@ class HttpFeedClient implements FeedClient { private final RequestStrategy requestStrategy; private final AtomicBoolean closed = new AtomicBoolean(); - HttpFeedClient(FeedClientBuilder builder) throws IOException { + HttpFeedClient(FeedClientBuilderImpl builder) throws IOException { this(builder, new HttpRequestStrategy(builder)); } - HttpFeedClient(FeedClientBuilder builder, RequestStrategy requestStrategy) { + HttpFeedClient(FeedClientBuilderImpl builder, RequestStrategy requestStrategy) { this.requestHeaders = new HashMap<>(builder.requestHeaders); this.requestStrategy = requestStrategy; } @@ -173,7 +182,7 @@ class HttpFeedClient implements FeedClient { if (outcome == Outcome.vespaFailure) throw new ResultException(documentId, message, trace); - return new Result(toResultType(outcome), documentId, message, trace); + return new ResultImpl(toResultType(outcome), documentId, message, trace); } static String getPath(DocumentId documentId) { diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpRequest.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/HttpRequest.java index 48defd71ea8..08b8ca08c61 100644 --- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpRequest.java +++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/HttpRequest.java @@ -1,5 +1,5 @@ // Copyright 
Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -package ai.vespa.feed.client; +package ai.vespa.feed.client.impl; import java.util.Map; import java.util.function.Supplier; diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpRequestStrategy.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/HttpRequestStrategy.java index cf65a874f3b..6fec0029bc3 100644 --- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpRequestStrategy.java +++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/HttpRequestStrategy.java @@ -1,8 +1,13 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -package ai.vespa.feed.client; +package ai.vespa.feed.client.impl; +import ai.vespa.feed.client.DocumentId; +import ai.vespa.feed.client.FeedClient; import ai.vespa.feed.client.FeedClient.CircuitBreaker; import ai.vespa.feed.client.FeedClient.RetryStrategy; +import ai.vespa.feed.client.FeedException; +import ai.vespa.feed.client.HttpResponse ; +import ai.vespa.feed.client.OperationStats; import java.io.IOException; import java.nio.channels.CancelledKeyException; @@ -62,11 +67,11 @@ class HttpRequestStrategy implements RequestStrategy { return thread; }); - HttpRequestStrategy(FeedClientBuilder builder) throws IOException { + HttpRequestStrategy(FeedClientBuilderImpl builder) throws IOException { this(builder, builder.dryrun ? new DryrunCluster() : new ApacheCluster(builder)); } - HttpRequestStrategy(FeedClientBuilder builder, Cluster cluster) { + HttpRequestStrategy(FeedClientBuilderImpl builder, Cluster cluster) { this.cluster = builder.benchmark ? 
new BenchmarkingCluster(cluster) : cluster; this.strategy = builder.retryStrategy; this.breaker = builder.circuitBreaker; diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/RequestStrategy.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/RequestStrategy.java index 9a97f7daa66..e3b6b594593 100644 --- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/RequestStrategy.java +++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/RequestStrategy.java @@ -1,7 +1,10 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -package ai.vespa.feed.client; +package ai.vespa.feed.client.impl; +import ai.vespa.feed.client.DocumentId; import ai.vespa.feed.client.FeedClient.CircuitBreaker.State; +import ai.vespa.feed.client.HttpResponse; +import ai.vespa.feed.client.OperationStats; import java.util.concurrent.CompletableFuture; diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/Result.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/ResultImpl.java index 5ff3fd0a219..dabf76cba34 100644 --- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/Result.java +++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/ResultImpl.java @@ -1,5 +1,8 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
-package ai.vespa.feed.client; +package ai.vespa.feed.client.impl; + +import ai.vespa.feed.client.DocumentId; +import ai.vespa.feed.client.Result; import java.util.Optional; @@ -9,29 +12,24 @@ import java.util.Optional; * @author bjorncs * @author jonmv */ -public class Result { +public class ResultImpl implements Result { private final Type type; private final DocumentId documentId; private final String resultMessage; private final String traceMessage; - Result(Type type, DocumentId documentId, String resultMessage, String traceMessage) { + ResultImpl(Type type, DocumentId documentId, String resultMessage, String traceMessage) { this.type = type; this.documentId = documentId; this.resultMessage = resultMessage; this.traceMessage = traceMessage; } - public enum Type { - success, - conditionNotMet - } - - public Type type() { return type; } - public DocumentId documentId() { return documentId; } - public Optional<String> resultMessage() { return Optional.ofNullable(resultMessage); } - public Optional<String> traceMessage() { return Optional.ofNullable(traceMessage); } + @Override public Type type() { return type; } + @Override public DocumentId documentId() { return documentId; } + @Override public Optional<String> resultMessage() { return Optional.ofNullable(resultMessage); } + @Override public Optional<String> traceMessage() { return Optional.ofNullable(traceMessage); } @Override public String toString() { diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/SslContextBuilder.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/SslContextBuilder.java index f5e13eccd56..2ca4577abe6 100644 --- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/SslContextBuilder.java +++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/SslContextBuilder.java @@ -1,5 +1,5 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
-package ai.vespa.feed.client; +package ai.vespa.feed.client.impl; import org.bouncycastle.asn1.ASN1ObjectIdentifier; import org.bouncycastle.asn1.pkcs.PKCSObjectIdentifiers; diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/StaticThrottler.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/StaticThrottler.java index 5137a18d923..1f9cf8e5155 100644 --- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/StaticThrottler.java +++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/StaticThrottler.java @@ -1,5 +1,7 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -package ai.vespa.feed.client; +package ai.vespa.feed.client.impl; + +import ai.vespa.feed.client.HttpResponse; import java.util.concurrent.CompletableFuture; import java.util.concurrent.atomic.AtomicLong; @@ -18,7 +20,7 @@ public class StaticThrottler implements Throttler { protected final long minInflight; private final AtomicLong targetX10; - public StaticThrottler(FeedClientBuilder builder) { + public StaticThrottler(FeedClientBuilderImpl builder) { minInflight = 16L * builder.connectionsPerEndpoint * builder.endpoints.size(); maxInflight = 256 * minInflight; // 4096 max streams per connection on the server side. targetX10 = new AtomicLong(10 * maxInflight); // 10x the actual value to allow for smaller updates. diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/Throttler.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/Throttler.java index f2453c27879..700a6f6f805 100644 --- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/Throttler.java +++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/Throttler.java @@ -1,5 +1,7 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
-package ai.vespa.feed.client; +package ai.vespa.feed.client.impl; + +import ai.vespa.feed.client.HttpResponse; import java.util.concurrent.CompletableFuture; diff --git a/vespa-feed-client/src/main/resources/META-INF.services/ai.vespa.feed.client.FeedClientBuilder b/vespa-feed-client/src/main/resources/META-INF.services/ai.vespa.feed.client.FeedClientBuilder new file mode 100644 index 00000000000..b6e28b1806c --- /dev/null +++ b/vespa-feed-client/src/main/resources/META-INF.services/ai.vespa.feed.client.FeedClientBuilder @@ -0,0 +1,2 @@ +# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +ai.vespa.feed.client.impl.FeedClientBuilderImpl
\ No newline at end of file diff --git a/vespa-feed-client/src/main/sh/vespa-version-generator.sh b/vespa-feed-client/src/main/sh/vespa-version-generator.sh index 5aafb3e2bf7..44fb7d167db 100755 --- a/vespa-feed-client/src/main/sh/vespa-version-generator.sh +++ b/vespa-feed-client/src/main/sh/vespa-version-generator.sh @@ -16,7 +16,7 @@ mkdir -p $destinationDir versionNumber=$(cat $source | grep V_TAG_COMPONENT | awk '{print $2}' ) cat > $destination <<- END -package ai.vespa.feed.client; +package ai.vespa.feed.client.impl; class Vespa { static final String VERSION = "$versionNumber"; diff --git a/vespa-feed-client/src/test/java/ai/vespa/feed/client/DocumentIdTest.java b/vespa-feed-client/src/test/java/ai/vespa/feed/client/impl/DocumentIdTest.java index df790056309..61526b80fe7 100644 --- a/vespa-feed-client/src/test/java/ai/vespa/feed/client/DocumentIdTest.java +++ b/vespa-feed-client/src/test/java/ai/vespa/feed/client/impl/DocumentIdTest.java @@ -1,6 +1,8 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
-package ai.vespa.feed.client; +package ai.vespa.feed.client.impl; +import ai.vespa.feed.client.DocumentId; +import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -14,8 +16,8 @@ class DocumentIdTest { @Test void testParsing() { - assertEquals("id:ns:type::user", - DocumentId.of("id:ns:type::user").toString()); + Assertions.assertEquals("id:ns:type::user", + DocumentId.of("id:ns:type::user").toString()); assertEquals("id:ns:type:n=123:user", DocumentId.of("id:ns:type:n=123:user").toString()); diff --git a/vespa-feed-client/src/test/java/ai/vespa/feed/client/GracePeriodCircuitBreakerTest.java b/vespa-feed-client/src/test/java/ai/vespa/feed/client/impl/GracePeriodCircuitBreakerTest.java index 8eaffc3e9be..b7dac5ce52e 100644 --- a/vespa-feed-client/src/test/java/ai/vespa/feed/client/GracePeriodCircuitBreakerTest.java +++ b/vespa-feed-client/src/test/java/ai/vespa/feed/client/impl/GracePeriodCircuitBreakerTest.java @@ -1,5 +1,5 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -package ai.vespa.feed.client; +package ai.vespa.feed.client.impl; import ai.vespa.feed.client.FeedClient.CircuitBreaker; import org.junit.jupiter.api.Test; diff --git a/vespa-feed-client/src/test/java/ai/vespa/feed/client/HttpFeedClientTest.java b/vespa-feed-client/src/test/java/ai/vespa/feed/client/impl/HttpFeedClientTest.java index d92958a5838..5353ab92fb6 100644 --- a/vespa-feed-client/src/test/java/ai/vespa/feed/client/HttpFeedClientTest.java +++ b/vespa-feed-client/src/test/java/ai/vespa/feed/client/impl/HttpFeedClientTest.java @@ -1,10 +1,19 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
-package ai.vespa.feed.client; +package ai.vespa.feed.client.impl; +import ai.vespa.feed.client.DocumentId; +import ai.vespa.feed.client.FeedClient; +import ai.vespa.feed.client.FeedClientBuilder; +import ai.vespa.feed.client.HttpResponse; +import ai.vespa.feed.client.OperationParameters; +import ai.vespa.feed.client.OperationStats; +import ai.vespa.feed.client.Result; +import ai.vespa.feed.client.ResultException; import org.junit.jupiter.api.Test; import java.net.URI; import java.time.Duration; +import java.util.Collections; import java.util.Optional; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; @@ -33,7 +42,7 @@ class HttpFeedClientTest { @Override public void await() { throw new UnsupportedOperationException(); } @Override public CompletableFuture<HttpResponse> enqueue(DocumentId documentId, HttpRequest request) { return dispatch.get().apply(documentId, request); } } - FeedClient client = new HttpFeedClient(FeedClientBuilder.create(URI.create("https://dummy:123")), new MockRequestStrategy()); + FeedClient client = new HttpFeedClient(new FeedClientBuilderImpl(Collections.singletonList(URI.create("https://dummy:123"))), new MockRequestStrategy()); // Update is a PUT, and 200 OK is a success. dispatch.set((documentId, request) -> { diff --git a/vespa-feed-client/src/test/java/ai/vespa/feed/client/HttpRequestStrategyTest.java b/vespa-feed-client/src/test/java/ai/vespa/feed/client/impl/HttpRequestStrategyTest.java index 0f840201ca8..d293abf4f3e 100644 --- a/vespa-feed-client/src/test/java/ai/vespa/feed/client/HttpRequestStrategyTest.java +++ b/vespa-feed-client/src/test/java/ai/vespa/feed/client/impl/HttpRequestStrategyTest.java @@ -1,19 +1,23 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
-package ai.vespa.feed.client; +package ai.vespa.feed.client.impl; +import ai.vespa.feed.client.DocumentId; +import ai.vespa.feed.client.FeedClient; import ai.vespa.feed.client.FeedClient.CircuitBreaker; -import org.apache.hc.core5.http.ContentType; +import ai.vespa.feed.client.FeedException; +import ai.vespa.feed.client.HttpResponse; +import ai.vespa.feed.client.OperationStats; import org.junit.jupiter.api.Test; import java.io.IOException; import java.net.URI; import java.time.Duration; +import java.util.Collections; import java.util.HashMap; import java.util.Map; import java.util.concurrent.CompletableFuture; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; -import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.Phaser; import java.util.concurrent.ScheduledExecutorService; @@ -42,7 +46,7 @@ class HttpRequestStrategyTest { ScheduledExecutorService executor = Executors.newScheduledThreadPool(1); Cluster cluster = new BenchmarkingCluster((__, vessel) -> executor.schedule(() -> vessel.complete(response), (int) (Math.random() * 2 * 10), TimeUnit.MILLISECONDS)); - HttpRequestStrategy strategy = new HttpRequestStrategy(FeedClientBuilder.create(URI.create("https://dummy.com:123")) + HttpRequestStrategy strategy = new HttpRequestStrategy( new FeedClientBuilderImpl(Collections.singletonList(URI.create("https://dummy.com:123"))) .setConnectionsPerEndpoint(1 << 10) .setMaxStreamPerConnection(1 << 12), cluster); @@ -82,7 +86,7 @@ class HttpRequestStrategyTest { MockCluster cluster = new MockCluster(); AtomicLong now = new AtomicLong(0); CircuitBreaker breaker = new GracePeriodCircuitBreaker(now::get, Duration.ofSeconds(1), Duration.ofMinutes(10)); - HttpRequestStrategy strategy = new HttpRequestStrategy(FeedClientBuilder.create(URI.create("https://dummy.com:123")) + HttpRequestStrategy strategy = new HttpRequestStrategy(new 
FeedClientBuilderImpl(Collections.singletonList(URI.create("https://dummy.com:123"))) .setRetryStrategy(new FeedClient.RetryStrategy() { @Override public boolean retry(FeedClient.OperationType type) { return type == FeedClient.OperationType.PUT; } @Override public int retries() { return 1; } @@ -189,7 +193,7 @@ class HttpRequestStrategyTest { MockCluster cluster = new MockCluster(); AtomicLong now = new AtomicLong(0); CircuitBreaker breaker = new GracePeriodCircuitBreaker(now::get, Duration.ofSeconds(1), Duration.ofMinutes(10)); - HttpRequestStrategy strategy = new HttpRequestStrategy(FeedClientBuilder.create(URI.create("https://dummy.com:123")) + HttpRequestStrategy strategy = new HttpRequestStrategy(new FeedClientBuilderImpl(Collections.singletonList(URI.create("https://dummy.com:123"))) .setRetryStrategy(new FeedClient.RetryStrategy() { @Override public int retries() { return 1; } }) diff --git a/vespa-feed-client/src/test/java/ai/vespa/feed/client/SslContextBuilderTest.java b/vespa-feed-client/src/test/java/ai/vespa/feed/client/impl/SslContextBuilderTest.java index a74f63f5cd2..f7c1b4d2b03 100644 --- a/vespa-feed-client/src/test/java/ai/vespa/feed/client/SslContextBuilderTest.java +++ b/vespa-feed-client/src/test/java/ai/vespa/feed/client/impl/SslContextBuilderTest.java @@ -1,5 +1,5 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
-package ai.vespa.feed.client; +package ai.vespa.feed.client.impl; import org.bouncycastle.cert.jcajce.JcaX509CertificateConverter; import org.bouncycastle.cert.jcajce.JcaX509v3CertificateBuilder; @@ -8,6 +8,7 @@ import org.bouncycastle.operator.ContentSigner; import org.bouncycastle.operator.OperatorCreationException; import org.bouncycastle.operator.jcajce.JcaContentSignerBuilder; import org.bouncycastle.util.io.pem.PemObject; +import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; @@ -52,7 +53,7 @@ class SslContextBuilderTest { @Test void successfully_constructs_sslcontext_from_pem_files() { - SSLContext sslContext = assertDoesNotThrow(() -> + SSLContext sslContext = Assertions.assertDoesNotThrow(() -> new SslContextBuilder() .withCaCertificates(certificateFile) .withCertificateAndKey(certificateFile, privateKeyFile) @@ -62,13 +63,13 @@ class SslContextBuilderTest { @Test void successfully_constructs_sslcontext_when_no_builder_parameter_given() { - SSLContext sslContext = assertDoesNotThrow(() -> new SslContextBuilder().build()); + SSLContext sslContext = Assertions.assertDoesNotThrow(() -> new SslContextBuilder().build()); assertEquals("TLS", sslContext.getProtocol()); } @Test void successfully_constructs_sslcontext_with_only_certificate_file() { - SSLContext sslContext = assertDoesNotThrow(() -> + SSLContext sslContext = Assertions.assertDoesNotThrow(() -> new SslContextBuilder() .withCertificateAndKey(certificateFile, privateKeyFile) .build()); @@ -77,7 +78,7 @@ class SslContextBuilderTest { @Test void successfully_constructs_sslcontext_with_only_ca_certificate_file() { - SSLContext sslContext = assertDoesNotThrow(() -> + SSLContext sslContext = Assertions.assertDoesNotThrow(() -> new SslContextBuilder() .withCaCertificates(certificateFile) .build()); diff --git a/vespa-hadoop/src/main/java/ai/vespa/feed/client/DryrunResult.java 
b/vespa-hadoop/src/main/java/ai/vespa/feed/client/DryrunResult.java deleted file mode 100644 index 74baf9f1065..00000000000 --- a/vespa-hadoop/src/main/java/ai/vespa/feed/client/DryrunResult.java +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -package ai.vespa.feed.client; - -import ai.vespa.feed.client.Result.Type; - -/** - * Workaround for package-private {@link Result} constructor. - * - * @author bjorncs - */ -public class DryrunResult { - - private DryrunResult() {} - - public static Result create(Type type, DocumentId documentId, String resultMessage, String traceMessage) { - return new Result(type, documentId, resultMessage, traceMessage); - } -} diff --git a/vespa_feed_perf/src/main/java/com/yahoo/vespa/feed/perf/SimpleFeeder.java b/vespa_feed_perf/src/main/java/com/yahoo/vespa/feed/perf/SimpleFeeder.java index 167e9b338a9..850513fb990 100644 --- a/vespa_feed_perf/src/main/java/com/yahoo/vespa/feed/perf/SimpleFeeder.java +++ b/vespa_feed_perf/src/main/java/com/yahoo/vespa/feed/perf/SimpleFeeder.java @@ -349,6 +349,9 @@ public class SimpleFeeder implements ReplyHandler { } return new JsonDestination(params.getDumpStream(), failure, numReplies); } + + + @SuppressWarnings("deprecation") SimpleFeeder(FeederParams params) { inputStreams = params.getInputStreams(); out = params.getStdOut(); diff --git a/vespa_feed_perf/src/test/java/com/yahoo/vespa/feed/perf/SimpleServer.java b/vespa_feed_perf/src/test/java/com/yahoo/vespa/feed/perf/SimpleServer.java index c2b3e9e4680..10184b35e4c 100644 --- a/vespa_feed_perf/src/test/java/com/yahoo/vespa/feed/perf/SimpleServer.java +++ b/vespa_feed_perf/src/test/java/com/yahoo/vespa/feed/perf/SimpleServer.java @@ -28,6 +28,7 @@ public class SimpleServer { private final MessageBus mbus; private final DestinationSession session; + @SuppressWarnings("deprecation") public SimpleServer(String configDir, MessageHandler msgHandler) throws 
IOException, ListenFailedException { slobrok = new Slobrok(); documentMgr = new DocumentTypeManager(); @@ -53,6 +54,7 @@ public class SimpleServer { writer.close(); } + @SuppressWarnings("deprecation") public final void close() { session.destroy(); mbus.destroy(); diff --git a/vespajlib/src/main/java/com/yahoo/concurrent/CompletableFutures.java b/vespajlib/src/main/java/com/yahoo/concurrent/CompletableFutures.java index 2dab634d8be..a1235c3821d 100644 --- a/vespajlib/src/main/java/com/yahoo/concurrent/CompletableFutures.java +++ b/vespajlib/src/main/java/com/yahoo/concurrent/CompletableFutures.java @@ -1,8 +1,15 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.concurrent; +import com.google.common.util.concurrent.ListenableFuture; +import com.google.common.util.concurrent.SettableFuture; +import com.yahoo.yolean.UncheckedInterruptedException; + +import java.util.ArrayList; import java.util.List; +import java.util.concurrent.CancellationException; import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; /** * Helper for {@link java.util.concurrent.CompletableFuture} / {@link java.util.concurrent.CompletionStage}. 
@@ -64,4 +71,64 @@ public class CompletableFutures { return combiner.combined; } + /** Similar to {@link CompletableFuture#allOf(CompletableFuture[])} but returns a list of the results */ + public static <T> CompletableFuture<List<T>> allOf(List<CompletableFuture<T>> futures) { + return CompletableFuture.allOf(futures.toArray(CompletableFuture[]::new)) + .thenApply(__ -> { + List<T> results = new ArrayList<>(); + for (CompletableFuture<T> f : futures) { + try { + results.add(f.get()); + } catch (InterruptedException | ExecutionException e) { + // Should not happen since all futures are completed without exception + throw new IllegalStateException(e); + } + } + return results; + }); + } + + /** + * Helper for migrating from {@link ListenableFuture} to {@link CompletableFuture} in Vespa public apis + * @deprecated to be removed in Vespa 8 + */ + @SuppressWarnings("unchecked") + @Deprecated(forRemoval = true, since = "7") + public static <V> ListenableFuture<V> toGuavaListenableFuture(CompletableFuture<V> future) { + if (future instanceof ListenableFuture) { + return ((ListenableFuture<V>) future); + } + SettableFuture<V> guavaFuture = SettableFuture.create(); + future.whenComplete((result, error) -> { + if (result != null) guavaFuture.set(result); + else if (error instanceof CancellationException) guavaFuture.setException(error); + else guavaFuture.cancel(true); + }); + return guavaFuture; + } + + /** + * Helper for migrating from {@link ListenableFuture} to {@link CompletableFuture} in Vespa public apis + * @deprecated to be removed in Vespa 8 + */ + @Deprecated(forRemoval = true, since = "7") + public static <V> CompletableFuture<V> toCompletableFuture(ListenableFuture<V> guavaFuture) { + CompletableFuture<V> future = new CompletableFuture<>(); + guavaFuture.addListener( + () -> { + if (guavaFuture.isCancelled()) future.cancel(true); + try { + V value = guavaFuture.get(); + future.complete(value); + } catch (InterruptedException e) { + // Should not happens since 
listener is invoked after future is complete + throw new UncheckedInterruptedException(e); + } catch (ExecutionException e) { + future.completeExceptionally(e.getCause()); + } + }, + Runnable::run); + return future; + } + } diff --git a/vespajlib/src/main/java/com/yahoo/slime/ArrayValue.java b/vespajlib/src/main/java/com/yahoo/slime/ArrayValue.java index 6450982540f..dbd9771afe9 100644 --- a/vespajlib/src/main/java/com/yahoo/slime/ArrayValue.java +++ b/vespajlib/src/main/java/com/yahoo/slime/ArrayValue.java @@ -1,6 +1,9 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.slime; +/** + * @author havardpe + */ final class ArrayValue extends Value { private int capacity = 16; diff --git a/vespajlib/src/main/java/com/yahoo/slime/BoolValue.java b/vespajlib/src/main/java/com/yahoo/slime/BoolValue.java index 00f3adf82a1..5f40050a7df 100644 --- a/vespajlib/src/main/java/com/yahoo/slime/BoolValue.java +++ b/vespajlib/src/main/java/com/yahoo/slime/BoolValue.java @@ -1,13 +1,18 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.slime; +/** + * @author havardpe + */ final class BoolValue extends Value { + private static final BoolValue trueValue = new BoolValue(true); private static final BoolValue falseValue = new BoolValue(false); private final boolean value; private BoolValue(boolean value) { this.value = value; } - final public Type type() { return Type.BOOL; } - final public boolean asBool() { return this.value; } - public final void accept(Visitor v) { v.visitBool(value); } + public Type type() { return Type.BOOL; } + public boolean asBool() { return this.value; } + public void accept(Visitor v) { v.visitBool(value); } public static BoolValue instance(boolean bit) { return (bit ? 
trueValue : falseValue); } + } diff --git a/vespajlib/src/main/java/com/yahoo/slime/Cursor.java b/vespajlib/src/main/java/com/yahoo/slime/Cursor.java index 2696e923bd5..e6493a2ba4c 100644 --- a/vespajlib/src/main/java/com/yahoo/slime/Cursor.java +++ b/vespajlib/src/main/java/com/yahoo/slime/Cursor.java @@ -16,270 +16,298 @@ package com.yahoo.slime; * not connected to an array value (for add methods), or it's not * connected to an object (for set methods). Also note that you can * only set() a field once; you cannot overwrite the field in any way. - **/ + * + * @author havardpe + */ public interface Cursor extends Inspector { /** - * Access an array entry. + * Accesses an array entry. * * If the current Cursor doesn't connect to an array value, * or the given array index is out of bounds, the returned * Cursor will be invalid. - * @param idx array index. - * @return a new Cursor for the entry value. - **/ + * + * @param idx array index + * @return a new Cursor for the entry value + */ @Override - public Cursor entry(int idx); + Cursor entry(int idx); /** - * Access an field in an object by symbol id. + * Accesses a field in an object by symbol id. * * If the current Cursor doesn't connect to an object value, or * the object value does not contain a field with the given symbol * id, the returned Cursor will be invalid. - * @param sym symbol id. - * @return a new Cursor for the field value. - **/ + * + * @param sym symbol id + * @return a new Cursor for the field value + */ @Override - public Cursor field(int sym); + Cursor field(int sym); /** - * Access an field in an object by symbol name. + * Accesses a field in an object by symbol name. * * If the current Cursor doesn't connect to an object value, or * the object value does not contain a field with the given symbol * name, the returned Cursor will be invalid. - * @param name symbol name. - * @return a new Cursor for the field value. 
- **/ + * + * @param name symbol name + * @return a new Cursor for the field value + */ @Override - public Cursor field(String name); + Cursor field(String name); /** - * Append an array entry containing a new value of NIX type. + * Appends an array entry containing a new value of NIX type. * Returns an invalid Cursor if unsuccessful. - * @return a valid Cursor referencing the new entry value if successful. - **/ - public Cursor addNix(); + * + * @return a valid Cursor referencing the new entry value if successful + */ + Cursor addNix(); /** - * Append an array entry containing a new value of BOOL type. + * Appends an array entry containing a new value of BOOL type. * Returns an invalid Cursor if unsuccessful. - * @param bit the actual boolean value for initializing a new BoolValue. - * @return a valid Cursor referencing the new entry value if successful. - **/ - public Cursor addBool(boolean bit); + * + * @param bit the actual boolean value for initializing a new BoolValue + * @return a valid Cursor referencing the new entry value if successful + */ + Cursor addBool(boolean bit); - /** add a new entry of LONG type to an array */ - public Cursor addLong(long l); + /** Adds a new entry of LONG type to an array. */ + Cursor addLong(long l); - /** add a new entry of DOUBLE type to an array */ - public Cursor addDouble(double d); + /** Adds a new entry of DOUBLE type to an array. */ + Cursor addDouble(double d); - /** add a new entry of STRING type to an array */ - public Cursor addString(String str); + /** Add a new entry of STRING type to an array. */ + Cursor addString(String str); - /** add a new entry of STRING type to an array */ - public Cursor addString(byte[] utf8); + /** Add a new entry of STRING type to an array. */ + Cursor addString(byte[] utf8); - /** add a new entry of DATA type to an array */ - public Cursor addData(byte[] data); + /** Add a new entry of DATA type to an array. 
*/ + Cursor addData(byte[] data); /** - * Append an array entry containing a new value of ARRAY type. + * Appends an array entry containing a new value of ARRAY type. * Returns a valid Cursor (thay may again be used for adding new * sub-array entries) referencing the new entry value if * successful; otherwise returns an invalid Cursor. - * @return new Cursor for the new entry value - **/ - public Cursor addArray(); + * + * @return a new Cursor for the new entry value + */ + Cursor addArray(); /** - * Append an array entry containing a new value of OBJECT type. + * Appends an array entry containing a new value of OBJECT type. * Returns a valid Cursor (thay may again be used for setting * sub-fields inside the new object) referencing the new entry * value if successful; otherwise returns an invalid Cursor. - * @return new Cursor for the new entry value - **/ - public Cursor addObject(); + * + * @return a new Cursor for the new entry value + */ + Cursor addObject(); /** - * Set a field (identified with a symbol id) to contain a new + * Sets a field (identified with a symbol id) to contain a new * value of NIX type. Returns a valid Cursor referencing the new * field value if successful; otherwise returns an invalid Cursor. + * * @param sym symbol id for the field to be set * @return new Cursor for the new field value - **/ - public Cursor setNix(int sym); + */ + Cursor setNix(int sym); /** - * Set a field (identified with a symbol id) to contain a new + * Sets a field (identified with a symbol id) to contain a new * value of BOOL type. Returns a valid Cursor referencing the new * field value if successful; otherwise returns an invalid Cursor. 
+ * * @param sym symbol id for the field to be set * @param bit the actual boolean value for the new field * @return new Cursor for the new field value - **/ - public Cursor setBool(int sym, boolean bit); + */ + Cursor setBool(int sym, boolean bit); /** - * Set a field (identified with a symbol id) to contain a new + * Sets a field (identified with a symbol id) to contain a new * value of BOOL type. Returns a valid Cursor referencing the new * field value if successful; otherwise returns an invalid Cursor. + * * @param sym symbol id for the field to be set * @param l the actual long value for the new field * @return new Cursor for the new field value - **/ - public Cursor setLong(int sym, long l); + */ + Cursor setLong(int sym, long l); /** - * Set a field (identified with a symbol id) to contain a new + * Sets a field (identified with a symbol id) to contain a new * value of BOOL type. Returns a valid Cursor referencing the new * field value if successful; otherwise returns an invalid Cursor. + * * @param sym symbol id for the field to be set * @param d the actual double value for the new field * @return new Cursor for the new field value - **/ - public Cursor setDouble(int sym, double d); + */ + Cursor setDouble(int sym, double d); /** - * Set a field (identified with a symbol id) to contain a new + * Sets a field (identified with a symbol id) to contain a new * value of BOOL type. Returns a valid Cursor referencing the new * field value if successful; otherwise returns an invalid Cursor. + * * @param sym symbol id for the field to be set * @param str the actual string for the new field - * @return new Cursor for the new field value - **/ - public Cursor setString(int sym, String str); + * @return a new Cursor for the new field value + */ + Cursor setString(int sym, String str); /** - * Set a field (identified with a symbol id) to contain a new + * Sets a field (identified with a symbol id) to contain a new * value of BOOL type. 
Returns a valid Cursor referencing the new * field value if successful; otherwise returns an invalid Cursor. + * * @param sym symbol id for the field to be set * @param utf8 the actual string (encoded as UTF-8 data) for the new field - * @return new Cursor for the new field value - **/ - public Cursor setString(int sym, byte[] utf8); + * @return a new Cursor for the new field value + */ + Cursor setString(int sym, byte[] utf8); /** - * Set a field (identified with a symbol id) to contain a new + * Sets a field (identified with a symbol id) to contain a new * value of BOOL type. Returns a valid Cursor referencing the new * field value if successful; otherwise returns an invalid Cursor. + * * @param sym symbol id for the field to be set * @param data the actual data to be put into the new field - * @return new Cursor for the new field value - **/ - public Cursor setData(int sym, byte[] data); + * @return a new Cursor for the new field value + */ + Cursor setData(int sym, byte[] data); /** - * Set a field (identified with a symbol id) to contain a new + * Sets a field (identified with a symbol id) to contain a new * value of ARRAY type. Returns a valid Cursor (thay may again be * used for adding new array entries) referencing the new field * value if successful; otherwise returns an invalid Cursor. + * * @param sym symbol id for the field to be set - * @return new Cursor for the new field value - **/ - public Cursor setArray(int sym); + * @return a new Cursor for the new field value + */ + Cursor setArray(int sym); /** - * Set a field (identified with a symbol id) to contain a new + * Sets a field (identified with a symbol id) to contain a new * value of OBJECT type. Returns a valid Cursor (thay may again * be used for setting sub-fields inside the new object) * referencing the new field value if successful; otherwise * returns an invalid Cursor. 
+ * * @param sym symbol id for the field to be set - * @return new Cursor for the new field value - **/ - public Cursor setObject(int sym); + * @return a new Cursor for the new field value + */ + Cursor setObject(int sym); /** - * Set a field (identified with a symbol name) to contain a new + * Sets a field (identified with a symbol name) to contain a new * value of NIX type. Returns a valid Cursor referencing the new * field value if successful; otherwise returns an invalid Cursor. + * * @param name symbol name for the field to be set - * @return new Cursor for the new field value - **/ - public Cursor setNix(String name); + * @return a new Cursor for the new field value + */ + Cursor setNix(String name); /** - * Set a field (identified with a symbol name) to contain a new + * Sets a field (identified with a symbol name) to contain a new * value of BOOL type. Returns a valid Cursor referencing the new * field value if successful; otherwise returns an invalid Cursor. + * * @param name symbol name for the field to be set * @param bit the actual boolean value for the new field - * @return new Cursor for the new field value - **/ - public Cursor setBool(String name, boolean bit); + * @return a new Cursor for the new field value + */ + Cursor setBool(String name, boolean bit); /** - * Set a field (identified with a symbol name) to contain a new + * Sets a field (identified with a symbol name) to contain a new * value of LONG type. Returns a valid Cursor referencing the new * field value if successful; otherwise returns an invalid Cursor. 
+ * * @param name symbol name for the field to be set * @param l the actual long value for the new field - * @return new Cursor for the new field value - **/ - public Cursor setLong(String name, long l); + * @return a new Cursor for the new field value + */ + Cursor setLong(String name, long l); /** - * Set a field (identified with a symbol name) to contain a new + * Sets a field (identified with a symbol name) to contain a new * value of DOUBLE type. Returns a valid Cursor referencing the new * field value if successful; otherwise returns an invalid Cursor. + * * @param name symbol name for the field to be set * @param d the actual double value for the new field - * @return new Cursor for the new field value - **/ - public Cursor setDouble(String name, double d); + * @return a new Cursor for the new field value + */ + Cursor setDouble(String name, double d); /** - * Set a field (identified with a symbol name) to contain a new + * Sets a field (identified with a symbol name) to contain a new * value of STRING type. Returns a valid Cursor referencing the new * field value if successful; otherwise returns an invalid Cursor. + * * @param name symbol name for the field to be set * @param str the actual string for the new field - * @return new Cursor for the new field value - **/ - public Cursor setString(String name, String str); + * @return a new Cursor for the new field value + */ + Cursor setString(String name, String str); /** - * Set a field (identified with a symbol name) to contain a new + * Sets a field (identified with a symbol name) to contain a new * value of STRING type. Returns a valid Cursor referencing the new * field value if successful; otherwise returns an invalid Cursor. 
+ * * @param name symbol name for the field to be set * @param utf8 the actual string (encoded as UTF-8 data) for the new field - * @return new Cursor for the new field value - **/ - public Cursor setString(String name, byte[] utf8); + * @return a new Cursor for the new field value + */ + Cursor setString(String name, byte[] utf8); /** - * Set a field (identified with a symbol name) to contain a new + * Sets a field (identified with a symbol name) to contain a new * value of DATA type. Returns a valid Cursor referencing the new * field value if successful; otherwise returns an invalid Cursor. + * * @param name symbol name for the field to be set * @param data the actual data to be put into the new field - * @return new Cursor for the new field value - **/ - public Cursor setData(String name, byte[] data); + * @return a new Cursor for the new field value + */ + Cursor setData(String name, byte[] data); /** - * Set a field (identified with a symbol name) to contain a new + * Sets a field (identified with a symbol name) to contain a new * value of ARRAY type. Returns a valid Cursor (thay may again be * used for adding new array entries) referencing the new field * value if successful; otherwise returns an invalid Cursor. + * * @param name symbol name for the field to be set - * @return new Cursor for the new field value - **/ - public Cursor setArray(String name); + * @return a new Cursor for the new field value + */ + Cursor setArray(String name); /** - * Set a field (identified with a symbol name) to contain a new + * Sets a field (identified with a symbol name) to contain a new * value of OBJECT type. Returns a valid Cursor (thay may again * be used for setting sub-fields inside the new object) * referencing the new field value if successful; otherwise * returns an invalid Cursor. 
+ * * @param name symbol name for the field to be set - * @return new Cursor for the new field value - **/ - public Cursor setObject(String name); + * @return a new Cursor for the new field value + */ + Cursor setObject(String name); + } diff --git a/vespajlib/src/main/java/com/yahoo/slime/DataValue.java b/vespajlib/src/main/java/com/yahoo/slime/DataValue.java index 5081b3fdbc7..91f20335eb1 100644 --- a/vespajlib/src/main/java/com/yahoo/slime/DataValue.java +++ b/vespajlib/src/main/java/com/yahoo/slime/DataValue.java @@ -1,7 +1,11 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.slime; +/** + * @author havardpe + */ final class DataValue extends Value { + private final byte[] value; private DataValue(byte[] value) { this.value = value; } public static Value create(byte[] value) { @@ -11,7 +15,8 @@ final class DataValue extends Value { return new DataValue(value); } } - public final Type type() { return Type.DATA; } - public final byte[] asData() { return this.value; } - public final void accept(Visitor v) { v.visitData(value); } + public Type type() { return Type.DATA; } + public byte[] asData() { return this.value; } + public void accept(Visitor v) { v.visitData(value); } + } diff --git a/vespajlib/src/main/java/com/yahoo/slime/DoubleValue.java b/vespajlib/src/main/java/com/yahoo/slime/DoubleValue.java index 22b685d5419..23f636f126d 100644 --- a/vespajlib/src/main/java/com/yahoo/slime/DoubleValue.java +++ b/vespajlib/src/main/java/com/yahoo/slime/DoubleValue.java @@ -1,11 +1,16 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.slime; +/** + * @author havardpe + */ final class DoubleValue extends Value { + private final double value; public DoubleValue(double value) { this.value = value; } - public final Type type() { return Type.DOUBLE; } - public final long asLong() { return (long)this.value; } - public final double asDouble() { return this.value; } - public final void accept(Visitor v) { v.visitDouble(value); } + public Type type() { return Type.DOUBLE; } + public long asLong() { return (long)this.value; } + public double asDouble() { return this.value; } + public void accept(Visitor v) { v.visitDouble(value); } + } diff --git a/vespajlib/src/main/java/com/yahoo/slime/JsonDecoder.java b/vespajlib/src/main/java/com/yahoo/slime/JsonDecoder.java index d6818907bf3..788e872f5ce 100644 --- a/vespajlib/src/main/java/com/yahoo/slime/JsonDecoder.java +++ b/vespajlib/src/main/java/com/yahoo/slime/JsonDecoder.java @@ -77,7 +77,7 @@ public class JsonDecoder { @SuppressWarnings("fallthrough") private void decodeNumber(Inserter inserter) { buf.reset(); - boolean likelyFloatingPoint=false; + boolean likelyFloatingPoint = false; for (;;) { switch (c) { case '.': case 'e': case 'E': diff --git a/vespajlib/src/main/java/com/yahoo/slime/LongValue.java b/vespajlib/src/main/java/com/yahoo/slime/LongValue.java index 62752f2b27c..e728e890274 100644 --- a/vespajlib/src/main/java/com/yahoo/slime/LongValue.java +++ b/vespajlib/src/main/java/com/yahoo/slime/LongValue.java @@ -1,11 +1,16 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.slime; +/** + * @author havardpe + */ final class LongValue extends Value { + private final long value; public LongValue(long value) { this.value = value; } - public final Type type() { return Type.LONG; } - public final long asLong() { return this.value; } - public final double asDouble() { return (double)this.value; } - public final void accept(Visitor v) { v.visitLong(value); } + public Type type() { return Type.LONG; } + public long asLong() { return this.value; } + public double asDouble() { return this.value; } + public void accept(Visitor v) { v.visitLong(value); } + } diff --git a/vespajlib/src/main/java/com/yahoo/slime/NixValue.java b/vespajlib/src/main/java/com/yahoo/slime/NixValue.java index b65cd1dabbf..4ae60f26f07 100644 --- a/vespajlib/src/main/java/com/yahoo/slime/NixValue.java +++ b/vespajlib/src/main/java/com/yahoo/slime/NixValue.java @@ -1,12 +1,16 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.slime; +/** + * @author havardpe + */ final class NixValue extends Value { + private static final NixValue invalidNix = new NixValue(); private static final NixValue validNix = new NixValue(); private NixValue() {} - public final Type type() { return Type.NIX; } - public final void accept(Visitor v) { + public Type type() { return Type.NIX; } + public void accept(Visitor v) { if (valid()) { v.visitNix(); } else { @@ -15,4 +19,5 @@ final class NixValue extends Value { } public static NixValue invalid() { return invalidNix; } public static NixValue instance() { return validNix; } + } diff --git a/vespajlib/src/main/java/com/yahoo/slime/ObjectValue.java b/vespajlib/src/main/java/com/yahoo/slime/ObjectValue.java index 33d2e5be4ed..6ba16f8dd6c 100644 --- a/vespajlib/src/main/java/com/yahoo/slime/ObjectValue.java +++ b/vespajlib/src/main/java/com/yahoo/slime/ObjectValue.java @@ -6,7 +6,9 @@ package com.yahoo.slime; * value fields. 
Fields can be inspected or traversed using the * {@link Inspector} interface, and you can add new fields by using the * various "set" methods in the @ref Cursor interface. - **/ + * + * @author havardpe + */ final class ObjectValue extends Value { private int capacity = 16; @@ -16,7 +18,7 @@ final class ObjectValue extends Value { private int[] hash = new int[capacity + hashSize() + (capacity << 1)]; private final SymbolTable names; - private final void rehash() { + private void rehash() { capacity = (capacity << 1); Value[] v = values; values = new Value[capacity]; @@ -37,7 +39,7 @@ final class ObjectValue extends Value { } } - private final Value put(int sym, Value value) { + private Value put(int sym, Value value) { if (used == capacity) { rehash(); } @@ -59,7 +61,7 @@ final class ObjectValue extends Value { return value; } - private final Value get(int sym) { + private Value get(int sym) { int entry = hash[capacity + (sym % hashSize())]; while (entry != 0) { final int idx = hash[entry]; @@ -77,32 +79,33 @@ final class ObjectValue extends Value { put(sym, value); } - public final Type type() { return Type.OBJECT; } - public final int children() { return used; } - public final int fields() { return used; } + public Type type() { return Type.OBJECT; } + public int children() { return used; } + public int fields() { return used; } - public final Value field(int sym) { return get(sym); } - public final Value field(String name) { return get(names.lookup(name)); } + public Value field(int sym) { return get(sym); } + public Value field(String name) { return get(names.lookup(name)); } - public final void accept(Visitor v) { v.visitObject(this); } + public void accept(Visitor v) { v.visitObject(this); } - public final void traverse(ObjectSymbolTraverser ot) { + public void traverse(ObjectSymbolTraverser ot) { for (int i = 0; i < used; ++i) { ot.field(hash[i], values[i]); } } - public final void traverse(ObjectTraverser ot) { + public void traverse(ObjectTraverser ot) { 
for (int i = 0; i < used; ++i) { ot.field(names.inspect(hash[i]), values[i]); } } - protected final Cursor setLeaf(int sym, Value value) { return put(sym, value); } - public final Cursor setArray(int sym) { return put(sym, new ArrayValue(names)); } - public final Cursor setObject(int sym) { return put(sym, new ObjectValue(names)); } + protected Cursor setLeaf(int sym, Value value) { return put(sym, value); } + public Cursor setArray(int sym) { return put(sym, new ArrayValue(names)); } + public Cursor setObject(int sym) { return put(sym, new ObjectValue(names)); } + + protected Cursor setLeaf(String name, Value value) { return put(names.insert(name), value); } + public Cursor setArray(String name) { return put(names.insert(name), new ArrayValue(names)); } + public Cursor setObject(String name) { return put(names.insert(name), new ObjectValue(names)); } - protected final Cursor setLeaf(String name, Value value) { return put(names.insert(name), value); } - public final Cursor setArray(String name) { return put(names.insert(name), new ArrayValue(names)); } - public final Cursor setObject(String name) { return put(names.insert(name), new ObjectValue(names)); } } diff --git a/vespajlib/src/main/java/com/yahoo/slime/StringValue.java b/vespajlib/src/main/java/com/yahoo/slime/StringValue.java index fbd4e150f7e..d7a7281ca1d 100644 --- a/vespajlib/src/main/java/com/yahoo/slime/StringValue.java +++ b/vespajlib/src/main/java/com/yahoo/slime/StringValue.java @@ -4,8 +4,11 @@ package com.yahoo.slime; /** * A value holding a String in Java native format. * See also @ref Utf8Value (for lazy decoding). 
- **/ + * + * @author havardpe + */ final class StringValue extends Value { + private final String value; private byte[] utf8; private StringValue(String value) { this.value = value; } @@ -16,13 +19,14 @@ final class StringValue extends Value { return new StringValue(value); } } - public final Type type() { return Type.STRING; } - public final String asString() { return this.value; } - public final byte[] asUtf8() { + public Type type() { return Type.STRING; } + public String asString() { return this.value; } + public byte[] asUtf8() { if (utf8 == null) { utf8 = Utf8Codec.encode(value); } return utf8; } - public final void accept(Visitor v) { v.visitString(value); } + public void accept(Visitor v) { v.visitString(value); } + } diff --git a/vespajlib/src/main/java/com/yahoo/slime/Utf8Value.java b/vespajlib/src/main/java/com/yahoo/slime/Utf8Value.java index 3ddcf4e4e24..4ea0dcc6a6e 100644 --- a/vespajlib/src/main/java/com/yahoo/slime/Utf8Value.java +++ b/vespajlib/src/main/java/com/yahoo/slime/Utf8Value.java @@ -5,8 +5,11 @@ package com.yahoo.slime; * A value type encapsulating a String in its UTF-8 representation. * Useful for lazy decoding; if the data is just passed through in * UTF-8 it will never be converted at all. 
- **/ + * + * @author havardpe + */ final class Utf8Value extends Value { + private final byte[] value; private String string; private Utf8Value(byte[] value) { this.value = value; } @@ -17,13 +20,14 @@ final class Utf8Value extends Value { return new Utf8Value(value); } } - public final Type type() { return Type.STRING; } - public final String asString() { + public Type type() { return Type.STRING; } + public String asString() { if (string == null) { string = Utf8Codec.decode(value, 0, value.length); } return string; } - public final byte[] asUtf8() { return value; } - public final void accept(Visitor v) { v.visitString(value); } + public byte[] asUtf8() { return value; } + public void accept(Visitor v) { v.visitString(value); } + } diff --git a/vespajlib/src/main/java/com/yahoo/slime/Value.java b/vespajlib/src/main/java/com/yahoo/slime/Value.java index 181dc033f3f..6fb267ab9bb 100644 --- a/vespajlib/src/main/java/com/yahoo/slime/Value.java +++ b/vespajlib/src/main/java/com/yahoo/slime/Value.java @@ -1,16 +1,16 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.slime; - import java.io.ByteArrayOutputStream; import java.util.Arrays; /** * Common implementation for all value types. * All default behavior is here, so specific types only - * need override their actually useful parts. 
- **/ - + * need override their actually useful parts + * + * @author havardpe + */ abstract class Value implements Cursor { private static final String emptyString = ""; diff --git a/vespajlib/src/test/java/com/yahoo/slime/SlimeUtilsTest.java b/vespajlib/src/test/java/com/yahoo/slime/SlimeUtilsTest.java index 67311d75029..28930b67264 100644 --- a/vespajlib/src/test/java/com/yahoo/slime/SlimeUtilsTest.java +++ b/vespajlib/src/test/java/com/yahoo/slime/SlimeUtilsTest.java @@ -8,9 +8,7 @@ import java.io.IOException; import java.util.List; import java.util.stream.Collectors; -import static org.hamcrest.core.Is.is; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; @@ -37,7 +35,8 @@ public class SlimeUtilsTest { SlimeUtils.copyObject(slime2.get(), subobj); - assertThat(root.toString(), is("{\"foo\":\"foobie\",\"bar\":{\"a\":\"a\",\"b\":2,\"c\":true,\"d\":3.14,\"e\":\"0x64\",\"f\":null}}")); + assertEquals("{\"foo\":\"foobie\",\"bar\":{\"a\":\"a\",\"b\":2,\"c\":true,\"d\":3.14,\"e\":\"0x64\",\"f\":null}}", + root.toString()); } @Test @@ -61,7 +60,8 @@ public class SlimeUtilsTest { SlimeUtils.copyObject(slime2.get(), subobj); - assertThat(root.toString(), is("{\"foo\":\"foobie\",\"bar\":{\"a\":[\"foo\",4,true,3.14,null,\"0x64\",{\"inner\":\"binner\"}]}}")); + assertEquals("{\"foo\":\"foobie\",\"bar\":{\"a\":[\"foo\",4,true,3.14,null,\"0x64\",{\"inner\":\"binner\"}]}}", + root.toString()); } @Test @@ -71,21 +71,21 @@ public class SlimeUtilsTest { root.setString("foo", "foobie"); root.setObject("bar"); String json = Utf8.toString(SlimeUtils.toJsonBytes(slime)); - assertThat(json, is("{\"foo\":\"foobie\",\"bar\":{}}")); + assertEquals("{\"foo\":\"foobie\",\"bar\":{}}", json); } @Test public void test_json_to_slime() { byte[] json = Utf8.toBytes("{\"foo\":\"foobie\",\"bar\":{}}"); Slime slime = SlimeUtils.jsonToSlime(json); - 
assertThat(slime.get().field("foo").asString(), is("foobie")); + assertEquals("foobie", slime.get().field("foo").asString()); assertTrue(slime.get().field("bar").valid()); } @Test public void test_json_to_slime_or_throw() { Slime slime = SlimeUtils.jsonToSlimeOrThrow("{\"foo\":\"foobie\",\"bar\":{}}"); - assertThat(slime.get().field("foo").asString(), is("foobie")); + assertEquals("foobie", slime.get().field("foo").asString()); assertTrue(slime.get().field("bar").valid()); } @@ -107,7 +107,7 @@ public class SlimeUtilsTest { assertEquals(0, SlimeUtils.entriesStream(inspector.field("object")).count()); assertEquals(List.of(1L, 2L, 4L, 3L, 0L), - SlimeUtils.entriesStream(inspector.field("list")).map(Inspector::asLong).collect(Collectors.toList())); + SlimeUtils.entriesStream(inspector.field("list")).map(Inspector::asLong).collect(Collectors.toList())); } } diff --git a/vespalib/src/tests/btree/btree_store/btree_store_test.cpp b/vespalib/src/tests/btree/btree_store/btree_store_test.cpp index e7d923d0e87..974aafb392a 100644 --- a/vespalib/src/tests/btree/btree_store/btree_store_test.cpp +++ b/vespalib/src/tests/btree/btree_store/btree_store_test.cpp @@ -5,9 +5,12 @@ #include <vespa/vespalib/btree/btreeroot.hpp> #include <vespa/vespalib/btree/btreestore.hpp> #include <vespa/vespalib/datastore/buffer_type.hpp> +#include <vespa/vespalib/datastore/compaction_strategy.h> #include <vespa/vespalib/gtest/gtest.h> using vespalib::GenerationHandler; +using vespalib::datastore::CompactionSpec; +using vespalib::datastore::CompactionStrategy; using vespalib::datastore::EntryRef; namespace vespalib::btree { @@ -73,61 +76,115 @@ BTreeStoreTest::~BTreeStoreTest() inc_generation(); } +namespace { + +class ChangeWriter { + std::vector<EntryRef*> _old_refs; +public: + ChangeWriter(uint32_t capacity); + ~ChangeWriter(); + void write(const std::vector<EntryRef>& refs); + void emplace_back(EntryRef& ref) { _old_refs.emplace_back(&ref); } +}; + +ChangeWriter::ChangeWriter(uint32_t capacity) + 
: _old_refs() +{ + _old_refs.reserve(capacity); +} + +ChangeWriter::~ChangeWriter() = default; + +void +ChangeWriter::write(const std::vector<EntryRef> &refs) +{ + assert(refs.size() == _old_refs.size()); + auto old_ref_itr = _old_refs.begin(); + for (auto ref : refs) { + **old_ref_itr = ref; + ++old_ref_itr; + } + assert(old_ref_itr == _old_refs.end()); + _old_refs.clear(); +} + +} + void BTreeStoreTest::test_compact_sequence(uint32_t sequence_length) { auto &store = _store; + uint32_t entry_ref_offset_bits = TreeStore::RefType::offset_bits; EntryRef ref1 = add_sequence(4, 4 + sequence_length); EntryRef ref2 = add_sequence(5, 5 + sequence_length); - EntryRef old_ref1 = ref1; - EntryRef old_ref2 = ref2; std::vector<EntryRef> refs; + refs.reserve(2); + refs.emplace_back(ref1); + refs.emplace_back(ref2); + std::vector<EntryRef> temp_refs; for (int i = 0; i < 1000; ++i) { - refs.emplace_back(add_sequence(i + 6, i + 6 + sequence_length)); + temp_refs.emplace_back(add_sequence(i + 6, i + 6 + sequence_length)); } - for (auto& ref : refs) { + for (auto& ref : temp_refs) { store.clear(ref); } inc_generation(); + ChangeWriter change_writer(refs.size()); + std::vector<EntryRef> move_refs; + move_refs.reserve(refs.size()); auto usage_before = store.getMemoryUsage(); for (uint32_t pass = 0; pass < 15; ++pass) { - auto to_hold = store.start_compact_worst_buffers(); - ref1 = store.move(ref1); - ref2 = store.move(ref2); + CompactionSpec compaction_spec(true, false); + CompactionStrategy compaction_strategy; + auto to_hold = store.start_compact_worst_buffers(compaction_spec, compaction_strategy); + std::vector<bool> filter(TreeStore::RefType::numBuffers()); + for (auto buffer_id : to_hold) { + filter[buffer_id] = true; + } + for (auto& ref : refs) { + if (ref.valid() && filter[ref.buffer_id(entry_ref_offset_bits)]) { + move_refs.emplace_back(ref); + change_writer.emplace_back(ref); + } + } + store.move(move_refs); + change_writer.write(move_refs); + move_refs.clear(); 
store.finishCompact(to_hold); inc_generation(); } - EXPECT_NE(old_ref1, ref1); - EXPECT_NE(old_ref2, ref2); - EXPECT_EQ(make_exp_sequence(4, 4 + sequence_length), get_sequence(ref1)); - EXPECT_EQ(make_exp_sequence(5, 5 + sequence_length), get_sequence(ref2)); + EXPECT_NE(ref1, refs[0]); + EXPECT_NE(ref2, refs[1]); + EXPECT_EQ(make_exp_sequence(4, 4 + sequence_length), get_sequence(refs[0])); + EXPECT_EQ(make_exp_sequence(5, 5 + sequence_length), get_sequence(refs[1])); auto usage_after = store.getMemoryUsage(); EXPECT_GT(usage_before.deadBytes(), usage_after.deadBytes()); - store.clear(ref1); - store.clear(ref2); + store.clear(refs[0]); + store.clear(refs[1]); } TEST_F(BTreeStoreTest, require_that_nodes_for_multiple_btrees_are_compacted) { auto &store = this->_store; - EntryRef ref1 = add_sequence(4, 40); - EntryRef ref2 = add_sequence(100, 130); + std::vector<EntryRef> refs; + refs.emplace_back(add_sequence(4, 40)); + refs.emplace_back(add_sequence(100, 130)); store.clear(add_sequence(1000, 20000)); inc_generation(); auto usage_before = store.getMemoryUsage(); for (uint32_t pass = 0; pass < 15; ++pass) { - auto to_hold = store.start_compact_worst_btree_nodes(); - store.move_btree_nodes(ref1); - store.move_btree_nodes(ref2); + CompactionStrategy compaction_strategy; + auto to_hold = store.start_compact_worst_btree_nodes(compaction_strategy); + store.move_btree_nodes(refs); store.finish_compact_worst_btree_nodes(to_hold); inc_generation(); } - EXPECT_EQ(make_exp_sequence(4, 40), get_sequence(ref1)); - EXPECT_EQ(make_exp_sequence(100, 130), get_sequence(ref2)); + EXPECT_EQ(make_exp_sequence(4, 40), get_sequence(refs[0])); + EXPECT_EQ(make_exp_sequence(100, 130), get_sequence(refs[1])); auto usage_after = store.getMemoryUsage(); EXPECT_GT(usage_before.deadBytes(), usage_after.deadBytes()); - store.clear(ref1); - store.clear(ref2); + store.clear(refs[0]); + store.clear(refs[1]); } TEST_F(BTreeStoreTest, require_that_short_arrays_are_compacted) diff --git 
a/vespalib/src/tests/btree/btree_test.cpp b/vespalib/src/tests/btree/btree_test.cpp index 4af0b9672f2..bd4f4f8ee08 100644 --- a/vespalib/src/tests/btree/btree_test.cpp +++ b/vespalib/src/tests/btree/btree_test.cpp @@ -17,6 +17,7 @@ #include <vespa/vespalib/btree/btree.hpp> #include <vespa/vespalib/btree/btreestore.hpp> #include <vespa/vespalib/datastore/buffer_type.hpp> +#include <vespa/vespalib/datastore/compaction_strategy.h> #include <vespa/vespalib/test/btree/btree_printer.h> #include <vespa/vespalib/gtest/gtest.h> @@ -24,6 +25,7 @@ LOG_SETUP("btree_test"); using vespalib::GenerationHandler; +using vespalib::datastore::CompactionStrategy; using vespalib::datastore::EntryRef; namespace vespalib::btree { @@ -1599,8 +1601,9 @@ TEST_F(BTreeTest, require_that_compaction_works) auto memory_usage_before = t.getAllocator().getMemoryUsage(); t.foreach_key([&before_list](int key) { before_list.emplace_back(key); }); make_iterators(t, before_list, before_iterators); + CompactionStrategy compaction_strategy; for (int i = 0; i < 15; ++i) { - t.compact_worst(); + t.compact_worst(compaction_strategy); } inc_generation(g, t); auto memory_usage_after = t.getAllocator().getMemoryUsage(); diff --git a/vespalib/src/tests/datastore/array_store/array_store_test.cpp b/vespalib/src/tests/datastore/array_store/array_store_test.cpp index dbd6d41f5e6..c58e357a9a1 100644 --- a/vespalib/src/tests/datastore/array_store/array_store_test.cpp +++ b/vespalib/src/tests/datastore/array_store/array_store_test.cpp @@ -3,6 +3,8 @@ #include <vespa/vespalib/test/datastore/buffer_stats.h> #include <vespa/vespalib/test/datastore/memstats.h> #include <vespa/vespalib/datastore/array_store.hpp> +#include <vespa/vespalib/datastore/compaction_spec.h> +#include <vespa/vespalib/datastore/compaction_strategy.h> #include <vespa/vespalib/stllike/hash_map.hpp> #include <vespa/vespalib/testkit/testapp.h> #include <vespa/vespalib/test/insertion_operators.h> @@ -124,7 +126,9 @@ struct Fixture } template <typename 
TestedRefType> void compactWorst(bool compactMemory, bool compactAddressSpace) { - ICompactionContext::UP ctx = store.compactWorst(compactMemory, compactAddressSpace); + CompactionSpec compaction_spec(compactMemory, compactAddressSpace); + CompactionStrategy compaction_strategy; + ICompactionContext::UP ctx = store.compactWorst(compaction_spec, compaction_strategy); std::vector<TestedRefType> refs; for (auto itr = refStore.begin(); itr != refStore.end(); ++itr) { refs.emplace_back(itr->first); diff --git a/vespalib/src/tests/datastore/sharded_hash_map/sharded_hash_map_test.cpp b/vespalib/src/tests/datastore/sharded_hash_map/sharded_hash_map_test.cpp index 6e984f286c1..796e19a97d1 100644 --- a/vespalib/src/tests/datastore/sharded_hash_map/sharded_hash_map_test.cpp +++ b/vespalib/src/tests/datastore/sharded_hash_map/sharded_hash_map_test.cpp @@ -1,6 +1,7 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. #include <vespa/vespalib/datastore/sharded_hash_map.h> +#include <vespa/vespalib/datastore/entry_ref_filter.h> #include <vespa/vespalib/datastore/i_compactable.h> #include <vespa/vespalib/datastore/unique_store_allocator.h> #include <vespa/vespalib/datastore/unique_store_comparator.h> @@ -12,12 +13,14 @@ #include <vespa/vespalib/gtest/gtest.h> #include <vespa/vespalib/datastore/unique_store_allocator.hpp> +#include <iostream> #include <thread> #include <vespa/log/log.h> LOG_SETUP("vespalib_datastore_shared_hash_test"); using vespalib::datastore::EntryRef; +using vespalib::datastore::EntryRefFilter; using vespalib::datastore::ICompactable; using RefT = vespalib::datastore::EntryRefT<22>; using MyAllocator = vespalib::datastore::UniqueStoreAllocator<uint32_t, RefT>; @@ -27,6 +30,26 @@ using MyHashMap = vespalib::datastore::ShardedHashMap; using GenerationHandler = vespalib::GenerationHandler; using vespalib::makeLambdaTask; +constexpr uint32_t small_population = 50; +/* + * large_population should trigger 
multiple callbacks from normalize_values + * and foreach_value + */ +constexpr uint32_t large_population = 1200; + +namespace vespalib::datastore { + +/* + * Print EntryRef as RefT which is used by test_normalize_values and + * test_foreach_value to differentiate between buffers + */ +void PrintTo(const EntryRef &ref, std::ostream* os) { + RefT iref(ref); + *os << "RefT(" << iref.offset() << "," << iref.bufferId() << ")"; +} + +} + namespace { void consider_yield(uint32_t i) @@ -58,6 +81,19 @@ public: } }; +uint32_t select_buffer(uint32_t i) { + if ((i % 2) == 0) { + return 0; + } + if ((i % 3) == 0) { + return 1; + } + if ((i % 5) == 0) { + return 2; + } + return 3; +} + } struct DataStoreShardedHashTest : public ::testing::Test @@ -86,7 +122,11 @@ struct DataStoreShardedHashTest : public ::testing::Test void read_work(uint32_t cnt); void read_work(); void write_work(uint32_t cnt); - void populate_sample_data(); + void populate_sample_data(uint32_t cnt); + void populate_sample_values(uint32_t cnt); + void clear_sample_values(uint32_t cnt); + void test_normalize_values(bool use_filter, bool one_filter); + void test_foreach_value(bool one_filter); }; @@ -213,13 +253,94 @@ DataStoreShardedHashTest::write_work(uint32_t cnt) } void -DataStoreShardedHashTest::populate_sample_data() +DataStoreShardedHashTest::populate_sample_data(uint32_t cnt) { - for (uint32_t i = 0; i < 50; ++i) { + for (uint32_t i = 0; i < cnt; ++i) { insert(i); } } +void +DataStoreShardedHashTest::populate_sample_values(uint32_t cnt) +{ + for (uint32_t i = 0; i < cnt; ++i) { + MyCompare comp(_store, i); + auto result = _hash_map.find(comp, EntryRef()); + ASSERT_NE(result, nullptr); + EXPECT_EQ(i, _allocator.get_wrapped(result->first.load_relaxed()).value()); + result->second.store_relaxed(RefT(i + 200, select_buffer(i))); + } +} + +void +DataStoreShardedHashTest::clear_sample_values(uint32_t cnt) +{ + for (uint32_t i = 0; i < cnt; ++i) { + MyCompare comp(_store, i); + auto result = 
_hash_map.find(comp, EntryRef()); + ASSERT_NE(result, nullptr); + EXPECT_EQ(i, _allocator.get_wrapped(result->first.load_relaxed()).value()); + result->second.store_relaxed(EntryRef()); + } +} + +namespace { + +template <typename RefT> +EntryRefFilter +make_entry_ref_filter(bool one_filter) +{ + if (one_filter) { + EntryRefFilter filter(RefT::numBuffers(), RefT::offset_bits); + filter.add_buffer(3); + return filter; + } + return EntryRefFilter::create_all_filter(RefT::numBuffers(), RefT::offset_bits); +} + +} + +void +DataStoreShardedHashTest::test_normalize_values(bool use_filter, bool one_filter) +{ + populate_sample_data(large_population); + populate_sample_values(large_population); + if (use_filter) { + auto filter = make_entry_ref_filter<RefT>(one_filter); + EXPECT_TRUE(_hash_map.normalize_values([](std::vector<EntryRef> &refs) noexcept { for (auto &ref : refs) { RefT iref(ref); ref = RefT(iref.offset() + 300, iref.bufferId()); } }, filter)); + } else { + EXPECT_TRUE(_hash_map.normalize_values([](EntryRef ref) noexcept { RefT iref(ref); return RefT(iref.offset() + 300, iref.bufferId()); })); + } + for (uint32_t i = 0; i < large_population; ++i) { + MyCompare comp(_store, i); + auto result = _hash_map.find(comp, EntryRef()); + ASSERT_NE(result, nullptr); + EXPECT_EQ(i, _allocator.get_wrapped(result->first.load_relaxed()).value()); + ASSERT_EQ(select_buffer(i), RefT(result->second.load_relaxed()).bufferId()); + if (use_filter && one_filter && select_buffer(i) != 3) { + ASSERT_EQ(i + 200, RefT(result->second.load_relaxed()).offset()); + } else { + ASSERT_EQ(i + 500, RefT(result->second.load_relaxed()).offset()); + } + result->second.store_relaxed(EntryRef()); + } +} + +void +DataStoreShardedHashTest::test_foreach_value(bool one_filter) +{ + populate_sample_data(large_population); + populate_sample_values(large_population); + + auto filter = make_entry_ref_filter<RefT>(one_filter); + std::vector<EntryRef> exp_refs; + 
EXPECT_FALSE(_hash_map.normalize_values([&exp_refs](std::vector<EntryRef>& refs) { exp_refs.insert(exp_refs.end(), refs.begin(), refs.end()); }, filter)); + std::vector<EntryRef> act_refs; + _hash_map.foreach_value([&act_refs](const std::vector<EntryRef> &refs) { act_refs.insert(act_refs.end(), refs.begin(), refs.end()); }, filter); + EXPECT_EQ(exp_refs, act_refs); + clear_sample_values(large_population); +} + TEST_F(DataStoreShardedHashTest, single_threaded_reader_without_updates) { _report_work = true; @@ -254,7 +375,7 @@ TEST_F(DataStoreShardedHashTest, memory_usage_is_reported) EXPECT_EQ(0, initial_usage.deadBytes()); EXPECT_EQ(0, initial_usage.allocatedBytesOnHold()); auto guard = _generationHandler.takeGuard(); - for (uint32_t i = 0; i < 50; ++i) { + for (uint32_t i = 0; i < small_population; ++i) { insert(i); } auto usage = _hash_map.get_memory_usage(); @@ -264,30 +385,31 @@ TEST_F(DataStoreShardedHashTest, memory_usage_is_reported) TEST_F(DataStoreShardedHashTest, foreach_key_works) { - populate_sample_data(); + populate_sample_data(small_population); std::vector<uint32_t> keys; _hash_map.foreach_key([this, &keys](EntryRef ref) { keys.emplace_back(_allocator.get_wrapped(ref).value()); }); std::sort(keys.begin(), keys.end()); - EXPECT_EQ(50, keys.size()); - for (uint32_t i = 0; i < 50; ++i) { + EXPECT_EQ(small_population, keys.size()); + for (uint32_t i = 0; i < small_population; ++i) { EXPECT_EQ(i, keys[i]); } } TEST_F(DataStoreShardedHashTest, move_keys_works) { - populate_sample_data(); + populate_sample_data(small_population); std::vector<EntryRef> refs; _hash_map.foreach_key([&refs](EntryRef ref) { refs.emplace_back(ref); }); std::vector<EntryRef> new_refs; MyCompactable my_compactable(_allocator, new_refs); - _hash_map.move_keys(my_compactable, std::vector<bool>(RefT::numBuffers(), true), RefT::offset_bits); + auto filter = make_entry_ref_filter<RefT>(false); + _hash_map.move_keys(my_compactable, filter); std::vector<EntryRef> verify_new_refs; 
_hash_map.foreach_key([&verify_new_refs](EntryRef ref) { verify_new_refs.emplace_back(ref); }); - EXPECT_EQ(50u, refs.size()); + EXPECT_EQ(small_population, refs.size()); EXPECT_NE(refs, new_refs); EXPECT_EQ(new_refs, verify_new_refs); - for (uint32_t i = 0; i < 50; ++i) { + for (uint32_t i = 0; i < small_population; ++i) { EXPECT_NE(refs[i], new_refs[i]); auto value = _allocator.get_wrapped(refs[i]).value(); auto new_value = _allocator.get_wrapped(refs[i]).value(); @@ -297,29 +419,33 @@ TEST_F(DataStoreShardedHashTest, move_keys_works) TEST_F(DataStoreShardedHashTest, normalize_values_works) { - populate_sample_data(); - for (uint32_t i = 0; i < 50; ++i) { - MyCompare comp(_store, i); - auto result = _hash_map.find(comp, EntryRef()); - ASSERT_NE(result, nullptr); - EXPECT_EQ(i, _allocator.get_wrapped(result->first.load_relaxed()).value()); - result->second.store_relaxed(EntryRef(i + 200)); - } - _hash_map.normalize_values([](EntryRef ref) noexcept { return EntryRef(ref.ref() + 300); }); - for (uint32_t i = 0; i < 50; ++i) { - MyCompare comp(_store, i); - auto result = _hash_map.find(comp, EntryRef()); - ASSERT_NE(result, nullptr); - EXPECT_EQ(i, _allocator.get_wrapped(result->first.load_relaxed()).value()); - ASSERT_EQ(i + 500, result->second.load_relaxed().ref()); - result->second.store_relaxed(EntryRef()); - } + test_normalize_values(false, false); +} + +TEST_F(DataStoreShardedHashTest, normalize_values_all_filter_works) +{ + test_normalize_values(true, false); +} + +TEST_F(DataStoreShardedHashTest, normalize_values_one_filter_works) +{ + test_normalize_values(true, true); +} + +TEST_F(DataStoreShardedHashTest, foreach_value_all_filter_works) +{ + test_foreach_value(false); +} + +TEST_F(DataStoreShardedHashTest, foreach_value_one_filter_works) +{ + test_foreach_value(true); } TEST_F(DataStoreShardedHashTest, compact_worst_shard_works) { - populate_sample_data(); - for (uint32_t i = 10; i < 50; ++i) { + populate_sample_data(small_population); + for (uint32_t i = 
10; i < small_population; ++i) { remove(i); } commit(); diff --git a/vespalib/src/tests/datastore/unique_store/unique_store_test.cpp b/vespalib/src/tests/datastore/unique_store/unique_store_test.cpp index ccb18f13871..917c91f2dff 100644 --- a/vespalib/src/tests/datastore/unique_store/unique_store_test.cpp +++ b/vespalib/src/tests/datastore/unique_store/unique_store_test.cpp @@ -1,4 +1,6 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +#include <vespa/vespalib/datastore/compaction_spec.h> +#include <vespa/vespalib/datastore/compaction_strategy.h> #include <vespa/vespalib/datastore/unique_store.hpp> #include <vespa/vespalib/datastore/unique_store_remapper.h> #include <vespa/vespalib/datastore/unique_store_string_allocator.hpp> @@ -111,7 +113,9 @@ struct TestBase : public ::testing::Test { store.trimHoldLists(generation); } void compactWorst() { - auto remapper = store.compact_worst(true, true); + CompactionSpec compaction_spec(true, true); + CompactionStrategy compaction_strategy; + auto remapper = store.compact_worst(compaction_spec, compaction_strategy); std::vector<EntryRef> refs; for (const auto &elem : refStore) { refs.push_back(elem.first); diff --git a/vespalib/src/tests/datastore/unique_store_dictionary/unique_store_dictionary_test.cpp b/vespalib/src/tests/datastore/unique_store_dictionary/unique_store_dictionary_test.cpp index 8d82c10d340..4a8b7eafe6a 100644 --- a/vespalib/src/tests/datastore/unique_store_dictionary/unique_store_dictionary_test.cpp +++ b/vespalib/src/tests/datastore/unique_store_dictionary/unique_store_dictionary_test.cpp @@ -1,5 +1,6 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+#include <vespa/vespalib/datastore/compaction_strategy.h> #include <vespa/vespalib/datastore/unique_store.hpp> #include <vespa/vespalib/datastore/unique_store_dictionary.hpp> #include <vespa/vespalib/datastore/sharded_hash_map.h> @@ -137,8 +138,9 @@ TYPED_TEST(UniqueStoreDictionaryTest, compaction_works) this->inc_generation(); auto btree_memory_usage_before = this->dict.get_btree_memory_usage(); auto hash_memory_usage_before = this->dict.get_hash_memory_usage(); + CompactionStrategy compaction_strategy; for (uint32_t i = 0; i < 15; ++i) { - this->dict.compact_worst(true, true); + this->dict.compact_worst(true, true, compaction_strategy); } this->inc_generation(); auto btree_memory_usage_after = this->dict.get_btree_memory_usage(); diff --git a/vespalib/src/tests/net/tls/policy_checking_certificate_verifier/policy_checking_certificate_verifier_test.cpp b/vespalib/src/tests/net/tls/policy_checking_certificate_verifier/policy_checking_certificate_verifier_test.cpp index e129ef2a389..812d06868fd 100644 --- a/vespalib/src/tests/net/tls/policy_checking_certificate_verifier/policy_checking_certificate_verifier_test.cpp +++ b/vespalib/src/tests/net/tls/policy_checking_certificate_verifier/policy_checking_certificate_verifier_test.cpp @@ -7,57 +7,93 @@ using namespace vespalib; using namespace vespalib::net::tls; -bool glob_matches(vespalib::stringref pattern, vespalib::stringref string_to_check) { - auto glob = CredentialMatchPattern::create_from_glob(pattern); +bool dns_glob_matches(vespalib::stringref pattern, vespalib::stringref string_to_check) { + auto glob = CredentialMatchPattern::create_from_dns_glob(pattern); return glob->matches(string_to_check); } +bool uri_glob_matches(vespalib::stringref pattern, vespalib::stringref string_to_check) { + auto glob = CredentialMatchPattern::create_from_uri_glob(pattern); + return glob->matches(string_to_check); +} + +void verify_all_glob_types_match(vespalib::stringref pattern, vespalib::stringref string_to_check) { + 
EXPECT_TRUE(dns_glob_matches(pattern, string_to_check)); + EXPECT_TRUE(uri_glob_matches(pattern, string_to_check)); +} + +void verify_all_glob_types_mismatch(vespalib::stringref pattern, vespalib::stringref string_to_check) { + EXPECT_FALSE(dns_glob_matches(pattern, string_to_check)); + EXPECT_FALSE(uri_glob_matches(pattern, string_to_check)); +} + TEST("glob without wildcards matches entire string") { - EXPECT_TRUE(glob_matches("foo", "foo")); - EXPECT_FALSE(glob_matches("foo", "fooo")); - EXPECT_FALSE(glob_matches("foo", "ffoo")); + verify_all_glob_types_match("foo", "foo"); + verify_all_glob_types_mismatch("foo", "fooo"); + verify_all_glob_types_mismatch("foo", "ffoo"); } TEST("wildcard glob can match prefix") { - EXPECT_TRUE(glob_matches("foo*", "foo")); - EXPECT_TRUE(glob_matches("foo*", "foobar")); - EXPECT_FALSE(glob_matches("foo*", "ffoo")); + verify_all_glob_types_match("foo*", "foo"); + verify_all_glob_types_match("foo*", "foobar"); + verify_all_glob_types_mismatch("foo*", "ffoo"); } TEST("wildcard glob can match suffix") { - EXPECT_TRUE(glob_matches("*foo", "foo")); - EXPECT_TRUE(glob_matches("*foo", "ffoo")); - EXPECT_FALSE(glob_matches("*foo", "fooo")); + verify_all_glob_types_match("*foo", "foo"); + verify_all_glob_types_match("*foo", "ffoo"); + verify_all_glob_types_mismatch("*foo", "fooo"); } TEST("wildcard glob can match substring") { - EXPECT_TRUE(glob_matches("f*o", "fo")); - EXPECT_TRUE(glob_matches("f*o", "foo")); - EXPECT_TRUE(glob_matches("f*o", "ffoo")); - EXPECT_FALSE(glob_matches("f*o", "boo")); + verify_all_glob_types_match("f*o", "fo"); + verify_all_glob_types_match("f*o", "foo"); + verify_all_glob_types_match("f*o", "ffoo"); + verify_all_glob_types_mismatch("f*o", "boo"); } -TEST("wildcard glob does not cross multiple dot delimiter boundaries") { - EXPECT_TRUE(glob_matches("*.bar.baz", "foo.bar.baz")); - EXPECT_TRUE(glob_matches("*.bar.baz", ".bar.baz")); - EXPECT_FALSE(glob_matches("*.bar.baz", "zoid.foo.bar.baz")); - 
EXPECT_TRUE(glob_matches("foo.*.baz", "foo.bar.baz")); - EXPECT_FALSE(glob_matches("foo.*.baz", "foo.bar.zoid.baz")); +TEST("single char DNS glob matches single character") { + EXPECT_TRUE(dns_glob_matches("f?o", "foo")); + EXPECT_FALSE(dns_glob_matches("f?o", "fooo")); + EXPECT_FALSE(dns_glob_matches("f?o", "ffoo")); } -TEST("single char glob matches non dot characters") { - EXPECT_TRUE(glob_matches("f?o", "foo")); - EXPECT_FALSE(glob_matches("f?o", "fooo")); - EXPECT_FALSE(glob_matches("f?o", "ffoo")); - EXPECT_FALSE(glob_matches("f?o", "f.o")); +// Due to URIs being able to contain '?' characters as a query separator, don't use it for wildcarding. +TEST("URI glob matching treats question mark character as literal match") { + EXPECT_TRUE(uri_glob_matches("f?o", "f?o")); + EXPECT_FALSE(uri_glob_matches("f?o", "foo")); + EXPECT_FALSE(uri_glob_matches("f?o", "f?oo")); +} + +TEST("wildcard DNS glob does not cross multiple dot delimiter boundaries") { + EXPECT_TRUE(dns_glob_matches("*.bar.baz", "foo.bar.baz")); + EXPECT_TRUE(dns_glob_matches("*.bar.baz", ".bar.baz")); + EXPECT_FALSE(dns_glob_matches("*.bar.baz", "zoid.foo.bar.baz")); + EXPECT_TRUE(dns_glob_matches("foo.*.baz", "foo.bar.baz")); + EXPECT_FALSE(dns_glob_matches("foo.*.baz", "foo.bar.zoid.baz")); +} + +TEST("wildcard URI glob does not cross multiple fwd slash delimiter boundaries") { + EXPECT_TRUE(uri_glob_matches("*/bar/baz", "foo/bar/baz")); + EXPECT_TRUE(uri_glob_matches("*/bar/baz", "/bar/baz")); + EXPECT_FALSE(uri_glob_matches("*/bar/baz", "bar/baz")); + EXPECT_FALSE(uri_glob_matches("*/bar/baz", "/bar/baz/")); + EXPECT_FALSE(uri_glob_matches("*/bar/baz", "zoid/foo/bar/baz")); + EXPECT_TRUE(uri_glob_matches("foo/*/baz", "foo/bar/baz")); + EXPECT_FALSE(uri_glob_matches("foo/*/baz", "foo/bar/zoid/baz")); + EXPECT_TRUE(uri_glob_matches("foo/*/baz", "foo/bar.zoid/baz")); // No special handling of dots +} + +TEST("single char DNS glob matches non dot characters only") { + 
EXPECT_FALSE(dns_glob_matches("f?o", "f.o")); } TEST("special basic regex characters are escaped") { - EXPECT_TRUE(glob_matches("$[.\\^", "$[.\\^")); + verify_all_glob_types_match("$[.\\^", "$[.\\^"); } TEST("special extended regex characters are ignored") { - EXPECT_TRUE(glob_matches("{)(+|]}", "{)(+|]}")); + verify_all_glob_types_match("{)(+|]}", "{)(+|]}"); } // TODO CN + SANs @@ -116,7 +152,7 @@ TEST("DNS SAN requirement without glob pattern is matched as exact string") { EXPECT_FALSE(verify(authorized, creds_with_dns_sans({{"hello.world.bar"}}))); } -TEST("DNS SAN requirement can include glob wildcards") { +TEST("DNS SAN requirement can include glob wildcards, delimited by dot character") { auto authorized = authorized_peers({policy_with({required_san_dns("*.w?rld")})}); EXPECT_TRUE(verify(authorized, creds_with_dns_sans({{"hello.world"}}))); EXPECT_TRUE(verify(authorized, creds_with_dns_sans({{"greetings.w0rld"}}))); @@ -124,8 +160,8 @@ TEST("DNS SAN requirement can include glob wildcards") { EXPECT_FALSE(verify(authorized, creds_with_dns_sans({{"world"}}))); } -// FIXME make this RFC 2459-compliant with subdomain matching, case insensitity for host etc -TEST("URI SAN requirement is matched as exact string in cheeky, pragmatic violation of RFC 2459") { +// TODO consider making this RFC 2459-compliant with case insensitivity for scheme and host +TEST("URI SAN requirement without glob pattern is matched as exact string") { auto authorized = authorized_peers({policy_with({required_san_uri("foo://bar.baz/zoid")})}); EXPECT_TRUE(verify(authorized, creds_with_uri_sans({{"foo://bar.baz/zoid"}}))); EXPECT_FALSE(verify(authorized, creds_with_uri_sans({{"foo://bar.baz/zoi"}}))); @@ -136,6 +172,25 @@ TEST("URI SAN requirement is matched as exact string in cheeky, pragmatic violat EXPECT_FALSE(verify(authorized, creds_with_uri_sans({{"foo://BAR.baz/zoid"}}))); } +// TODO consider making this RFC 2459-compliant with case insensitivity for scheme and host +TEST("URI SAN 
requirement can include glob wildcards, delimited by fwd slash character") { + auto authorized = authorized_peers({policy_with({required_san_uri("myscheme://my/*/uri")})}); + EXPECT_TRUE(verify(authorized, creds_with_uri_sans({{"myscheme://my/cool/uri"}}))); + EXPECT_TRUE(verify(authorized, creds_with_uri_sans({{"myscheme://my/really.cool/uri"}}))); // Not delimited by dots + EXPECT_FALSE(verify(authorized, creds_with_uri_sans({{"theirscheme://my/cool/uri"}}))); + EXPECT_FALSE(verify(authorized, creds_with_uri_sans({{"myscheme://their/cool/uri"}}))); + EXPECT_FALSE(verify(authorized, creds_with_uri_sans({{"myscheme://my/cool/uris"}}))); + EXPECT_FALSE(verify(authorized, creds_with_uri_sans({{"myscheme://my/swag/uri/"}}))); + EXPECT_FALSE(verify(authorized, creds_with_uri_sans({{"myscheme://my/uri"}}))); +} + +TEST("URI SAN requirement can include query part even though it's rather silly to do so") { + auto authorized = authorized_peers({policy_with({required_san_uri("myscheme://my/fancy/*?magic")})}); + EXPECT_TRUE(verify(authorized, creds_with_uri_sans({{"myscheme://my/fancy/uri?magic"}}))); + EXPECT_TRUE(verify(authorized, creds_with_uri_sans({{"myscheme://my/fancy/?magic"}}))); + EXPECT_FALSE(verify(authorized, creds_with_uri_sans({{"myscheme://my/fancy/urimagic"}}))); +} + TEST("multi-SAN policy requires all SANs to be present in certificate") { auto authorized = authorized_peers({policy_with({required_san_dns("hello.world"), required_san_dns("foo.bar"), @@ -157,6 +212,13 @@ TEST("wildcard DNS SAN in certificate is not treated as a wildcard match by poli EXPECT_FALSE(verify(authorized, creds_with_dns_sans({{"*.world"}}))); } +TEST("wildcard URI SAN in certificate is not treated as a wildcard match by policy") { + auto authorized = authorized_peers({policy_with({required_san_uri("hello://world")})}); + EXPECT_FALSE(verify(authorized, creds_with_uri_sans({{"hello://*"}}))); +} + +// TODO this is just by coincidence since we match '*' as any other character, not 
because we interpret +// the wildcard in the SAN as anything special during matching. Consider if we need/want to handle explicitly. TEST("wildcard DNS SAN in certificate is still matched by wildcard policy SAN") { auto authorized = authorized_peers({policy_with({required_san_dns("*.world")})}); EXPECT_TRUE(verify(authorized, creds_with_dns_sans({{"*.world"}}))); diff --git a/vespalib/src/tests/thread/thread_test.cpp b/vespalib/src/tests/thread/thread_test.cpp index 43951b4b734..ee4f97c34cc 100644 --- a/vespalib/src/tests/thread/thread_test.cpp +++ b/vespalib/src/tests/thread/thread_test.cpp @@ -6,6 +6,8 @@ using namespace vespalib; +VESPA_THREAD_STACK_TAG(test_agent_thread); + struct Agent : public Runnable { bool started; int loopCnt; @@ -22,7 +24,7 @@ struct Agent : public Runnable { TEST("thread never started") { Agent agent; { - Thread thread(agent); + Thread thread(agent, test_agent_thread); } EXPECT_TRUE(!agent.started); EXPECT_EQUAL(0, agent.loopCnt); @@ -31,7 +33,7 @@ TEST("thread never started") { TEST("normal operation") { Agent agent; { - Thread thread(agent); + Thread thread(agent, test_agent_thread); thread.start(); std::this_thread::sleep_for(20ms); thread.stop().join(); @@ -43,7 +45,7 @@ TEST("normal operation") { TEST("stop before start") { Agent agent; { - Thread thread(agent); + Thread thread(agent, test_agent_thread); thread.stop(); thread.start(); thread.join(); diff --git a/vespalib/src/tests/util/rcuvector/rcuvector_test.cpp b/vespalib/src/tests/util/rcuvector/rcuvector_test.cpp index 9ad0e95667b..cf84ab03a25 100644 --- a/vespalib/src/tests/util/rcuvector/rcuvector_test.cpp +++ b/vespalib/src/tests/util/rcuvector/rcuvector_test.cpp @@ -19,19 +19,14 @@ assertUsage(const MemoryUsage & exp, const MemoryUsage & act) TEST("test generation holder") { - typedef std::unique_ptr<int32_t> IntPtr; GenerationHolder gh; - gh.hold(GenerationHeldBase::UP(new RcuVectorHeld<int32_t>(sizeof(int32_t), - IntPtr(new int32_t(0))))); + 
gh.hold(std::make_unique<RcuVectorHeld<int32_t>>(sizeof(int32_t), 0)); gh.transferHoldLists(0); - gh.hold(GenerationHeldBase::UP(new RcuVectorHeld<int32_t>(sizeof(int32_t), - IntPtr(new int32_t(1))))); + gh.hold(std::make_unique<RcuVectorHeld<int32_t>>(sizeof(int32_t), 1)); gh.transferHoldLists(1); - gh.hold(GenerationHeldBase::UP(new RcuVectorHeld<int32_t>(sizeof(int32_t), - IntPtr(new int32_t(2))))); + gh.hold(std::make_unique<RcuVectorHeld<int32_t>>(sizeof(int32_t), 2)); gh.transferHoldLists(2); - gh.hold(GenerationHeldBase::UP(new RcuVectorHeld<int32_t>(sizeof(int32_t), - IntPtr(new int32_t(4))))); + gh.hold(std::make_unique<RcuVectorHeld<int32_t>>(sizeof(int32_t), 4)); gh.transferHoldLists(4); EXPECT_EQUAL(4u * sizeof(int32_t), gh.getHeldBytes()); gh.trimHoldLists(0); @@ -40,8 +35,7 @@ TEST("test generation holder") EXPECT_EQUAL(3u * sizeof(int32_t), gh.getHeldBytes()); gh.trimHoldLists(2); EXPECT_EQUAL(2u * sizeof(int32_t), gh.getHeldBytes()); - gh.hold(GenerationHeldBase::UP(new RcuVectorHeld<int32_t>(sizeof(int32_t), - IntPtr(new int32_t(6))))); + gh.hold(std::make_unique<RcuVectorHeld<int32_t>>(sizeof(int32_t), 6)); gh.transferHoldLists(6); EXPECT_EQUAL(3u * sizeof(int32_t), gh.getHeldBytes()); gh.trimHoldLists(6); diff --git a/vespalib/src/vespa/vespalib/btree/btree.h b/vespalib/src/vespa/vespalib/btree/btree.h index 2b03e70fbdf..f87d5751743 100644 --- a/vespalib/src/vespa/vespalib/btree/btree.h +++ b/vespalib/src/vespa/vespalib/btree/btree.h @@ -6,6 +6,8 @@ #include "noaggrcalc.h" #include <vespa/vespalib/util/generationhandler.h> +namespace vespalib::datastore { class CompactionStrategy; } + namespace vespalib::btree { /** @@ -149,7 +151,7 @@ public: _tree.thaw(itr); } - void compact_worst(); + void compact_worst(const datastore::CompactionStrategy& compaction_strategy); template <typename FunctionType> void diff --git a/vespalib/src/vespa/vespalib/btree/btree.hpp b/vespalib/src/vespa/vespalib/btree/btree.hpp index c4a588bc63e..473d1f4735e 100644 --- 
a/vespalib/src/vespa/vespalib/btree/btree.hpp +++ b/vespalib/src/vespa/vespalib/btree/btree.hpp @@ -26,9 +26,9 @@ BTree<KeyT, DataT, AggrT, CompareT, TraitsT, AggrCalcT>::~BTree() template <typename KeyT, typename DataT, typename AggrT, typename CompareT, typename TraitsT, class AggrCalcT> void -BTree<KeyT, DataT, AggrT, CompareT, TraitsT, AggrCalcT>::compact_worst() +BTree<KeyT, DataT, AggrT, CompareT, TraitsT, AggrCalcT>::compact_worst(const datastore::CompactionStrategy& compaction_strategy) { - auto to_hold = _alloc.start_compact_worst(); + auto to_hold = _alloc.start_compact_worst(compaction_strategy); _tree.move_nodes(_alloc); _alloc.finishCompact(to_hold); } diff --git a/vespalib/src/vespa/vespalib/btree/btreeiterator.h b/vespalib/src/vespa/vespalib/btree/btreeiterator.h index 325ce0e0e47..30123b1946e 100644 --- a/vespalib/src/vespa/vespalib/btree/btreeiterator.h +++ b/vespalib/src/vespa/vespalib/btree/btreeiterator.h @@ -113,6 +113,9 @@ public: return _node->getData(_idx); } + // Only use during compaction when changing reference to moved value + DataType &getWData() { return getWNode()->getWData(_idx); } + bool valid() const { @@ -881,6 +884,9 @@ public: _leaf.getWNode()->writeData(_leaf.getIdx(), data); } + // Only use during compaction when changing reference to moved value + DataType &getWData() { return _leaf.getWData(); } + /** * Set a new key for the current iterator position. * The new key must have the same semantic meaning as the old key. 
diff --git a/vespalib/src/vespa/vespalib/btree/btreenode.h b/vespalib/src/vespa/vespalib/btree/btreenode.h index d8752d77f0b..468f17fcd1a 100644 --- a/vespalib/src/vespa/vespalib/btree/btreenode.h +++ b/vespalib/src/vespa/vespalib/btree/btreenode.h @@ -99,6 +99,8 @@ public: } const DataT &getData(uint32_t idx) const { return _data[idx]; } + // Only use during compaction when changing reference to moved value + DataT &getWData(uint32_t idx) { return _data[idx]; } void setData(uint32_t idx, const DataT &data) { _data[idx] = data; } static bool hasData() { return true; } }; @@ -120,6 +122,9 @@ public: return BTreeNoLeafData::_instance; } + // Only use during compaction when changing reference to moved value + BTreeNoLeafData &getWData(uint32_t) const { return BTreeNoLeafData::_instance; } + void setData(uint32_t idx, const BTreeNoLeafData &data) { (void) idx; (void) data; diff --git a/vespalib/src/vespa/vespalib/btree/btreenodeallocator.h b/vespalib/src/vespa/vespalib/btree/btreenodeallocator.h index 93615ddef82..27e73b3a2b6 100644 --- a/vespalib/src/vespa/vespalib/btree/btreenodeallocator.h +++ b/vespalib/src/vespa/vespalib/btree/btreenodeallocator.h @@ -29,6 +29,7 @@ public: using BTreeRootBaseType = BTreeRootBase<KeyT, DataT, AggrT, INTERNAL_SLOTS, LEAF_SLOTS>; using generation_t = vespalib::GenerationHandler::generation_t; using NodeStore = BTreeNodeStore<KeyT, DataT, AggrT, INTERNAL_SLOTS, LEAF_SLOTS>; + using CompactionStrategy = datastore::CompactionStrategy; using EntryRef = datastore::EntryRef; using DataStoreBase = datastore::DataStoreBase; @@ -165,7 +166,7 @@ public: bool getCompacting(EntryRef ref) const { return _nodeStore.getCompacting(ref); } std::vector<uint32_t> startCompact() { return _nodeStore.startCompact(); } - std::vector<uint32_t> start_compact_worst() { return _nodeStore.start_compact_worst(); } + std::vector<uint32_t> start_compact_worst(const CompactionStrategy& compaction_strategy) { return 
_nodeStore.start_compact_worst(compaction_strategy); } void finishCompact(const std::vector<uint32_t> &toHold) { return _nodeStore.finishCompact(toHold); diff --git a/vespalib/src/vespa/vespalib/btree/btreenodestore.h b/vespalib/src/vespa/vespalib/btree/btreenodestore.h index 70a9ba6c73c..444bf641899 100644 --- a/vespalib/src/vespa/vespalib/btree/btreenodestore.h +++ b/vespalib/src/vespa/vespalib/btree/btreenodestore.h @@ -56,6 +56,7 @@ public: typedef typename LeafNodeType::RefPair LeafNodeTypeRefPair; typedef vespalib::GenerationHandler::generation_t generation_t; using EntryRef = datastore::EntryRef; + using CompactionStrategy = datastore::CompactionStrategy; enum NodeTypes { @@ -159,7 +160,7 @@ public: std::vector<uint32_t> startCompact(); - std::vector<uint32_t> start_compact_worst(); + std::vector<uint32_t> start_compact_worst(const CompactionStrategy& compaction_strategy); void finishCompact(const std::vector<uint32_t> &toHold); diff --git a/vespalib/src/vespa/vespalib/btree/btreenodestore.hpp b/vespalib/src/vespa/vespalib/btree/btreenodestore.hpp index ff4fa59cd74..91953507eb0 100644 --- a/vespalib/src/vespa/vespalib/btree/btreenodestore.hpp +++ b/vespalib/src/vespa/vespalib/btree/btreenodestore.hpp @@ -3,6 +3,7 @@ #pragma once #include "btreenodestore.h" +#include <vespa/vespalib/datastore/compaction_spec.h> #include <vespa/vespalib/datastore/datastore.hpp> namespace vespalib::btree { @@ -71,9 +72,9 @@ template <typename KeyT, typename DataT, typename AggrT, size_t INTERNAL_SLOTS, size_t LEAF_SLOTS> std::vector<uint32_t> BTreeNodeStore<KeyT, DataT, AggrT, INTERNAL_SLOTS, LEAF_SLOTS>:: -start_compact_worst() +start_compact_worst(const CompactionStrategy &compaction_strategy) { - return _store.startCompactWorstBuffers(true, false); + return _store.startCompactWorstBuffers(datastore::CompactionSpec(true, false), compaction_strategy); } template <typename KeyT, typename DataT, typename AggrT, diff --git a/vespalib/src/vespa/vespalib/btree/btreestore.h 
b/vespalib/src/vespa/vespalib/btree/btreestore.h index 82913987e44..a79259c6e57 100644 --- a/vespalib/src/vespa/vespalib/btree/btreestore.h +++ b/vespalib/src/vespa/vespalib/btree/btreestore.h @@ -49,6 +49,8 @@ public: TraitsT::INTERNAL_SLOTS, TraitsT::LEAF_SLOTS, AggrCalcT> Builder; + using CompactionSpec = datastore::CompactionSpec; + using CompactionStrategy = datastore::CompactionStrategy; using EntryRef = datastore::EntryRef; template <typename EntryType> using BufferType = datastore::BufferType<EntryType>; @@ -298,6 +300,9 @@ public: bool isSmallArray(const EntryRef ref) const; + static bool isBTree(uint32_t typeId) { return typeId == BUFFERTYPE_BTREE; } + bool isBTree(RefType ref) const { return isBTree(getTypeId(ref)); } + /** * Returns the cluster size for the type id. * Cluster size == 0 means we have a tree for the given reference. @@ -389,12 +394,12 @@ public: void foreach_frozen(EntryRef ref, FunctionType func) const; - std::vector<uint32_t> start_compact_worst_btree_nodes(); + std::vector<uint32_t> start_compact_worst_btree_nodes(const CompactionStrategy& compaction_strategy); void finish_compact_worst_btree_nodes(const std::vector<uint32_t>& to_hold); - void move_btree_nodes(EntryRef ref); + void move_btree_nodes(const std::vector<EntryRef>& refs); - std::vector<uint32_t> start_compact_worst_buffers(); - EntryRef move(EntryRef ref); + std::vector<uint32_t> start_compact_worst_buffers(CompactionSpec compaction_spec, const CompactionStrategy& compaction_strategy); + void move(std::vector<EntryRef>& refs); private: static constexpr size_t MIN_BUFFER_ARRAYS = 128u; diff --git a/vespalib/src/vespa/vespalib/btree/btreestore.hpp b/vespalib/src/vespa/vespalib/btree/btreestore.hpp index 15c546a0368..c0985ff8f94 100644 --- a/vespalib/src/vespa/vespalib/btree/btreestore.hpp +++ b/vespalib/src/vespa/vespalib/btree/btreestore.hpp @@ -5,6 +5,7 @@ #include "btreestore.h" #include "btreebuilder.h" #include "btreebuilder.hpp" +#include 
<vespa/vespalib/datastore/compaction_spec.h> #include <vespa/vespalib/datastore/datastore.hpp> #include <vespa/vespalib/util/optimized.h> @@ -972,10 +973,10 @@ template <typename KeyT, typename DataT, typename AggrT, typename CompareT, typename TraitsT, typename AggrCalcT> std::vector<uint32_t> BTreeStore<KeyT, DataT, AggrT, CompareT, TraitsT, AggrCalcT>:: -start_compact_worst_btree_nodes() +start_compact_worst_btree_nodes(const CompactionStrategy& compaction_strategy) { _builder.clear(); - return _allocator.start_compact_worst(); + return _allocator.start_compact_worst(compaction_strategy); } template <typename KeyT, typename DataT, typename AggrT, typename CompareT, @@ -991,15 +992,15 @@ template <typename KeyT, typename DataT, typename AggrT, typename CompareT, typename TraitsT, typename AggrCalcT> void BTreeStore<KeyT, DataT, AggrT, CompareT, TraitsT, AggrCalcT>:: -move_btree_nodes(EntryRef ref) +move_btree_nodes(const std::vector<EntryRef>& refs) { - if (ref.valid()) { + for (auto& ref : refs) { RefType iRef(ref); - uint32_t clusterSize = getClusterSize(iRef); - if (clusterSize == 0) { - BTreeType *tree = getWTreeEntry(iRef); - tree->move_nodes(_allocator); - } + assert(iRef.valid()); + uint32_t typeId = getTypeId(iRef); + assert(isBTree(typeId)); + BTreeType *tree = getWTreeEntry(iRef); + tree->move_nodes(_allocator); } } @@ -1007,31 +1008,33 @@ template <typename KeyT, typename DataT, typename AggrT, typename CompareT, typename TraitsT, typename AggrCalcT> std::vector<uint32_t> BTreeStore<KeyT, DataT, AggrT, CompareT, TraitsT, AggrCalcT>:: -start_compact_worst_buffers() +start_compact_worst_buffers(CompactionSpec compaction_spec, const CompactionStrategy& compaction_strategy) { freeze(); - return _store.startCompactWorstBuffers(true, false); + return _store.startCompactWorstBuffers(compaction_spec, compaction_strategy); } template <typename KeyT, typename DataT, typename AggrT, typename CompareT, typename TraitsT, typename AggrCalcT> -typename 
BTreeStore<KeyT, DataT, AggrT, CompareT, TraitsT, AggrCalcT>::EntryRef +void BTreeStore<KeyT, DataT, AggrT, CompareT, TraitsT, AggrCalcT>:: -move(EntryRef ref) +move(std::vector<EntryRef> &refs) { - if (!ref.valid() || !_store.getCompacting(ref)) { - return ref; - } - RefType iRef(ref); - uint32_t clusterSize = getClusterSize(iRef); - if (clusterSize == 0) { - BTreeType *tree = getWTreeEntry(iRef); - auto ref_and_ptr = allocBTreeCopy(*tree); - tree->prepare_hold(); - return ref_and_ptr.ref; + for (auto& ref : refs) { + RefType iRef(ref); + assert(iRef.valid()); + assert(_store.getCompacting(iRef)); + uint32_t clusterSize = getClusterSize(iRef); + if (clusterSize == 0) { + BTreeType *tree = getWTreeEntry(iRef); + auto ref_and_ptr = allocBTreeCopy(*tree); + tree->prepare_hold(); + ref = ref_and_ptr.ref; + } else { + const KeyDataType *shortArray = getKeyDataEntry(iRef, clusterSize); + ref = allocKeyDataCopy(shortArray, clusterSize).ref; + } } - const KeyDataType *shortArray = getKeyDataEntry(iRef, clusterSize); - return allocKeyDataCopy(shortArray, clusterSize).ref; } } diff --git a/vespalib/src/vespa/vespalib/datastore/CMakeLists.txt b/vespalib/src/vespa/vespalib/datastore/CMakeLists.txt index 6c6f5258555..c36077e4dd0 100644 --- a/vespalib/src/vespa/vespalib/datastore/CMakeLists.txt +++ b/vespalib/src/vespa/vespalib/datastore/CMakeLists.txt @@ -5,9 +5,11 @@ vespa_add_library(vespalib_vespalib_datastore OBJECT array_store_config.cpp buffer_type.cpp bufferstate.cpp + compaction_strategy.cpp datastore.cpp datastorebase.cpp entryref.cpp + entry_ref_filter.cpp fixed_size_hash_map.cpp sharded_hash_map.cpp unique_store.cpp diff --git a/vespalib/src/vespa/vespalib/datastore/array_store.h b/vespalib/src/vespa/vespalib/datastore/array_store.h index 3ba0caae5b9..d9b62c310b5 100644 --- a/vespalib/src/vespa/vespalib/datastore/array_store.h +++ b/vespalib/src/vespa/vespalib/datastore/array_store.h @@ -96,7 +96,7 @@ public: } void remove(EntryRef ref); - ICompactionContext::UP 
compactWorst(bool compactMemory, bool compactAddressSpace); + ICompactionContext::UP compactWorst(CompactionSpec compaction_spec, const CompactionStrategy& compaction_strategy); vespalib::MemoryUsage getMemoryUsage() const { return _store.getMemoryUsage(); } /** diff --git a/vespalib/src/vespa/vespalib/datastore/array_store.hpp b/vespalib/src/vespa/vespalib/datastore/array_store.hpp index 5600c64eb3d..bbbd52c354d 100644 --- a/vespalib/src/vespa/vespalib/datastore/array_store.hpp +++ b/vespalib/src/vespa/vespalib/datastore/array_store.hpp @@ -3,6 +3,8 @@ #pragma once #include "array_store.h" +#include "compaction_spec.h" +#include "entry_ref_filter.h" #include "datastore.hpp" #include <atomic> #include <algorithm> @@ -127,47 +129,38 @@ private: DataStoreBase &_dataStore; ArrayStoreType &_store; std::vector<uint32_t> _bufferIdsToCompact; + EntryRefFilter _filter; - bool compactingBuffer(uint32_t bufferId) { - return std::find(_bufferIdsToCompact.begin(), _bufferIdsToCompact.end(), - bufferId) != _bufferIdsToCompact.end(); - } public: CompactionContext(DataStoreBase &dataStore, ArrayStoreType &store, std::vector<uint32_t> bufferIdsToCompact) : _dataStore(dataStore), _store(store), - _bufferIdsToCompact(std::move(bufferIdsToCompact)) - {} + _bufferIdsToCompact(std::move(bufferIdsToCompact)), + _filter(RefT::numBuffers(), RefT::offset_bits) + { + _filter.add_buffers(_bufferIdsToCompact); + } ~CompactionContext() override { _dataStore.finishCompact(_bufferIdsToCompact); } void compact(vespalib::ArrayRef<EntryRef> refs) override { - if (!_bufferIdsToCompact.empty()) { - for (auto &ref : refs) { - if (ref.valid()) { - RefT internalRef(ref); - if (compactingBuffer(internalRef.bufferId())) { - EntryRef newRef = _store.add(_store.get(ref)); - std::atomic_thread_fence(std::memory_order_release); - ref = newRef; - } - } + for (auto &ref : refs) { + if (ref.valid() && _filter.has(ref)) { + EntryRef newRef = _store.add(_store.get(ref)); + 
std::atomic_thread_fence(std::memory_order_release); + ref = newRef; } } } void compact(vespalib::ArrayRef<AtomicEntryRef> refs) override { - if (!_bufferIdsToCompact.empty()) { - for (auto &ref : refs) { - if (ref.load_relaxed().valid()) { - RefT internalRef(ref.load_relaxed()); - if (compactingBuffer(internalRef.bufferId())) { - EntryRef newRef = _store.add(_store.get(ref.load_relaxed())); - std::atomic_thread_fence(std::memory_order_release); - ref.store_release(newRef); - } - } + for (auto &atomic_entry_ref : refs) { + auto ref = atomic_entry_ref.load_relaxed(); + if (ref.valid() && _filter.has(ref)) { + EntryRef newRef = _store.add(_store.get(ref)); + std::atomic_thread_fence(std::memory_order_release); + atomic_entry_ref.store_release(newRef); } } } @@ -177,9 +170,9 @@ public: template <typename EntryT, typename RefT> ICompactionContext::UP -ArrayStore<EntryT, RefT>::compactWorst(bool compactMemory, bool compactAddressSpace) +ArrayStore<EntryT, RefT>::compactWorst(CompactionSpec compaction_spec, const CompactionStrategy &compaction_strategy) { - std::vector<uint32_t> bufferIdsToCompact = _store.startCompactWorstBuffers(compactMemory, compactAddressSpace); + std::vector<uint32_t> bufferIdsToCompact = _store.startCompactWorstBuffers(compaction_spec, compaction_strategy); return std::make_unique<arraystore::CompactionContext<EntryT, RefT>> (_store, *this, std::move(bufferIdsToCompact)); } diff --git a/vespalib/src/vespa/vespalib/datastore/compaction_spec.h b/vespalib/src/vespa/vespalib/datastore/compaction_spec.h new file mode 100644 index 00000000000..c554f3229dd --- /dev/null +++ b/vespalib/src/vespa/vespalib/datastore/compaction_spec.h @@ -0,0 +1,34 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. + +#pragma once + +namespace vespalib::datastore { + +/* + * Class describing how to compact a compactable data structure. 
+ * + * memory - to reduce amount of "dead" memory + * address_space - to avoid running out of free buffers in data store + * (i.e. move data from small buffers to larger buffers) + */ +class CompactionSpec +{ + bool _compact_memory; + bool _compact_address_space; +public: + CompactionSpec() + : _compact_memory(false), + _compact_address_space(false) + { + } + CompactionSpec(bool compact_memory_, bool compact_address_space_) noexcept + : _compact_memory(compact_memory_), + _compact_address_space(compact_address_space_) + { + } + bool compact() const noexcept { return _compact_memory || _compact_address_space; } + bool compact_memory() const noexcept { return _compact_memory; } + bool compact_address_space() const noexcept { return _compact_address_space; } +}; + +} diff --git a/vespalib/src/vespa/vespalib/datastore/compaction_strategy.cpp b/vespalib/src/vespa/vespalib/datastore/compaction_strategy.cpp new file mode 100644 index 00000000000..2dbd501f78e --- /dev/null +++ b/vespalib/src/vespa/vespalib/datastore/compaction_strategy.cpp @@ -0,0 +1,37 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+ +#include "compaction_strategy.h" +#include "compaction_spec.h" +#include <vespa/vespalib/util/memoryusage.h> +#include <vespa/vespalib/util/address_space.h> +#include <iostream> + +namespace vespalib::datastore { + +bool +CompactionStrategy::should_compact_memory(const MemoryUsage& memory_usage) const +{ + return should_compact_memory(memory_usage.usedBytes(), memory_usage.deadBytes()); +} + +bool +CompactionStrategy::should_compact_address_space(const AddressSpace& address_space) const +{ + return should_compact_address_space(address_space.used(), address_space.dead()); +} + +CompactionSpec +CompactionStrategy::should_compact(const MemoryUsage& memory_usage, const AddressSpace& address_space) const +{ + return CompactionSpec(should_compact_memory(memory_usage), should_compact_address_space(address_space)); +} + +std::ostream& operator<<(std::ostream& os, const CompactionStrategy& compaction_strategy) +{ + os << "{maxDeadBytesRatio=" << compaction_strategy.getMaxDeadBytesRatio() << + ", maxDeadAddressSpaceRatio=" << compaction_strategy.getMaxDeadAddressSpaceRatio() << + "}"; + return os; +} + +} diff --git a/searchcommon/src/vespa/searchcommon/common/compaction_strategy.h b/vespalib/src/vespa/vespalib/datastore/compaction_strategy.h index ced28436471..9ca4a64a55b 100644 --- a/searchcommon/src/vespa/searchcommon/common/compaction_strategy.h +++ b/vespalib/src/vespa/vespalib/datastore/compaction_strategy.h @@ -3,51 +3,73 @@ #pragma once #include <iosfwd> +#include <cstdint> -namespace search { +namespace vespalib { + +class AddressSpace; +class MemoryUsage; + +} + +namespace vespalib::datastore { + +class CompactionSpec; /* * Class describing compaction strategy for a compactable data structure. 
*/ class CompactionStrategy { +public: + static constexpr size_t DEAD_BYTES_SLACK = 0x10000u; + static constexpr size_t DEAD_ADDRESS_SPACE_SLACK = 0x10000u; private: double _maxDeadBytesRatio; // Max ratio of dead bytes before compaction double _maxDeadAddressSpaceRatio; // Max ratio of dead address space before compaction + uint32_t _max_buffers; // Max number of buffers to compact for each reason (memory usage, address space usage) + bool should_compact_memory(size_t used_bytes, size_t dead_bytes) const { + return ((dead_bytes >= DEAD_BYTES_SLACK) && + (dead_bytes > used_bytes * getMaxDeadBytesRatio())); + } + bool should_compact_address_space(size_t used_address_space, size_t dead_address_space) const { + return ((dead_address_space >= DEAD_ADDRESS_SPACE_SLACK) && + (dead_address_space > used_address_space * getMaxDeadAddressSpaceRatio())); + } public: CompactionStrategy() noexcept : _maxDeadBytesRatio(0.05), - _maxDeadAddressSpaceRatio(0.2) + _maxDeadAddressSpaceRatio(0.2), + _max_buffers(1) { } CompactionStrategy(double maxDeadBytesRatio, double maxDeadAddressSpaceRatio) noexcept : _maxDeadBytesRatio(maxDeadBytesRatio), - _maxDeadAddressSpaceRatio(maxDeadAddressSpaceRatio) + _maxDeadAddressSpaceRatio(maxDeadAddressSpaceRatio), + _max_buffers(1) + { + } + CompactionStrategy(double maxDeadBytesRatio, double maxDeadAddressSpaceRatio, uint32_t max_buffers) noexcept + : _maxDeadBytesRatio(maxDeadBytesRatio), + _maxDeadAddressSpaceRatio(maxDeadAddressSpaceRatio), + _max_buffers(max_buffers) { } double getMaxDeadBytesRatio() const { return _maxDeadBytesRatio; } double getMaxDeadAddressSpaceRatio() const { return _maxDeadAddressSpaceRatio; } + uint32_t get_max_buffers() const noexcept { return _max_buffers; } bool operator==(const CompactionStrategy & rhs) const { - return _maxDeadBytesRatio == rhs._maxDeadBytesRatio && - _maxDeadAddressSpaceRatio == rhs._maxDeadAddressSpaceRatio; + return (_maxDeadBytesRatio == rhs._maxDeadBytesRatio) && + (_maxDeadAddressSpaceRatio 
== rhs._maxDeadAddressSpaceRatio) && + (_max_buffers == rhs._max_buffers); } bool operator!=(const CompactionStrategy & rhs) const { return !(operator==(rhs)); } - static constexpr size_t DEAD_BYTES_SLACK = 0x10000u; - - bool should_compact_memory(size_t used_bytes, size_t dead_bytes) const { - return ((dead_bytes >= DEAD_BYTES_SLACK) && - (dead_bytes > used_bytes * getMaxDeadBytesRatio())); - } - - static constexpr size_t DEAD_ADDRESS_SPACE_SLACK = 0x10000u; - - bool should_compact_address_space(size_t used_address_space, size_t dead_address_space) const { - return ((dead_address_space >= DEAD_ADDRESS_SPACE_SLACK) && - (dead_address_space > used_address_space * getMaxDeadAddressSpaceRatio())); - } + bool should_compact_memory(const MemoryUsage& memory_usage) const; + bool should_compact_address_space(const AddressSpace& address_space) const; + CompactionSpec should_compact(const MemoryUsage& memory_usage, const AddressSpace& address_space) const; }; std::ostream& operator<<(std::ostream& os, const CompactionStrategy& compaction_strategy); -} // namespace search +} diff --git a/vespalib/src/vespa/vespalib/datastore/datastorebase.cpp b/vespalib/src/vespa/vespalib/datastore/datastorebase.cpp index b5cab50bc33..059171e1f02 100644 --- a/vespalib/src/vespa/vespalib/datastore/datastorebase.cpp +++ b/vespalib/src/vespa/vespalib/datastore/datastorebase.cpp @@ -1,6 +1,7 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
#include "datastore.h" +#include "compaction_spec.h" #include <vespa/vespalib/util/array.hpp> #include <vespa/vespalib/util/stringfmt.h> #include <limits> @@ -526,8 +527,9 @@ DataStoreBase::markCompacting(uint32_t bufferId) } std::vector<uint32_t> -DataStoreBase::startCompactWorstBuffers(bool compactMemory, bool compactAddressSpace) +DataStoreBase::startCompactWorstBuffers(CompactionSpec compaction_spec, const CompactionStrategy& compaction_strategy) { + (void) compaction_strategy; constexpr uint32_t noBufferId = std::numeric_limits<uint32_t>::max(); uint32_t worstMemoryBufferId = noBufferId; uint32_t worstAddressSpaceBufferId = noBufferId; @@ -540,11 +542,11 @@ DataStoreBase::startCompactWorstBuffers(bool compactMemory, bool compactAddressS uint32_t arraySize = typeHandler->getArraySize(); uint32_t reservedElements = typeHandler->getReservedElements(bufferId); size_t deadElems = state.getDeadElems() - reservedElements; - if (compactMemory && deadElems > worstDeadElems) { + if (compaction_spec.compact_memory() && deadElems > worstDeadElems) { worstMemoryBufferId = bufferId; worstDeadElems = deadElems; } - if (compactAddressSpace) { + if (compaction_spec.compact_address_space()) { size_t deadArrays = deadElems / arraySize; if (deadArrays > worstDeadArrays) { worstAddressSpaceBufferId = bufferId; diff --git a/vespalib/src/vespa/vespalib/datastore/datastorebase.h b/vespalib/src/vespa/vespalib/datastore/datastorebase.h index 6903ae12c9c..e98d9531806 100644 --- a/vespalib/src/vespa/vespalib/datastore/datastorebase.h +++ b/vespalib/src/vespa/vespalib/datastore/datastorebase.h @@ -12,6 +12,9 @@ namespace vespalib::datastore { +class CompactionSpec; +class CompactionStrategy; + /** * Abstract class used to store data of potential different types in underlying memory buffers. 
* @@ -368,7 +371,7 @@ public: } uint32_t startCompactWorstBuffer(uint32_t typeId); - std::vector<uint32_t> startCompactWorstBuffers(bool compactMemory, bool compactAddressSpace); + std::vector<uint32_t> startCompactWorstBuffers(CompactionSpec compaction_spec, const CompactionStrategy &compaction_strategy); uint64_t get_compaction_count() const { return _compaction_count.load(std::memory_order_relaxed); } void inc_compaction_count() const { ++_compaction_count; } bool has_held_buffers() const noexcept { return _hold_buffer_count != 0u; } diff --git a/vespalib/src/vespa/vespalib/datastore/entry_ref_filter.cpp b/vespalib/src/vespa/vespalib/datastore/entry_ref_filter.cpp new file mode 100644 index 00000000000..87c3c87636c --- /dev/null +++ b/vespalib/src/vespa/vespalib/datastore/entry_ref_filter.cpp @@ -0,0 +1,28 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. + +#include "entry_ref_filter.h" + +namespace vespalib::datastore { + +EntryRefFilter::EntryRefFilter(std::vector<bool> filter, uint32_t offset_bits) + : _filter(std::move(filter)), + _offset_bits(offset_bits) +{ +} + +EntryRefFilter::EntryRefFilter(uint32_t num_buffers, uint32_t offset_bits) + : _filter(num_buffers), + _offset_bits(offset_bits) +{ +} + +EntryRefFilter::~EntryRefFilter() = default; + +EntryRefFilter +EntryRefFilter::create_all_filter(uint32_t num_buffers, uint32_t offset_bits) +{ + std::vector<bool> filter(num_buffers, true); + return EntryRefFilter(std::move(filter), offset_bits); +} + +} diff --git a/vespalib/src/vespa/vespalib/datastore/entry_ref_filter.h b/vespalib/src/vespa/vespalib/datastore/entry_ref_filter.h new file mode 100644 index 00000000000..c06d843fbd0 --- /dev/null +++ b/vespalib/src/vespa/vespalib/datastore/entry_ref_filter.h @@ -0,0 +1,35 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+ +#pragma once + +#include "entryref.h" +#include <vector> + +namespace vespalib::datastore { + +/* + * Class to filter entry refs based on which buffer the entry is referencing. + * + * Buffers being allowed have corresponding bit in _filter set. + */ +class EntryRefFilter { + std::vector<bool> _filter; + uint32_t _offset_bits; + EntryRefFilter(std::vector<bool> filter, uint32_t offset_bits); +public: + EntryRefFilter(uint32_t num_buffers, uint32_t offset_bits); + ~EntryRefFilter(); + bool has(EntryRef ref) const { + uint32_t buffer_id = ref.buffer_id(_offset_bits); + return _filter[buffer_id]; + } + void add_buffer(uint32_t buffer_id) { _filter[buffer_id] = true; } + void add_buffers(const std::vector<uint32_t>& ids) { + for (auto buffer_id : ids) { + _filter[buffer_id] = true; + } + } + static EntryRefFilter create_all_filter(uint32_t num_buffers, uint32_t offset_bits); +}; + +} diff --git a/vespalib/src/vespa/vespalib/datastore/fixed_size_hash_map.cpp b/vespalib/src/vespa/vespalib/datastore/fixed_size_hash_map.cpp index db9fee8ea70..6f001ce3c94 100644 --- a/vespalib/src/vespa/vespalib/datastore/fixed_size_hash_map.cpp +++ b/vespalib/src/vespa/vespalib/datastore/fixed_size_hash_map.cpp @@ -2,6 +2,7 @@ #include "fixed_size_hash_map.h" #include "entry_comparator.h" +#include "entry_ref_filter.h" #include "i_compactable.h" #include <vespa/vespalib/util/array.hpp> #include <vespa/vespalib/util/memoryusage.h> @@ -182,7 +183,7 @@ FixedSizeHashMap::foreach_key(const std::function<void(EntryRef)>& callback) con } void -FixedSizeHashMap::move_keys(ICompactable& compactable, const std::vector<bool>& compacting_buffers, uint32_t entry_ref_offset_bits) +FixedSizeHashMap::move_keys(ICompactable& compactable, const EntryRefFilter &compacting_buffers) { for (auto& chain_head : _chain_heads) { uint32_t node_idx = chain_head.load_relaxed(); @@ -190,8 +191,7 @@ FixedSizeHashMap::move_keys(ICompactable& compactable, const std::vector<bool>& auto& node = _nodes[node_idx]; EntryRef 
old_ref = node.get_kv().first.load_relaxed(); assert(old_ref.valid()); - uint32_t buffer_id = old_ref.buffer_id(entry_ref_offset_bits); - if (compacting_buffers[buffer_id]) { + if (compacting_buffers.has(old_ref)) { EntryRef new_ref = compactable.move(old_ref); node.get_kv().first.store_release(new_ref); } @@ -220,4 +220,104 @@ FixedSizeHashMap::normalize_values(const std::function<EntryRef(EntryRef)>& norm return changed; } +namespace { + +class ChangeWriter { + std::vector<AtomicEntryRef*> _atomic_refs; +public: + ChangeWriter(uint32_t capacity); + ~ChangeWriter(); + bool write(const std::vector<EntryRef> &refs); + void emplace_back(AtomicEntryRef &atomic_ref) { _atomic_refs.emplace_back(&atomic_ref); } +}; + +ChangeWriter::ChangeWriter(uint32_t capacity) + : _atomic_refs() +{ + _atomic_refs.reserve(capacity); +} + +ChangeWriter::~ChangeWriter() = default; + +bool +ChangeWriter::write(const std::vector<EntryRef> &refs) +{ + bool changed = false; + assert(refs.size() == _atomic_refs.size()); + auto atomic_ref = _atomic_refs.begin(); + for (auto ref : refs) { + EntryRef old_ref = (*atomic_ref)->load_relaxed(); + if (ref != old_ref) { + (*atomic_ref)->store_release(ref); + changed = true; + } + ++atomic_ref; + } + assert(atomic_ref == _atomic_refs.end()); + _atomic_refs.clear(); + return changed; +} + +} + +bool +FixedSizeHashMap::normalize_values(const std::function<void(std::vector<EntryRef>&)>& normalize, const EntryRefFilter& filter) +{ + std::vector<EntryRef> refs; + refs.reserve(1024); + bool changed = false; + ChangeWriter change_writer(refs.capacity()); + for (auto& chain_head : _chain_heads) { + uint32_t node_idx = chain_head.load_relaxed(); + while (node_idx != no_node_idx) { + auto& node = _nodes[node_idx]; + EntryRef ref = node.get_kv().second.load_relaxed(); + if (ref.valid()) { + if (filter.has(ref)) { + refs.emplace_back(ref); + change_writer.emplace_back(node.get_kv().second); + if (refs.size() >= refs.capacity()) { + normalize(refs); + changed |= 
change_writer.write(refs); + refs.clear(); + } + } + } + node_idx = node.get_next_node_idx().load(std::memory_order_relaxed); + } + } + if (!refs.empty()) { + normalize(refs); + changed |= change_writer.write(refs); + } + return changed; +} + +void +FixedSizeHashMap::foreach_value(const std::function<void(const std::vector<EntryRef>&)>& callback, const EntryRefFilter& filter) +{ + std::vector<EntryRef> refs; + refs.reserve(1024); + for (auto& chain_head : _chain_heads) { + uint32_t node_idx = chain_head.load_relaxed(); + while (node_idx != no_node_idx) { + auto& node = _nodes[node_idx]; + EntryRef ref = node.get_kv().second.load_relaxed(); + if (ref.valid()) { + if (filter.has(ref)) { + refs.emplace_back(ref); + if (refs.size() >= refs.capacity()) { + callback(refs); + refs.clear(); + } + } + } + node_idx = node.get_next_node_idx().load(std::memory_order_relaxed); + } + } + if (!refs.empty()) { + callback(refs); + } +} + } diff --git a/vespalib/src/vespa/vespalib/datastore/fixed_size_hash_map.h b/vespalib/src/vespa/vespalib/datastore/fixed_size_hash_map.h index 035cd84dbee..c522bcc3c33 100644 --- a/vespalib/src/vespa/vespalib/datastore/fixed_size_hash_map.h +++ b/vespalib/src/vespa/vespalib/datastore/fixed_size_hash_map.h @@ -18,6 +18,7 @@ class MemoryUsage; } namespace vespalib::datastore { +class EntryRefFilter; struct ICompactable; class ShardedHashComparator { @@ -158,8 +159,26 @@ public: size_t size() const noexcept { return _count; } MemoryUsage get_memory_usage() const; void foreach_key(const std::function<void(EntryRef)>& callback) const; - void move_keys(ICompactable& compactable, const std::vector<bool>& compacting_buffers, uint32_t entry_ref_offset_bits); + void move_keys(ICompactable& compactable, const EntryRefFilter &compacting_buffers); + /* + * Scan dictionary and call normalize function for each value. If + * returned value is different then write back the modified value to + * the dictionary. Used when clearing all posting lists. 
+ */ bool normalize_values(const std::function<EntryRef(EntryRef)>& normalize); + /* + * Scan dictionary and call normalize function for batches of values + * that pass the filter. Write back modified values to the dictionary. + * Used by compaction of posting lists when moving short arrays, + * bitvectors or btree roots. + */ + bool normalize_values(const std::function<void(std::vector<EntryRef>&)>& normalize, const EntryRefFilter& filter); + /* + * Scan dictionary and call callback function for batches of values + * that pass the filter. Used by compaction of posting lists when + * moving btree nodes. + */ + void foreach_value(const std::function<void(const std::vector<EntryRef>&)>& callback, const EntryRefFilter& filter); }; } diff --git a/vespalib/src/vespa/vespalib/datastore/i_unique_store_dictionary.h b/vespalib/src/vespa/vespalib/datastore/i_unique_store_dictionary.h index 886ec095dcd..4fd3bcad5e5 100644 --- a/vespalib/src/vespa/vespalib/datastore/i_unique_store_dictionary.h +++ b/vespalib/src/vespa/vespalib/datastore/i_unique_store_dictionary.h @@ -10,7 +10,9 @@ namespace vespalib::datastore { +class CompactionStrategy; class EntryComparator; +class EntryRefFilter; struct ICompactable; class IUniqueStoreDictionaryReadSnapshot; class UniqueStoreAddResult; @@ -28,7 +30,7 @@ public: virtual UniqueStoreAddResult add(const EntryComparator& comp, std::function<EntryRef(void)> insertEntry) = 0; virtual EntryRef find(const EntryComparator& comp) = 0; virtual void remove(const EntryComparator& comp, EntryRef ref) = 0; - virtual void move_keys(ICompactable& compactable, const std::vector<bool>& compacting_buffers, uint32_t entry_ref_offset_bits) = 0; + virtual void move_keys(ICompactable& compactable, const EntryRefFilter& compacting_buffers) = 0; virtual uint32_t get_num_uniques() const = 0; virtual vespalib::MemoryUsage get_memory_usage() const = 0; virtual void build(vespalib::ConstArrayRef<EntryRef>, vespalib::ConstArrayRef<uint32_t> ref_counts, 
std::function<void(EntryRef)> hold) = 0; @@ -40,7 +42,7 @@ public: virtual vespalib::MemoryUsage get_btree_memory_usage() const = 0; virtual vespalib::MemoryUsage get_hash_memory_usage() const = 0; virtual bool has_held_buffers() const = 0; - virtual void compact_worst(bool compact_btree_dictionary, bool compact_hash_dictionary) = 0; + virtual void compact_worst(bool compact_btree_dictionary, bool compact_hash_dictionary, const CompactionStrategy& compaction_strategy) = 0; }; } diff --git a/vespalib/src/vespa/vespalib/datastore/sharded_hash_map.cpp b/vespalib/src/vespa/vespalib/datastore/sharded_hash_map.cpp index da4db92a309..019b98a53dd 100644 --- a/vespalib/src/vespa/vespalib/datastore/sharded_hash_map.cpp +++ b/vespalib/src/vespa/vespalib/datastore/sharded_hash_map.cpp @@ -171,12 +171,12 @@ ShardedHashMap::foreach_key(std::function<void(EntryRef)> callback) const } void -ShardedHashMap::move_keys(ICompactable& compactable, const std::vector<bool>& compacting_buffers, uint32_t entry_ref_offset_bits) +ShardedHashMap::move_keys(ICompactable& compactable, const EntryRefFilter& compacting_buffers) { for (size_t i = 0; i < num_shards; ++i) { auto map = _maps[i].load(std::memory_order_relaxed); if (map != nullptr) { - map->move_keys(compactable, compacting_buffers, entry_ref_offset_bits); + map->move_keys(compactable, compacting_buffers); } } } @@ -195,6 +195,31 @@ ShardedHashMap::normalize_values(std::function<EntryRef(EntryRef)> normalize) } bool +ShardedHashMap::normalize_values(std::function<void(std::vector<EntryRef>&)> normalize, const EntryRefFilter& filter) +{ + bool changed = false; + for (size_t i = 0; i < num_shards; ++i) { + auto map = _maps[i].load(std::memory_order_relaxed); + if (map != nullptr) { + changed |= map->normalize_values(normalize, filter); + } + } + return changed; +} + +void +ShardedHashMap::foreach_value(std::function<void(const std::vector<EntryRef>&)> callback, const EntryRefFilter& filter) +{ + for (size_t i = 0; i < num_shards; ++i) { 
+ auto map = _maps[i].load(std::memory_order_relaxed); + if (map != nullptr) { + map->foreach_value(callback, filter); + } + } +} + + +bool ShardedHashMap::has_held_buffers() const { return _gen_holder.getHeldBytes() != 0; diff --git a/vespalib/src/vespa/vespalib/datastore/sharded_hash_map.h b/vespalib/src/vespa/vespalib/datastore/sharded_hash_map.h index df07f7a1990..e0ba9488351 100644 --- a/vespalib/src/vespa/vespalib/datastore/sharded_hash_map.h +++ b/vespalib/src/vespa/vespalib/datastore/sharded_hash_map.h @@ -11,6 +11,7 @@ namespace vespalib { class MemoryUsage; } namespace vespalib::datastore { class EntryComparator; +class EntryRefFilter; class FixedSizeHashMap; struct ICompactable; @@ -57,8 +58,10 @@ public: const EntryComparator &get_default_comparator() const noexcept { return *_comp; } MemoryUsage get_memory_usage() const; void foreach_key(std::function<void(EntryRef)> callback) const; - void move_keys(ICompactable& compactable, const std::vector<bool>& compacting_buffers, uint32_t entry_ref_offset_bits); + void move_keys(ICompactable& compactable, const EntryRefFilter& compacting_buffers); bool normalize_values(std::function<EntryRef(EntryRef)> normalize); + bool normalize_values(std::function<void(std::vector<EntryRef>&)> normalize, const EntryRefFilter& filter); + void foreach_value(std::function<void(const std::vector<EntryRef>&)> callback, const EntryRefFilter& filter); bool has_held_buffers() const; void compact_worst_shard(); }; diff --git a/vespalib/src/vespa/vespalib/datastore/unique_store.h b/vespalib/src/vespa/vespalib/datastore/unique_store.h index 38643d84be0..aea98f406e8 100644 --- a/vespalib/src/vespa/vespalib/datastore/unique_store.h +++ b/vespalib/src/vespa/vespalib/datastore/unique_store.h @@ -55,11 +55,11 @@ public: EntryRef find(EntryConstRefType value); EntryConstRefType get(EntryRef ref) const { return _allocator.get(ref); } void remove(EntryRef ref); - std::unique_ptr<Remapper> compact_worst(bool compact_memory, bool 
compact_address_space); + std::unique_ptr<Remapper> compact_worst(CompactionSpec compaction_spec, const CompactionStrategy& compaction_strategy); vespalib::MemoryUsage getMemoryUsage() const; vespalib::MemoryUsage get_values_memory_usage() const { return _store.getMemoryUsage(); } vespalib::MemoryUsage get_dictionary_memory_usage() const { return _dict->get_memory_usage(); } - vespalib::AddressSpace get_address_space_usage() const; + vespalib::AddressSpace get_values_address_space_usage() const; // TODO: Consider exposing only the needed functions from allocator Allocator& get_allocator() { return _allocator; } diff --git a/vespalib/src/vespa/vespalib/datastore/unique_store.hpp b/vespalib/src/vespa/vespalib/datastore/unique_store.hpp index d375dbae149..b73b714a6bc 100644 --- a/vespalib/src/vespa/vespalib/datastore/unique_store.hpp +++ b/vespalib/src/vespa/vespalib/datastore/unique_store.hpp @@ -102,11 +102,9 @@ private: std::vector<uint32_t> _bufferIdsToCompact; void allocMapping() { - _compacting_buffer.resize(RefT::numBuffers()); _mapping.resize(RefT::numBuffers()); for (const auto bufferId : _bufferIdsToCompact) { BufferState &state = _dataStore.getBufferState(bufferId); - _compacting_buffer[bufferId] = true; _mapping[bufferId].resize(state.get_used_arrays()); } } @@ -124,7 +122,7 @@ private: } void fillMapping() { - _dict.move_keys(*this, _compacting_buffer, RefT::offset_bits); + _dict.move_keys(*this, _compacting_buffer); } public: @@ -140,6 +138,7 @@ public: _bufferIdsToCompact(std::move(bufferIdsToCompact)) { if (!_bufferIdsToCompact.empty()) { + _compacting_buffer.add_buffers(_bufferIdsToCompact); allocMapping(); fillMapping(); } @@ -158,9 +157,9 @@ public: template <typename EntryT, typename RefT, typename Compare, typename Allocator> std::unique_ptr<typename UniqueStore<EntryT, RefT, Compare, Allocator>::Remapper> -UniqueStore<EntryT, RefT, Compare, Allocator>::compact_worst(bool compact_memory, bool compact_address_space) +UniqueStore<EntryT, RefT, 
Compare, Allocator>::compact_worst(CompactionSpec compaction_spec, const CompactionStrategy& compaction_strategy) { - std::vector<uint32_t> bufferIdsToCompact = _store.startCompactWorstBuffers(compact_memory, compact_address_space); + std::vector<uint32_t> bufferIdsToCompact = _store.startCompactWorstBuffers(compaction_spec, compaction_strategy); if (bufferIdsToCompact.empty()) { return std::unique_ptr<Remapper>(); } else { @@ -179,7 +178,7 @@ UniqueStore<EntryT, RefT, Compare, Allocator>::getMemoryUsage() const template <typename EntryT, typename RefT, typename Compare, typename Allocator> vespalib::AddressSpace -UniqueStore<EntryT, RefT, Compare, Allocator>::get_address_space_usage() const +UniqueStore<EntryT, RefT, Compare, Allocator>::get_values_address_space_usage() const { return _allocator.get_data_store().getAddressSpaceUsage(); } diff --git a/vespalib/src/vespa/vespalib/datastore/unique_store_dictionary.h b/vespalib/src/vespa/vespalib/datastore/unique_store_dictionary.h index 3b0169b5a34..d64588e3242 100644 --- a/vespalib/src/vespa/vespalib/datastore/unique_store_dictionary.h +++ b/vespalib/src/vespa/vespalib/datastore/unique_store_dictionary.h @@ -79,7 +79,7 @@ public: UniqueStoreAddResult add(const EntryComparator& comp, std::function<EntryRef(void)> insertEntry) override; EntryRef find(const EntryComparator& comp) override; void remove(const EntryComparator& comp, EntryRef ref) override; - void move_keys(ICompactable& compactable, const std::vector<bool>& compacting_buffers, uint32_t entry_ref_offset_bits) override; + void move_keys(ICompactable& compactable, const EntryRefFilter& compacting_buffers) override; uint32_t get_num_uniques() const override; vespalib::MemoryUsage get_memory_usage() const override; void build(vespalib::ConstArrayRef<EntryRef>, vespalib::ConstArrayRef<uint32_t> ref_counts, std::function<void(EntryRef)> hold) override; @@ -91,7 +91,7 @@ public: vespalib::MemoryUsage get_btree_memory_usage() const override; vespalib::MemoryUsage 
get_hash_memory_usage() const override; bool has_held_buffers() const override; - void compact_worst(bool compact_btree_dictionary, bool compact_hash_dictionary) override; + void compact_worst(bool compact_btree_dictionary, bool compact_hash_dictionary, const CompactionStrategy& compaction_strategy) override; }; } diff --git a/vespalib/src/vespa/vespalib/datastore/unique_store_dictionary.hpp b/vespalib/src/vespa/vespalib/datastore/unique_store_dictionary.hpp index e88376be9fb..4375b38cf7c 100644 --- a/vespalib/src/vespa/vespalib/datastore/unique_store_dictionary.hpp +++ b/vespalib/src/vespa/vespalib/datastore/unique_store_dictionary.hpp @@ -4,6 +4,7 @@ #include "datastore.hpp" #include "entry_comparator_wrapper.h" +#include "entry_ref_filter.h" #include "i_compactable.h" #include "unique_store_add_result.h" #include "unique_store_dictionary.h" @@ -139,15 +140,14 @@ UniqueStoreDictionary<BTreeDictionaryT, ParentT, HashDictionaryT>::remove(const template <typename BTreeDictionaryT, typename ParentT, typename HashDictionaryT> void -UniqueStoreDictionary<BTreeDictionaryT, ParentT, HashDictionaryT>::move_keys(ICompactable &compactable, const std::vector<bool>& compacting_buffers, uint32_t entry_ref_offset_bits) +UniqueStoreDictionary<BTreeDictionaryT, ParentT, HashDictionaryT>::move_keys(ICompactable &compactable, const EntryRefFilter& compacting_buffers) { if constexpr (has_btree_dictionary) { auto itr = this->_btree_dict.begin(); while (itr.valid()) { EntryRef oldRef(itr.getKey()); assert(oldRef.valid()); - uint32_t buffer_id = oldRef.buffer_id(entry_ref_offset_bits); - if (compacting_buffers[buffer_id]) { + if (compacting_buffers.has(oldRef)) { EntryRef newRef(compactable.move(oldRef)); this->_btree_dict.thaw(itr); itr.writeKey(newRef); @@ -160,7 +160,7 @@ UniqueStoreDictionary<BTreeDictionaryT, ParentT, HashDictionaryT>::move_keys(ICo ++itr; } } else { - this->_hash_dict.move_keys(compactable, compacting_buffers, entry_ref_offset_bits); + 
this->_hash_dict.move_keys(compactable, compacting_buffers); } } @@ -339,11 +339,11 @@ UniqueStoreDictionary<BTreeDictionaryT, ParentT, HashDictionaryT>::has_held_buff template <typename BTreeDictionaryT, typename ParentT, typename HashDictionaryT> void -UniqueStoreDictionary<BTreeDictionaryT, ParentT, HashDictionaryT>::compact_worst(bool compact_btree_dictionary, bool compact_hash_dictionary) +UniqueStoreDictionary<BTreeDictionaryT, ParentT, HashDictionaryT>::compact_worst(bool compact_btree_dictionary, bool compact_hash_dictionary, const CompactionStrategy& compaction_strategy) { if constexpr (has_btree_dictionary) { if (compact_btree_dictionary) { - this->_btree_dict.compact_worst(); + this->_btree_dict.compact_worst(compaction_strategy); } } else { (void) compact_btree_dictionary; diff --git a/vespalib/src/vespa/vespalib/datastore/unique_store_remapper.h b/vespalib/src/vespa/vespalib/datastore/unique_store_remapper.h index 4a8d72c8685..2501c4fafd9 100644 --- a/vespalib/src/vespa/vespalib/datastore/unique_store_remapper.h +++ b/vespalib/src/vespa/vespalib/datastore/unique_store_remapper.h @@ -3,6 +3,7 @@ #pragma once #include "entryref.h" +#include "entry_ref_filter.h" #include <vector> #include <vespa/vespalib/stllike/allocator.h> @@ -18,43 +19,35 @@ public: using RefType = RefT; protected: - std::vector<bool> _compacting_buffer; + EntryRefFilter _compacting_buffer; std::vector<std::vector<EntryRef, allocator_large<EntryRef>>> _mapping; public: UniqueStoreRemapper() - : _compacting_buffer(), + : _compacting_buffer(RefT::numBuffers(), RefT::offset_bits), _mapping() { } virtual ~UniqueStoreRemapper() = default; EntryRef remap(EntryRef ref) const { - if (ref.valid()) { - RefType internal_ref(ref); - if (!_compacting_buffer[internal_ref.bufferId()]) { - // No remapping for references to buffers not being compacted - return ref; - } else { - auto &inner_mapping = _mapping[internal_ref.bufferId()]; - assert(internal_ref.unscaled_offset() < inner_mapping.size()); - 
EntryRef mapped_ref = inner_mapping[internal_ref.unscaled_offset()]; - assert(mapped_ref.valid()); - return mapped_ref; - } - } else { - return EntryRef(); - } + RefType internal_ref(ref); + auto &inner_mapping = _mapping[internal_ref.bufferId()]; + assert(internal_ref.unscaled_offset() < inner_mapping.size()); + EntryRef mapped_ref = inner_mapping[internal_ref.unscaled_offset()]; + assert(mapped_ref.valid()); + return mapped_ref; } void remap(vespalib::ArrayRef<EntryRef> refs) const { for (auto &ref : refs) { - auto mapped_ref = remap(ref); - if (mapped_ref != ref) { - ref = mapped_ref; + if (ref.valid() && _compacting_buffer.has(ref)) { + ref = remap(ref); } } } + const EntryRefFilter& get_entry_ref_filter() const noexcept { return _compacting_buffer; } + virtual void done() = 0; }; diff --git a/vespalib/src/vespa/vespalib/hwaccelrated/iaccelrated.cpp b/vespalib/src/vespa/vespalib/hwaccelrated/iaccelrated.cpp index b4f7eb5cd96..7407ffd6a4e 100644 --- a/vespalib/src/vespa/vespalib/hwaccelrated/iaccelrated.cpp +++ b/vespalib/src/vespa/vespalib/hwaccelrated/iaccelrated.cpp @@ -17,28 +17,18 @@ namespace vespalib::hwaccelrated { namespace { -class Factory { -public: - virtual ~Factory() = default; - virtual IAccelrated::UP create() const = 0; -}; - -class GenericFactory :public Factory{ -public: - IAccelrated::UP create() const override { return std::make_unique<GenericAccelrator>(); } -}; - +IAccelrated::UP create_accelerator() { #ifdef __x86_64__ -class Avx2Factory :public Factory{ -public: - IAccelrated::UP create() const override { return std::make_unique<Avx2Accelrator>(); } -}; - -class Avx512Factory :public Factory{ -public: - IAccelrated::UP create() const override { return std::make_unique<Avx512Accelrator>(); } -}; + __builtin_cpu_init(); + if (__builtin_cpu_supports("avx512f")) { + return std::make_unique<Avx512Accelrator>(); + } + if (__builtin_cpu_supports("avx2")) { + return std::make_unique<Avx2Accelrator>(); + } #endif + return 
std::make_unique<GenericAccelrator>(); +} template<typename T> std::vector<T> createAndFill(size_t sz) { @@ -247,42 +237,14 @@ RuntimeVerificator::RuntimeVerificator() verify(thisCpu); } -class Selector -{ -public: - Selector() __attribute__((noinline)); - IAccelrated::UP create() { return _factory->create(); } -private: - std::unique_ptr<Factory> _factory; -}; - -Selector::Selector() : - _factory() -{ -#ifdef __x86_64__ - __builtin_cpu_init (); - if (__builtin_cpu_supports("avx512f")) { - _factory = std::make_unique<Avx512Factory>(); - } else if (__builtin_cpu_supports("avx2")) { - _factory = std::make_unique<Avx2Factory>(); - } else { - _factory = std::make_unique<GenericFactory>(); - } -#else - _factory = std::make_unique<GenericFactory>(); -#endif -} - } -static Selector _G_selector; - RuntimeVerificator _G_verifyAccelrator; const IAccelrated & IAccelrated::getAccelerator() { - static IAccelrated::UP accelrator = _G_selector.create(); + static IAccelrated::UP accelrator = create_accelerator(); return *accelrator; } diff --git a/vespalib/src/vespa/vespalib/net/tls/peer_policies.cpp b/vespalib/src/vespa/vespalib/net/tls/peer_policies.cpp index 149ad01b947..a476e23e6cb 100644 --- a/vespalib/src/vespa/vespalib/net/tls/peer_policies.cpp +++ b/vespalib/src/vespa/vespalib/net/tls/peer_policies.cpp @@ -22,23 +22,29 @@ bool is_regex_special_char(char c) noexcept { case '\\': case '+': case '.': + case '?': + case '*': return true; default: return false; } } -std::string dot_separated_glob_to_regex(vespalib::stringref glob) { +// Important: `delimiter` MUST NOT be a character that needs escaping within a regex [charset] +template <bool SupportSingleCharMatch> +std::string char_delimited_glob_to_regex(vespalib::stringref glob, char delimiter) { std::string ret = "^"; ret.reserve(glob.size() + 2); + // Note: we explicitly stop matching at a delimiter boundary. + // This is to make path fragment matching less vulnerable to dirty tricks. 
+ const std::string wildcard_pattern = std::string("[^") + delimiter + "]*"; + // Same applies for single chars; they should only match _within_ a delimited boundary. + const std::string single_char_pattern = std::string("[^") + delimiter + "]"; for (auto c : glob) { if (c == '*') { - // Note: we explicitly stop matching at a dot separator boundary. - // This is to make host name matching less vulnerable to dirty tricks. - ret += "[^.]*"; - } else if (c == '?') { - // Same applies for single chars; they should only match _within_ a dot boundary. - ret += "[^.]"; + ret += wildcard_pattern; + } else if (c == '?' && SupportSingleCharMatch) { + ret += single_char_pattern; } else { if (is_regex_special_char(c)) { ret += '\\'; @@ -52,14 +58,25 @@ std::string dot_separated_glob_to_regex(vespalib::stringref glob) { class RegexHostMatchPattern : public CredentialMatchPattern { Regex _pattern_as_regex; -public: - explicit RegexHostMatchPattern(vespalib::stringref glob_pattern) - : _pattern_as_regex(Regex::from_pattern(dot_separated_glob_to_regex(glob_pattern))) + explicit RegexHostMatchPattern(std::string_view glob_pattern) + : _pattern_as_regex(Regex::from_pattern(glob_pattern)) { } +public: + RegexHostMatchPattern(RegexHostMatchPattern&&) noexcept = default; ~RegexHostMatchPattern() override = default; - [[nodiscard]] bool matches(vespalib::stringref str) const override { + RegexHostMatchPattern& operator=(RegexHostMatchPattern&&) noexcept = default; + + [[nodiscard]] static RegexHostMatchPattern from_dns_glob_pattern(vespalib::stringref glob_pattern) { + return RegexHostMatchPattern(char_delimited_glob_to_regex<true>(glob_pattern, '.')); + } + + [[nodiscard]] static RegexHostMatchPattern from_uri_glob_pattern(vespalib::stringref glob_pattern) { + return RegexHostMatchPattern(char_delimited_glob_to_regex<false>(glob_pattern, '/')); + } + + [[nodiscard]] bool matches(vespalib::stringref str) const noexcept override { return 
_pattern_as_regex.full_match(std::string_view(str.data(), str.size())); } }; @@ -73,15 +90,19 @@ public: } ~ExactMatchPattern() override = default; - [[nodiscard]] bool matches(vespalib::stringref str) const override { + [[nodiscard]] bool matches(vespalib::stringref str) const noexcept override { return (str == _must_match_exactly); } }; } // anon ns -std::shared_ptr<const CredentialMatchPattern> CredentialMatchPattern::create_from_glob(vespalib::stringref glob_pattern) { - return std::make_shared<const RegexHostMatchPattern>(glob_pattern); +std::shared_ptr<const CredentialMatchPattern> CredentialMatchPattern::create_from_dns_glob(vespalib::stringref glob_pattern) { + return std::make_shared<const RegexHostMatchPattern>(RegexHostMatchPattern::from_dns_glob_pattern(glob_pattern)); +} + +std::shared_ptr<const CredentialMatchPattern> CredentialMatchPattern::create_from_uri_glob(vespalib::stringref glob_pattern) { + return std::make_shared<const RegexHostMatchPattern>(RegexHostMatchPattern::from_uri_glob_pattern(glob_pattern)); } std::shared_ptr<const CredentialMatchPattern> CredentialMatchPattern::create_exact_match(vespalib::stringref str) { @@ -91,9 +112,8 @@ std::shared_ptr<const CredentialMatchPattern> CredentialMatchPattern::create_exa RequiredPeerCredential::RequiredPeerCredential(Field field, vespalib::string must_match_pattern) : _field(field), _original_pattern(std::move(must_match_pattern)), - // FIXME it's not RFC 2459-compliant to use exact-matching for URIs, but that's all we currently need. - _match_pattern(field == Field::SAN_URI ? CredentialMatchPattern::create_exact_match(_original_pattern) - : CredentialMatchPattern::create_from_glob(_original_pattern)) + _match_pattern(field == Field::SAN_URI ? 
CredentialMatchPattern::create_from_uri_glob(_original_pattern) + : CredentialMatchPattern::create_from_dns_glob(_original_pattern)) { } @@ -111,11 +131,21 @@ void print_joined(std::ostream& os, const Collection& coll, const char* sep) { os << e; } } + +constexpr const char* to_string(RequiredPeerCredential::Field field) noexcept { + switch (field) { + case RequiredPeerCredential::Field::CN: return "CN"; + case RequiredPeerCredential::Field::SAN_DNS: return "SAN_DNS"; + case RequiredPeerCredential::Field::SAN_URI: return "SAN_URI"; + default: abort(); + } +} + } std::ostream& operator<<(std::ostream& os, const RequiredPeerCredential& cred) { os << "RequiredPeerCredential(" - << (cred.field() == RequiredPeerCredential::Field::CN ? "CN" : "SAN_DNS") + << to_string(cred.field()) << " matches '" << cred.original_pattern() << "')"; diff --git a/vespalib/src/vespa/vespalib/net/tls/peer_policies.h b/vespalib/src/vespa/vespalib/net/tls/peer_policies.h index c5721858518..4166efc4312 100644 --- a/vespalib/src/vespa/vespalib/net/tls/peer_policies.h +++ b/vespalib/src/vespa/vespalib/net/tls/peer_policies.h @@ -10,9 +10,10 @@ namespace vespalib::net::tls { struct CredentialMatchPattern { virtual ~CredentialMatchPattern() = default; - [[nodiscard]] virtual bool matches(vespalib::stringref str) const = 0; + [[nodiscard]] virtual bool matches(vespalib::stringref str) const noexcept = 0; - static std::shared_ptr<const CredentialMatchPattern> create_from_glob(vespalib::stringref pattern); + static std::shared_ptr<const CredentialMatchPattern> create_from_dns_glob(vespalib::stringref glob_pattern); + static std::shared_ptr<const CredentialMatchPattern> create_from_uri_glob(vespalib::stringref glob_pattern); static std::shared_ptr<const CredentialMatchPattern> create_exact_match(vespalib::stringref pattern); }; @@ -37,7 +38,7 @@ public: && (_original_pattern == rhs._original_pattern)); } - [[nodiscard]] bool matches(vespalib::stringref str) const { + [[nodiscard]] bool 
matches(vespalib::stringref str) const noexcept { return (_match_pattern && _match_pattern->matches(str)); } diff --git a/vespalib/src/vespa/vespalib/util/rcuvector.h b/vespalib/src/vespa/vespalib/util/rcuvector.h index 0396ee0d459..dd4fa660279 100644 --- a/vespalib/src/vespa/vespalib/util/rcuvector.h +++ b/vespalib/src/vespa/vespalib/util/rcuvector.h @@ -13,10 +13,10 @@ namespace vespalib { template <typename T> class RcuVectorHeld : public GenerationHeldBase { - std::unique_ptr<T> _data; + T _data; public: - RcuVectorHeld(size_t size, std::unique_ptr<T> data); + RcuVectorHeld(size_t size, T&& data); ~RcuVectorHeld(); }; @@ -121,7 +121,7 @@ public: void reset(); void shrink(size_t newSize) __attribute__((noinline)); - void replaceVector(std::unique_ptr<ArrayType> replacement); + void replaceVector(ArrayType replacement); }; template <typename T> diff --git a/vespalib/src/vespa/vespalib/util/rcuvector.hpp b/vespalib/src/vespa/vespalib/util/rcuvector.hpp index 9d7c8ea57d6..3c455149dfd 100644 --- a/vespalib/src/vespa/vespalib/util/rcuvector.hpp +++ b/vespalib/src/vespa/vespalib/util/rcuvector.hpp @@ -9,7 +9,7 @@ namespace vespalib { template <typename T> -RcuVectorHeld<T>::RcuVectorHeld(size_t size, std::unique_ptr<T> data) +RcuVectorHeld<T>::RcuVectorHeld(size_t size, T&& data) : GenerationHeldBase(size), _data(std::move(data)) { } @@ -52,20 +52,21 @@ RcuVectorBase<T>::~RcuVectorBase() = default; template <typename T> void RcuVectorBase<T>::expand(size_t newCapacity) { - std::unique_ptr<ArrayType> tmpData(new ArrayType()); - tmpData->reserve(newCapacity); + ArrayType tmpData; + tmpData.reserve(newCapacity); for (const T & v : _data) { - tmpData->push_back_fast(v); + tmpData.push_back_fast(v); } replaceVector(std::move(tmpData)); } template <typename T> void -RcuVectorBase<T>::replaceVector(std::unique_ptr<ArrayType> replacement) { - replacement->swap(_data); // atomic switch of underlying data - size_t holdSize = replacement->capacity() * sizeof(T); - 
GenerationHeldBase::UP hold(new RcuVectorHeld<ArrayType>(holdSize, std::move(replacement))); +RcuVectorBase<T>::replaceVector(ArrayType replacement) { + std::atomic_thread_fence(std::memory_order_release); + replacement.swap(_data); // atomic switch of underlying data + size_t holdSize = replacement.capacity() * sizeof(T); + auto hold = std::make_unique<RcuVectorHeld<ArrayType>>(holdSize, std::move(replacement)); _genHolder.hold(std::move(hold)); onReallocation(); } @@ -90,17 +91,18 @@ RcuVectorBase<T>::shrink(size_t newSize) return; } if (!_data.try_unreserve(wantedCapacity)) { - std::unique_ptr<ArrayType> tmpData(new ArrayType()); - tmpData->reserve(wantedCapacity); - tmpData->resize(newSize); + ArrayType tmpData; + tmpData.reserve(wantedCapacity); + tmpData.resize(newSize); for (uint32_t i = 0; i < newSize; ++i) { - (*tmpData)[i] = _data[i]; + tmpData[i] = _data[i]; } + std::atomic_thread_fence(std::memory_order_release); // Users of RCU vector must ensure that no readers use old size // after swap. Attribute vectors uses _committedDocIdLimit for this. 
- tmpData->swap(_data); // atomic switch of underlying data - size_t holdSize = tmpData->capacity() * sizeof(T); - GenerationHeldBase::UP hold(new RcuVectorHeld<ArrayType>(holdSize, std::move(tmpData))); + tmpData.swap(_data); // atomic switch of underlying data + size_t holdSize = tmpData.capacity() * sizeof(T); + auto hold = std::make_unique<RcuVectorHeld<ArrayType>>(holdSize, std::move(tmpData)); _genHolder.hold(std::move(hold)); onReallocation(); } diff --git a/vespalib/src/vespa/vespalib/util/simple_thread_bundle.cpp b/vespalib/src/vespa/vespalib/util/simple_thread_bundle.cpp index 80bbb3a7ad2..ab83d4e05fd 100644 --- a/vespalib/src/vespa/vespalib/util/simple_thread_bundle.cpp +++ b/vespalib/src/vespa/vespalib/util/simple_thread_bundle.cpp @@ -8,6 +8,8 @@ using namespace vespalib::fixed_thread_bundle; namespace vespalib { +VESPA_THREAD_STACK_TAG(simple_thread_bundle_executor); + namespace { struct SignalHook : Runnable { @@ -43,7 +45,7 @@ Runnable::UP wrap(Runnable *runnable) { } Runnable::UP chain(Runnable::UP first, Runnable::UP second) { - return Runnable::UP(new HookPair(std::move(first), std::move(second))); + return std::make_unique<HookPair>(std::move(first), std::move(second)); } } // namespace vespalib::<unnamed> @@ -173,4 +175,19 @@ SimpleThreadBundle::run(const std::vector<Runnable*> &targets) latch.await(); } +SimpleThreadBundle::Worker::Worker(Signal &s, Runnable::UP h) + : thread(*this, simple_thread_bundle_executor), + signal(s), + hook(std::move(h)) +{ + thread.start(); +} +void +SimpleThreadBundle::Worker::run() { + for (size_t gen = 0; signal.wait(gen) > 0; ) { + hook->run(); +} + +} + } // namespace vespalib diff --git a/vespalib/src/vespa/vespalib/util/simple_thread_bundle.h b/vespalib/src/vespa/vespalib/util/simple_thread_bundle.h index f0aaccc2525..d9a29ee7bef 100644 --- a/vespalib/src/vespa/vespalib/util/simple_thread_bundle.h +++ b/vespalib/src/vespa/vespalib/util/simple_thread_bundle.h @@ -112,14 +112,8 @@ private: Thread thread; Signal 
&signal; Runnable::UP hook; - Worker(Signal &s, Runnable::UP h) : thread(*this), signal(s), hook(std::move(h)) { - thread.start(); - } - void run() override { - for (size_t gen = 0; signal.wait(gen) > 0; ) { - hook->run(); - } - } + Worker(Signal &s, Runnable::UP h); + void run() override; }; Work _work; diff --git a/vespalib/src/vespa/vespalib/util/thread.cpp b/vespalib/src/vespa/vespalib/util/thread.cpp index c02a7a3b063..c3230bf313d 100644 --- a/vespalib/src/vespa/vespalib/util/thread.cpp +++ b/vespalib/src/vespa/vespalib/util/thread.cpp @@ -9,9 +9,9 @@ namespace vespalib { __thread Thread *Thread::_currentThread = nullptr; -Thread::Proxy::Proxy(Thread &parent, Runnable &target) - : thread(parent), runnable(target), - start(), started(), cancel(false) +Thread::Proxy::Proxy(Thread &parent, Runnable &target, init_fun_t init_fun_in) + : thread(parent), runnable(target), init_fun(std::move(init_fun_in)), + start(), started(), cancel(false) { } void @@ -22,7 +22,7 @@ Thread::Proxy::Run(FastOS_ThreadInterface *, void *) start.await(); if (!cancel) { started.countDown(); - runnable.run(); + init_fun(runnable); } assert(_currentThread == &thread); _currentThread = nullptr; @@ -30,8 +30,8 @@ Thread::Proxy::Run(FastOS_ThreadInterface *, void *) Thread::Proxy::~Proxy() = default; -Thread::Thread(Runnable &runnable) - : _proxy(*this, runnable), +Thread::Thread(Runnable &runnable, init_fun_t init_fun_in) + : _proxy(*this, runnable, std::move(init_fun_in)), _pool(STACK_SIZE, 1), _lock(), _cond(), diff --git a/vespalib/src/vespa/vespalib/util/thread.h b/vespalib/src/vespa/vespalib/util/thread.h index 8873f23ee98..e08f3ca1100 100644 --- a/vespalib/src/vespa/vespalib/util/thread.h +++ b/vespalib/src/vespa/vespalib/util/thread.h @@ -15,17 +15,19 @@ namespace vespalib { class Thread : public Active { private: + using init_fun_t = Runnable::init_fun_t; enum { STACK_SIZE = 256*1024 }; static __thread Thread *_currentThread; struct Proxy : FastOS_Runnable { Thread &thread; Runnable 
&runnable; + init_fun_t init_fun; vespalib::Gate start; vespalib::Gate started; bool cancel; - Proxy(Thread &parent, Runnable &target); + Proxy(Thread &parent, Runnable &target, init_fun_t init_fun_in); ~Proxy() override; void Run(FastOS_ThreadInterface *thisThread, void *arguments) override; @@ -39,7 +41,7 @@ private: bool _woken; public: - Thread(Runnable &runnable); + Thread(Runnable &runnable, init_fun_t init_fun_in); ~Thread() override; void start() override; Thread &stop() override; |