diff options
187 files changed, 2072 insertions, 1611 deletions
diff --git a/application/pom.xml b/application/pom.xml index bb1ea4b30ee..bbb4b171676 100644 --- a/application/pom.xml +++ b/application/pom.xml @@ -87,6 +87,12 @@ <dependency> <groupId>org.apache.felix</groupId> <artifactId>org.apache.felix.log</artifactId> + <exclusions> + <exclusion> + <groupId>org.osgi</groupId> + <artifactId>*</artifactId> + </exclusion> + </exclusions> </dependency> <dependency> <groupId>org.apache.opennlp</groupId> @@ -104,6 +110,10 @@ <groupId>org.apache.commons</groupId> <artifactId>commons-math3</artifactId> </dependency> + <dependency> + <groupId>jakarta.inject</groupId> + <artifactId>jakarta.inject-api</artifactId> + </dependency> <dependency> <groupId>com.yahoo.vespa</groupId> diff --git a/bootstrap.sh b/bootstrap.sh index e6d61e7c7a5..d6a34fdb834 100755 --- a/bootstrap.sh +++ b/bootstrap.sh @@ -36,9 +36,7 @@ get_env_var_with_optional_default() { fi } -# TODO: use maven-wrapper after changing mvn command for vespa in factory/build-java.sh -#readonly MAVEN_CMD=$(get_env_var_with_optional_default VESPA_MAVEN_COMMAND "$(pwd)/mvnw") -readonly MAVEN_CMD=$(get_env_var_with_optional_default VESPA_MAVEN_COMMAND mvn) +readonly MAVEN_CMD=$(get_env_var_with_optional_default VESPA_MAVEN_COMMAND "$(pwd)/mvnw") readonly MAVEN_EXTRA_OPTS=$(get_env_var_with_optional_default VESPA_MAVEN_EXTRA_OPTS) echo "Using maven command: ${MAVEN_CMD}" diff --git a/client/go/go.mod b/client/go/go.mod index 0d67283104f..d797017a810 100644 --- a/client/go/go.mod +++ b/client/go/go.mod @@ -17,7 +17,7 @@ require ( github.com/stretchr/testify v1.8.4 github.com/zalando/go-keyring v0.2.3 golang.org/x/net v0.14.0 - golang.org/x/sys v0.11.0 + golang.org/x/sys v0.12.0 gopkg.in/yaml.v3 v3.0.1 ) diff --git a/client/go/go.sum b/client/go/go.sum index fbe0fa1207e..4bea3accfae 100644 --- a/client/go/go.sum +++ b/client/go/go.sum @@ -74,6 +74,8 @@ golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU= golang.org/x/sys v0.7.0/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.7.0 h1:BEvjmm5fURWqcfbSKTdpkDXYBrUS1c0m8agp14W48vQ= golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= golang.org/x/term v0.11.0 h1:F9tnn/DA/Im8nCwm+fX+1/eBwi4qFjRT++MhtVC4ZX0= diff --git a/client/go/internal/cli/cmd/api_key.go b/client/go/internal/cli/cmd/api_key.go index 8b3780ab82b..7c187aa5da7 100644 --- a/client/go/internal/cli/cmd/api_key.go +++ b/client/go/internal/cli/cmd/api_key.go @@ -54,11 +54,11 @@ Read more in https://cloud.vespa.ai/en/security/guide`, } func doApiKey(cli *CLI, overwriteKey bool, args []string) error { - app, err := cli.config.application() + targetType, err := cli.targetType(true) if err != nil { return err } - targetType, err := cli.targetType() + app, err := cli.config.application() if err != nil { return err } diff --git a/client/go/internal/cli/cmd/cert.go b/client/go/internal/cli/cmd/cert.go index 7fbb357d1db..5c1ed04ab4e 100644 --- a/client/go/internal/cli/cmd/cert.go +++ b/client/go/internal/cli/cmd/cert.go @@ -95,11 +95,11 @@ $ vespa auth cert add -a my-tenant.my-app.my-instance path/to/application/packag } func doCert(cli *CLI, overwriteCertificate, skipApplicationPackage bool, args []string) error { - app, err := cli.config.application() + targetType, err := cli.targetType(true) if err != nil { return err } - targetType, err := cli.targetType() + app, err := cli.config.application() if err != nil { return err } @@ -141,11 +141,11 @@ func doCert(cli *CLI, overwriteCertificate, skipApplicationPackage bool, args [] } func doCertAdd(cli *CLI, overwriteCertificate bool, args []string) error { - pkg, err := 
cli.applicationPackageFrom(args, false) + target, err := cli.target(targetOptions{cloudExclusive: true}) if err != nil { return err } - target, err := cli.target(targetOptions{}) + pkg, err := cli.applicationPackageFrom(args, false) if err != nil { return err } diff --git a/client/go/internal/cli/cmd/config_test.go b/client/go/internal/cli/cmd/config_test.go index b00be38d021..7a4035f54a3 100644 --- a/client/go/internal/cli/cmd/config_test.go +++ b/client/go/internal/cli/cmd/config_test.go @@ -272,7 +272,7 @@ func TestConfigTargetResolving(t *testing.T) { } func assertTargetType(t *testing.T, expected string, cli *CLI) { - targetType, err := cli.targetType() + targetType, err := cli.targetType(false) require.Nil(t, err) assert.Equal(t, expected, targetType.name) } diff --git a/client/go/internal/cli/cmd/destroy.go b/client/go/internal/cli/cmd/destroy.go index ca69f21a9b4..38d93f49675 100644 --- a/client/go/internal/cli/cmd/destroy.go +++ b/client/go/internal/cli/cmd/destroy.go @@ -36,18 +36,14 @@ $ vespa destroy --force`, DisableAutoGenTag: true, SilenceUsage: true, RunE: func(cmd *cobra.Command, args []string) error { - target, err := cli.target(targetOptions{}) + target, err := cli.target(targetOptions{cloudExclusive: true}) if err != nil { return err } description := target.Deployment().String() - if !target.IsCloud() { - return errHint(fmt.Errorf("cannot remove deployment, only supported for Vespa Cloud")) - } else { - env := target.Deployment().Zone.Environment - if env != "dev" && env != "perf" { - return errHint(fmt.Errorf("cannot remove production %s", description), "See https://cloud.vespa.ai/en/deleting-applications") - } + env := target.Deployment().Zone.Environment + if env != "dev" && env != "perf" { + return errHint(fmt.Errorf("cannot remove production %s", description), "See https://cloud.vespa.ai/en/deleting-applications") } ok := force if !ok { diff --git a/client/go/internal/cli/cmd/destroy_test.go b/client/go/internal/cli/cmd/destroy_test.go 
index b23e524e0ab..44610576d7e 100644 --- a/client/go/internal/cli/cmd/destroy_test.go +++ b/client/go/internal/cli/cmd/destroy_test.go @@ -56,5 +56,5 @@ func TestDestroy(t *testing.T) { require.Nil(t, cli.Run("config", "set", "target", "local")) require.Nil(t, cli.Run("config", "set", "application", "foo.bar.baz")) require.NotNil(t, cli.Run("destroy", "-z", "prod.aws-us-east-1c")) - assert.Equal(t, "Error: cannot remove deployment, only supported for Vespa Cloud\n", stderr.String()) + assert.Equal(t, "Error: unsupported target local: this command only supports targets cloud and hosted\n", stderr.String()) } diff --git a/client/go/internal/cli/cmd/log.go b/client/go/internal/cli/cmd/log.go index fa07e33538c..8d3f3f4f384 100644 --- a/client/go/internal/cli/cmd/log.go +++ b/client/go/internal/cli/cmd/log.go @@ -34,7 +34,7 @@ $ vespa log --follow`, SilenceUsage: true, Args: cobra.MaximumNArgs(1), RunE: func(cmd *cobra.Command, args []string) error { - target, err := cli.target(targetOptions{logLevel: levelArg}) + target, err := cli.target(targetOptions{logLevel: levelArg, cloudExclusive: true}) if err != nil { return err } diff --git a/client/go/internal/cli/cmd/login.go b/client/go/internal/cli/cmd/login.go index baf35ce7954..d6eb8207b7f 100644 --- a/client/go/internal/cli/cmd/login.go +++ b/client/go/internal/cli/cmd/login.go @@ -20,13 +20,13 @@ func newLoginCmd(cli *CLI) *cobra.Command { return &cobra.Command{ Use: "login", Args: cobra.NoArgs, - Short: "Authenticate the Vespa CLI", + Short: "Authenticate Vespa CLI with Vespa Cloud", Example: "$ vespa auth login", DisableAutoGenTag: true, SilenceUsage: true, RunE: func(cmd *cobra.Command, args []string) error { ctx := cmd.Context() - targetType, err := cli.targetType() + targetType, err := cli.targetType(true) if err != nil { return err } diff --git a/client/go/internal/cli/cmd/logout.go b/client/go/internal/cli/cmd/logout.go index 93f7cb6270f..204513145aa 100644 --- a/client/go/internal/cli/cmd/logout.go +++ 
b/client/go/internal/cli/cmd/logout.go @@ -9,12 +9,12 @@ func newLogoutCmd(cli *CLI) *cobra.Command { return &cobra.Command{ Use: "logout", Args: cobra.NoArgs, - Short: "Log out of Vespa Cli", + Short: "Sign out of Vespa Cloud", Example: "$ vespa auth logout", DisableAutoGenTag: true, SilenceUsage: true, RunE: func(cmd *cobra.Command, args []string) error { - targetType, err := cli.targetType() + targetType, err := cli.targetType(true) if err != nil { return err } diff --git a/client/go/internal/cli/cmd/prod.go b/client/go/internal/cli/cmd/prod.go index 79a6907eef2..1a2f88311b6 100644 --- a/client/go/internal/cli/cmd/prod.go +++ b/client/go/internal/cli/cmd/prod.go @@ -53,6 +53,10 @@ https://cloud.vespa.ai/en/reference/deployment`, DisableAutoGenTag: true, SilenceUsage: true, RunE: func(cmd *cobra.Command, args []string) error { + target, err := cli.target(targetOptions{noCertificate: true, cloudExclusive: true}) + if err != nil { + return err + } pkg, err := cli.applicationPackageFrom(args, false) if err != nil { return err @@ -70,10 +74,6 @@ https://cloud.vespa.ai/en/reference/deployment`, if err != nil { return fmt.Errorf("a services.xml declaring your cluster(s) must exist: %w", err) } - target, err := cli.target(targetOptions{noCertificate: true}) - if err != nil { - return err - } fmt.Fprint(cli.Stdout, "This will modify any existing ", color.YellowString("deployment.xml"), " and ", color.YellowString("services.xml"), "!\nBefore modification a backup of the original file will be created.\n\n") @@ -135,7 +135,7 @@ https://cloud.vespa.ai/en/reference/vespa-cloud-api#submission-properties Example: `$ mvn package # when adding custom Java components $ vespa prod deploy`, RunE: func(cmd *cobra.Command, args []string) error { - target, err := cli.target(targetOptions{noCertificate: true}) + target, err := cli.target(targetOptions{noCertificate: true, cloudExclusive: true}) if err != nil { return err } diff --git a/client/go/internal/cli/cmd/prod_test.go 
b/client/go/internal/cli/cmd/prod_test.go index a01056b7178..944f09b3d42 100644 --- a/client/go/internal/cli/cmd/prod_test.go +++ b/client/go/internal/cli/cmd/prod_test.go @@ -44,6 +44,9 @@ func TestProdInit(t *testing.T) { cli, _, _ := newTestCLI(t) cli.Stdin = &buf + assert.Nil(t, cli.Run("config", "set", "target", "cloud")) + assert.Nil(t, cli.Run("config", "set", "application", "foo.bar")) + assert.Nil(t, cli.Run("auth", "api-key")) assert.Nil(t, cli.Run("prod", "init", pkgDir)) // Verify contents diff --git a/client/go/internal/cli/cmd/root.go b/client/go/internal/cli/cmd/root.go index 69fd88c1b2b..c3a3db0df57 100644 --- a/client/go/internal/cli/cmd/root.go +++ b/client/go/internal/cli/cmd/root.go @@ -74,6 +74,8 @@ type targetOptions struct { logLevel string // noCertificate declares that no client certificate should be required when using this target. noCertificate bool + // cloudExclusive specifies whether to only allow Vespa Cloud and Hosted Vespa targets + cloudExclusive bool } type targetType struct { @@ -349,7 +351,7 @@ func (c *CLI) waiter(once bool, timeout time.Duration) *Waiter { // target creates a target according the configuration of this CLI and given opts. 
func (c *CLI) target(opts targetOptions) (vespa.Target, error) { - targetType, err := c.targetType() + targetType, err := c.targetType(opts.cloudExclusive) if err != nil { return nil, err } @@ -374,7 +376,7 @@ func (c *CLI) target(opts targetOptions) (vespa.Target, error) { } // targetType resolves the real target type and its custom URL (if any) -func (c *CLI) targetType() (targetType, error) { +func (c *CLI) targetType(cloud bool) (targetType, error) { v, err := c.config.targetOrURL() if err != nil { return targetType{}, err @@ -387,6 +389,9 @@ func (c *CLI) targetType() (targetType, error) { return targetType{}, err } } + if cloud && tt.name != vespa.TargetCloud && tt.name != vespa.TargetHosted { + return targetType{}, fmt.Errorf("unsupported target %s: this command only supports targets %s and %s", tt.name, vespa.TargetCloud, vespa.TargetHosted) + } return tt, nil } diff --git a/client/go/internal/cli/cmd/waiter.go b/client/go/internal/cli/cmd/waiter.go index 40d1d76518e..34a10ccce33 100644 --- a/client/go/internal/cli/cmd/waiter.go +++ b/client/go/internal/cli/cmd/waiter.go @@ -35,7 +35,7 @@ func (w *Waiter) DeployService(target vespa.Target) (*vespa.Service, error) { // Service returns the service identified by cluster ID, available on target. 
func (w *Waiter) Service(target vespa.Target, cluster string) (*vespa.Service, error) { - targetType, err := w.cli.targetType() + targetType, err := w.cli.targetType(false) if err != nil { return nil, err } diff --git a/client/go/internal/vespa/deploy.go b/client/go/internal/vespa/deploy.go index 1bfaf641243..d42c65cef1e 100644 --- a/client/go/internal/vespa/deploy.go +++ b/client/go/internal/vespa/deploy.go @@ -263,7 +263,7 @@ func Submit(opts DeploymentOptions, submission Submission) error { return err } submitURL := opts.Target.Deployment().System.SubmitURL(opts.Target.Deployment()) - u, err := opts.url(submitURL) + u, err := url.Parse(submitURL) if err != nil { return err } diff --git a/client/go/internal/vespa/deploy_test.go b/client/go/internal/vespa/deploy_test.go index 9addf81138a..693d4527624 100644 --- a/client/go/internal/vespa/deploy_test.go +++ b/client/go/internal/vespa/deploy_test.go @@ -99,6 +99,7 @@ func TestSubmit(t *testing.T) { SourceURL: "https://github.com/foo/repo", })) require.Nil(t, httpClient.LastRequest.ParseMultipartForm(1<<20)) + assert.Equal(t, "https://api-ctl.vespa-cloud.com:4443/application/v4/tenant/t1/application/a1/submit", httpClient.LastRequest.URL.String()) assert.Equal(t, "{\"risk\":1,\"commit\":\"sha\",\"description\":\"broken garbage\",\"authorEmail\":\"foo@example.com\",\"sourceUrl\":\"https://github.com/foo/repo\"}", httpClient.LastRequest.FormValue("submitOptions")) diff --git a/cloud-tenant-base-dependencies-enforcer/pom.xml b/cloud-tenant-base-dependencies-enforcer/pom.xml index 8b069933970..8871b8ac55a 100644 --- a/cloud-tenant-base-dependencies-enforcer/pom.xml +++ b/cloud-tenant-base-dependencies-enforcer/pom.xml @@ -21,6 +21,7 @@ <plugin> <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-enforcer-plugin</artifactId> + <version>${maven-enforcer-plugin.vespa.version}</version> <dependencies> <dependency> <groupId>com.yahoo.vespa</groupId> @@ -53,20 +54,19 @@ <include>com.google.guava:failureaccess:[1.0.1, 
2):provided</include> <include>com.google.j2objc:j2objc-annotations:[2.8, 3):provided</include> - <include>com.google.inject:guice:jar:no_aop:${guice.vespa.version}:provided</include> + <include>com.google.inject:guice:jar:${guice.vespa.version}:provided</include> <include>com.sun.activation:javax.activation:[1.2.0, 2):provided</include> - <include>com.sun.xml.bind:jaxb-core:${jaxb.vespa.version}:provided</include> - <include>com.sun.xml.bind:jaxb-impl:${jaxb.vespa.version}:provided</include> + <include>com.sun.xml.bind:jaxb-core:${jaxb-core.vespa.version}:provided</include> + <include>com.sun.xml.bind:jaxb-impl:${jaxb-impl.vespa.version}:provided</include> <include>commons-logging:commons-logging:${commons-logging.vespa.version}:provided</include> <include>javax.inject:javax.inject:${javax.inject.vespa.version}:provided</include> <include>javax.servlet:javax.servlet-api:${javax.servlet-api.vespa.version}:provided</include> <include>javax.ws.rs:javax.ws.rs-api:${javax.ws.rs-api.vespa.version}:provided</include> - <include>javax.xml.bind:jaxb-api:${jaxb.vespa.version}:provided</include> + <include>javax.xml.bind:jaxb-api:${jaxb-api.vespa.version}:provided</include> <include>org.slf4j:jcl-over-slf4j:${slf4j.vespa.version}:provided</include> <include>org.slf4j:log4j-over-slf4j:${slf4j.vespa.version}:provided</include> <include>org.slf4j:slf4j-api:${slf4j.vespa.version}:provided</include> <include>org.slf4j:slf4j-jdk14:${slf4j.vespa.version}:provided</include> - <include>xml-apis:xml-apis:${xml-apis.vespa.version}:provided</include> <!-- Vespa provided dependencies --> <include>com.yahoo.vespa:annotations:*:provided</include> @@ -149,6 +149,7 @@ <include>io.prometheus:simpleclient_tracer_common:${prometheus.client.vespa.version}:test</include> <include>io.prometheus:simpleclient_tracer_otel:${prometheus.client.vespa.version}:test</include> <include>io.prometheus:simpleclient_tracer_otel_agent:${prometheus.client.vespa.version}:test</include> + 
<include>jakarta.inject:jakarta.inject-api:${jakarta.inject.vespa.version}:test</include> <include>junit:junit:${junit4.vespa.version}:test</include> <include>net.java.dev.jna:jna:${jna.vespa.version}:test</include> <include>net.openhft:zero-allocation-hashing:jar:${zero-allocation-hashing.vespa.version}:test</include> @@ -199,8 +200,6 @@ <include>org.junit.vintage:junit-vintage-engine:${junit.vespa.version}:test</include> <include>org.lz4:lz4-java:${org.lz4.vespa.version}:test</include> <include>org.opentest4j:opentest4j:${opentest4j.vespa.version}:test</include> - <include>org.osgi:org.osgi.compendium:[4.1.0, 5):test</include> - <include>org.osgi:org.osgi.core:[4.1.0, 5):test</include> <include>xerces:xercesImpl:${xerces.vespa.version}:test</include> </allowed> </enforceDependencies> diff --git a/config-model-api/pom.xml b/config-model-api/pom.xml index 5053e4b3472..aaa26a136b5 100644 --- a/config-model-api/pom.xml +++ b/config-model-api/pom.xml @@ -75,11 +75,6 @@ <version>${project.version}</version> <scope>test</scope> </dependency> - <dependency> - <groupId>com.google.guava</groupId> - <artifactId>guava-testlib</artifactId> - <scope>test</scope> - </dependency> </dependencies> <build> <plugins> diff --git a/config-model-api/src/main/java/com/yahoo/config/model/api/ApplicationClusterEndpoint.java b/config-model-api/src/main/java/com/yahoo/config/model/api/ApplicationClusterEndpoint.java index b7969267328..69749ee6f96 100644 --- a/config-model-api/src/main/java/com/yahoo/config/model/api/ApplicationClusterEndpoint.java +++ b/config-model-api/src/main/java/com/yahoo/config/model/api/ApplicationClusterEndpoint.java @@ -18,21 +18,6 @@ import java.util.stream.Stream; * @author mortent */ public class ApplicationClusterEndpoint { - @Override - public String toString() { - return "ApplicationClusterEndpoint{" + - "dnsName=" + dnsName + - ", scope=" + scope + - ", routingMethod=" + routingMethod + - ", weight=" + weight + - ", hostNames=" + hostNames + - ", 
clusterId='" + clusterId + "'" + - '}'; - } - - public enum Scope {application, global, zone} - - public enum RoutingMethod {shared, sharedLayer4, exclusive} private final DnsName dnsName; private final Scope scope; @@ -40,14 +25,16 @@ public class ApplicationClusterEndpoint { private final int weight; private final List<String> hostNames; private final String clusterId; + private final AuthMethod authMethod; - private ApplicationClusterEndpoint(DnsName dnsName, Scope scope, RoutingMethod routingMethod, int weight, List<String> hostNames, String clusterId) { - this.dnsName = dnsName; - this.scope = scope; - this.routingMethod = routingMethod; + private ApplicationClusterEndpoint(DnsName dnsName, Scope scope, RoutingMethod routingMethod, int weight, List<String> hostNames, String clusterId, AuthMethod authMethod) { + this.dnsName = Objects.requireNonNull(dnsName); + this.scope = Objects.requireNonNull(scope); + this.routingMethod = Objects.requireNonNull(routingMethod); this.weight = weight; - this.hostNames = List.copyOf(hostNames); - this.clusterId = clusterId; + this.hostNames = List.copyOf(Objects.requireNonNull(hostNames)); + this.clusterId = Objects.requireNonNull(clusterId); + this.authMethod = Objects.requireNonNull(authMethod); } public DnsName dnsName() { @@ -74,18 +61,42 @@ public class ApplicationClusterEndpoint { return clusterId; } + public AuthMethod authMethod() { + return authMethod; + } + + @Override + public String toString() { + return "ApplicationClusterEndpoint{" + + "dnsName=" + dnsName + + ", scope=" + scope + + ", routingMethod=" + routingMethod + + ", weight=" + weight + + ", hostNames=" + hostNames + + ", clusterId='" + clusterId + '\'' + + ", authMethod=" + authMethod + + '}'; + } + public static Builder builder() { return new Builder(); } + public enum Scope { application, global, zone } + + public enum RoutingMethod { shared, sharedLayer4, exclusive } + + public enum AuthMethod { mtls, token } + public static class Builder { private 
DnsName dnsName; private Scope scope; private RoutingMethod routingMethod; - private int weigth = 1; + private int weight = 1; private List<String> hosts; private String clusterId; + private AuthMethod authMethod = AuthMethod.mtls; // TODO(mpolden): For compatibility with older config-models. Remove when < 8.221 is gone public Builder dnsName(DnsName name) { this.dnsName = name; @@ -118,7 +129,7 @@ public class ApplicationClusterEndpoint { } public Builder weight(int weigth) { - this.weigth = weigth; + this.weight = weigth; return this; } @@ -132,9 +143,15 @@ public class ApplicationClusterEndpoint { return this; } + public Builder authMethod(AuthMethod authMethod) { + this.authMethod = authMethod; + return this; + } + public ApplicationClusterEndpoint build() { - return new ApplicationClusterEndpoint(dnsName, scope, routingMethod, weigth, hosts, clusterId); + return new ApplicationClusterEndpoint(dnsName, scope, routingMethod, weight, hosts, clusterId, authMethod); } + } public static class DnsName implements Comparable<DnsName> { diff --git a/config-model-api/src/main/java/com/yahoo/config/model/api/ContainerEndpoint.java b/config-model-api/src/main/java/com/yahoo/config/model/api/ContainerEndpoint.java index 78da750fb5b..de06ddd549a 100644 --- a/config-model-api/src/main/java/com/yahoo/config/model/api/ContainerEndpoint.java +++ b/config-model-api/src/main/java/com/yahoo/config/model/api/ContainerEndpoint.java @@ -3,9 +3,7 @@ package com.yahoo.config.model.api; import java.util.List; import java.util.Objects; -import java.util.Optional; import java.util.OptionalInt; -import java.util.OptionalLong; /** * ContainerEndpoint tracks the service names that a Container Cluster should be @@ -21,6 +19,7 @@ public class ContainerEndpoint { private final List<String> names; private final OptionalInt weight; private final ApplicationClusterEndpoint.RoutingMethod routingMethod; + private final ApplicationClusterEndpoint.AuthMethod authMethod; public ContainerEndpoint(String 
clusterId, ApplicationClusterEndpoint.Scope scope, List<String> names) { this(clusterId, scope, names, OptionalInt.empty()); @@ -31,11 +30,16 @@ public class ContainerEndpoint { } public ContainerEndpoint(String clusterId, ApplicationClusterEndpoint.Scope scope, List<String> names, OptionalInt weight, ApplicationClusterEndpoint.RoutingMethod routingMethod) { + this(clusterId, scope, names, weight, routingMethod, ApplicationClusterEndpoint.AuthMethod.mtls); + } + + public ContainerEndpoint(String clusterId, ApplicationClusterEndpoint.Scope scope, List<String> names, OptionalInt weight, ApplicationClusterEndpoint.RoutingMethod routingMethod, ApplicationClusterEndpoint.AuthMethod authMethod) { this.clusterId = Objects.requireNonNull(clusterId); this.scope = Objects.requireNonNull(scope); this.names = List.copyOf(Objects.requireNonNull(names)); - this.weight = weight; - this.routingMethod = routingMethod; + this.weight = Objects.requireNonNull(weight); + this.routingMethod = Objects.requireNonNull(routingMethod); + this.authMethod = Objects.requireNonNull(authMethod); } public String clusterId() { @@ -58,6 +62,10 @@ public class ContainerEndpoint { return routingMethod; } + public ApplicationClusterEndpoint.AuthMethod authMethod() { + return authMethod; + } + @Override public boolean equals(Object o) { if (this == o) return true; @@ -67,16 +75,17 @@ public class ContainerEndpoint { Objects.equals(scope, that.scope) && Objects.equals(names, that.names) && Objects.equals(weight, that.weight) && - Objects.equals(routingMethod, that.routingMethod); + Objects.equals(routingMethod, that.routingMethod) && + Objects.equals(authMethod, that.authMethod); } @Override public int hashCode() { - return Objects.hash(clusterId, names, scope, weight, routingMethod); + return Objects.hash(clusterId, names, scope, weight, routingMethod, authMethod); } @Override public String toString() { - return String.format("container endpoint %s -> %s [scope=%s, weight=%s, routingMetod=%s]", 
clusterId, names, scope, weight, routingMethod); + return String.format("container endpoint %s -> %s [scope=%s, weight=%s, routingMethod=%s, authMethod=%s]", clusterId, names, scope, weight, routingMethod, authMethod); } } diff --git a/config-model-api/src/test/java/com/yahoo/config/model/api/HostInfoTest.java b/config-model-api/src/test/java/com/yahoo/config/model/api/HostInfoTest.java index 7f1fd20ec5a..1d1ea593870 100644 --- a/config-model-api/src/test/java/com/yahoo/config/model/api/HostInfoTest.java +++ b/config-model-api/src/test/java/com/yahoo/config/model/api/HostInfoTest.java @@ -2,10 +2,11 @@ package com.yahoo.config.model.api; import org.junit.Test; -import com.google.common.testing.EqualsTester; - import java.util.Arrays; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotEquals; + public class HostInfoTest { @Test public void testEquals() { @@ -14,10 +15,12 @@ public class HostInfoTest { HostInfo c = new HostInfo("foo.yahoo.com", Arrays.asList(new ServiceInfo("foo", "baz", null, null, "config-id", "host-name"))); HostInfo d = new HostInfo("foo.yahoo.com", Arrays.asList(new ServiceInfo("bar", "baz", null, null, "config-id", "host-name"))); HostInfo e = new HostInfo("bar.yahoo.com", null); - new EqualsTester() - .addEqualityGroup(a, b) - .addEqualityGroup(c) - .addEqualityGroup(d) - .addEqualityGroup(e).testEquals(); + assertEquals(a, b); + assertNotEquals(a, c); + assertNotEquals(a, d); + assertNotEquals(a, d); + assertNotEquals(c, d); + assertNotEquals(c, e); + assertNotEquals(d, e); } } diff --git a/config-model-api/src/test/java/com/yahoo/config/model/api/PortInfoTest.java b/config-model-api/src/test/java/com/yahoo/config/model/api/PortInfoTest.java index 21c5abf81f6..e7daf8a18f5 100644 --- a/config-model-api/src/test/java/com/yahoo/config/model/api/PortInfoTest.java +++ b/config-model-api/src/test/java/com/yahoo/config/model/api/PortInfoTest.java @@ -1,11 +1,13 @@ // Copyright Yahoo. 
Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.config.model.api; -import com.google.common.testing.EqualsTester; import org.junit.Test; import java.util.Arrays; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotEquals; + public class PortInfoTest { @Test public void testEquals() { @@ -13,9 +15,9 @@ public class PortInfoTest { PortInfo b = new PortInfo(1234, Arrays.asList("foo")); PortInfo c = new PortInfo(1234, Arrays.asList("foo", "bar")); PortInfo d = new PortInfo(12345, Arrays.asList("foo")); - new EqualsTester() - .addEqualityGroup(a, b) - .addEqualityGroup(c) - .addEqualityGroup(d).testEquals(); + assertEquals(a, b); + assertNotEquals(a, c); + assertNotEquals(a, d); + assertNotEquals(c, d); } } diff --git a/config-model-api/src/test/java/com/yahoo/config/model/api/ServiceInfoTest.java b/config-model-api/src/test/java/com/yahoo/config/model/api/ServiceInfoTest.java index 9a381bb8233..7638430486e 100644 --- a/config-model-api/src/test/java/com/yahoo/config/model/api/ServiceInfoTest.java +++ b/config-model-api/src/test/java/com/yahoo/config/model/api/ServiceInfoTest.java @@ -1,12 +1,14 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.config.model.api; -import com.google.common.testing.EqualsTester; import org.junit.Test; import java.util.Arrays; import java.util.Collections; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotEquals; + public class ServiceInfoTest { @@ -24,13 +26,32 @@ public class ServiceInfoTest { ServiceInfo g = new ServiceInfo("1", "0", Arrays.asList(new PortInfo(33, null)), Collections.singletonMap("foo", "bar"), "different-config-id", commonHostName); ServiceInfo h = new ServiceInfo("1", "0", Arrays.asList(new PortInfo(33, null)), Collections.singletonMap("foo", "bar"), commonConfigId, "different-host"); - new EqualsTester() - .addEqualityGroup(a, b) - .addEqualityGroup(c) - .addEqualityGroup(d) - .addEqualityGroup(e) - .addEqualityGroup(f) - .addEqualityGroup(g) - .addEqualityGroup(h).testEquals(); + assertEquals(a, b); + assertNotEquals(a, c); + assertNotEquals(a, d); + assertNotEquals(a, e); + assertNotEquals(a, f); + assertNotEquals(a, g); + assertNotEquals(a, h); + + assertNotEquals(c, d); + assertNotEquals(c, e); + assertNotEquals(c, f); + assertNotEquals(c, g); + assertNotEquals(c, h); + + assertNotEquals(d, e); + assertNotEquals(d, f); + assertNotEquals(d, g); + assertNotEquals(d, h); + + assertNotEquals(e, f); + assertNotEquals(e, g); + assertNotEquals(e, h); + + assertNotEquals(f, g); + assertNotEquals(f, h); + + assertNotEquals(g, h); } } diff --git a/config-model-fat/pom.xml b/config-model-fat/pom.xml index adf511faabe..55592897447 100644 --- a/config-model-fat/pom.xml +++ b/config-model-fat/pom.xml @@ -98,16 +98,10 @@ javax.security.auth.callback, javax.security.auth.x500, javax.security.auth, - javax.xml.datatype, - javax.xml.namespace, - javax.xml.parsers, - javax.xml.transform, - javax.xml.xpath, + javax.xml.*, <!-- expands to all packages in Java module java.xml --> org.bouncycastle.*, <!-- expands to all BC packages by Felix plugin --> - org.w3c.dom.bootstrap, - org.w3c.dom.ls, - org.w3c.dom, - 
org.xml.sax, + org.w3c.dom.*, <!-- expands to all packages in Java module java.xml --> + org.xml.sax.*, <!-- expands to all packages in Java module java.xml --> <!-- TODO: The fat bundle becomes more brittle for each package added below. Use interfaces in model-api instead. --> com.yahoo.vespa.config, com.yahoo.vespa.config.buildergen, @@ -195,7 +189,7 @@ <i>com.google.errorprone:error_prone_annotations:*:*</i> <i>com.google.guava:failureaccess:*:*</i> <i>com.google.guava:guava:*:*</i> - <i>com.google.inject:guice:jar:no_aop:*:*</i> + <i>com.google.inject:guice:jar:*:*</i> <i>com.google.j2objc:j2objc-annotations:*:*</i> <i>com.google.protobuf:protobuf-java:*:*</i> <i>com.sun.activation:javax.activation:*:*</i> @@ -207,6 +201,7 @@ <i>io.prometheus:simpleclient_tracer_common:*:*</i> <i>io.prometheus:simpleclient_tracer_otel:*:*</i> <i>io.prometheus:simpleclient_tracer_otel_agent:*:*</i> + <i>jakarta.inject:jakarta.inject-api:*:*</i> <i>javax.inject:javax.inject:*:*</i> <i>net.openhft:zero-allocation-hashing:*:*</i> <i>org.antlr:antlr-runtime:*:*</i> @@ -219,7 +214,6 @@ <i>org.slf4j:slf4j-api:*:*</i> <i>org.slf4j:slf4j-jdk14:*:*</i> <i>xerces:xercesImpl:*:*</i> - <i>xml-apis:xml-apis:*:*</i> </allowed> </enforceDependencies> </rules> diff --git a/config-model/src/main/java/com/yahoo/schema/DistributableResource.java b/config-model/src/main/java/com/yahoo/schema/DistributableResource.java index e7bdb68a03d..8594b40a367 100644 --- a/config-model/src/main/java/com/yahoo/schema/DistributableResource.java +++ b/config-model/src/main/java/com/yahoo/schema/DistributableResource.java @@ -8,7 +8,7 @@ import com.yahoo.path.Path; import java.nio.ByteBuffer; import java.util.Objects; -public class DistributableResource implements Comparable <DistributableResource> { +public class DistributableResource implements Comparable <DistributableResource>, Cloneable { public enum PathType { FILE, URI, BLOB } @@ -35,6 +35,11 @@ public class DistributableResource implements Comparable 
<DistributableResource> this.pathType = type; } + @Override + public DistributableResource clone() throws CloneNotSupportedException { + return (DistributableResource) super.clone(); + } + // TODO: Remove and make path/pathType final public void setFileName(String fileName) { Objects.requireNonNull(fileName, "Filename cannot be null"); diff --git a/config-model/src/main/java/com/yahoo/schema/OnnxModel.java b/config-model/src/main/java/com/yahoo/schema/OnnxModel.java index 90a27d1f036..3295b2e93aa 100644 --- a/config-model/src/main/java/com/yahoo/schema/OnnxModel.java +++ b/config-model/src/main/java/com/yahoo/schema/OnnxModel.java @@ -18,13 +18,15 @@ import java.util.Set; * * @author lesters */ -public class OnnxModel extends DistributableResource { +public class OnnxModel extends DistributableResource implements Cloneable { + // Model information private OnnxModelInfo modelInfo = null; private final Map<String, String> inputMap = new HashMap<>(); private final Map<String, String> outputMap = new HashMap<>(); private final Set<String> initializers = new HashSet<>(); + // Runtime options private String statelessExecutionMode = null; private Integer statelessInterOpThreads = null; private Integer statelessIntraOpThreads = null; @@ -40,6 +42,15 @@ public class OnnxModel extends DistributableResource { } @Override + public OnnxModel clone() { + try { + return (OnnxModel) super.clone(); // Shallow clone is sufficient here + } catch (CloneNotSupportedException e) { + throw new RuntimeException("Clone not supported", e); + } + } + + @Override public void setUri(String uri) { throw new IllegalArgumentException("URI for ONNX models are not currently supported"); } @@ -148,26 +159,24 @@ public class OnnxModel extends DistributableResource { } } + public Optional<Integer> getStatelessIntraOpThreads() { + return Optional.ofNullable(statelessIntraOpThreads); + } + public void setGpuDevice(int deviceNumber, boolean required) { if (deviceNumber >= 0) { this.gpuDevice = new 
GpuDevice(deviceNumber, required); } } - public Optional<Integer> getStatelessIntraOpThreads() { - return Optional.ofNullable(statelessIntraOpThreads); - } - public Optional<GpuDevice> getGpuDevice() { return Optional.ofNullable(gpuDevice); } public record GpuDevice(int deviceNumber, boolean required) { - public GpuDevice { if (deviceNumber < 0) throw new IllegalArgumentException("deviceNumber cannot be negative, got " + deviceNumber); } - } } diff --git a/config-model/src/main/java/com/yahoo/schema/derived/FileDistributedOnnxModels.java b/config-model/src/main/java/com/yahoo/schema/derived/FileDistributedOnnxModels.java index e3c697e3262..c3fa6aedf31 100644 --- a/config-model/src/main/java/com/yahoo/schema/derived/FileDistributedOnnxModels.java +++ b/config-model/src/main/java/com/yahoo/schema/derived/FileDistributedOnnxModels.java @@ -35,6 +35,16 @@ public class FileDistributedOnnxModels extends Derived implements OnnxModelsConf this.models = Collections.unmodifiableMap(distributableModels); } + private FileDistributedOnnxModels(Collection<OnnxModel> models) { + Map<String, OnnxModel> distributableModels = models.stream() + .collect(LinkedHashMap::new, (m, v) -> m.put(v.getName(), v.clone()), LinkedHashMap::putAll); + this.models = Collections.unmodifiableMap(distributableModels); + } + + public FileDistributedOnnxModels clone() { + return new FileDistributedOnnxModels(models.values()); + } + public Map<String, OnnxModel> asMap() { return models; } public void getConfig(OnnxModelsConfig.Builder builder) { diff --git a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/RankSetupValidator.java b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/RankSetupValidator.java index 04faff688f8..e3c7693c608 100644 --- a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/RankSetupValidator.java +++ b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/RankSetupValidator.java @@ -7,7 +7,6 @@ 
import com.yahoo.config.ConfigInstance; import com.yahoo.config.application.api.DeployLogger; import com.yahoo.config.model.deploy.DeployState; import com.yahoo.config.model.producer.AnyConfigProducer; -import com.yahoo.config.model.producer.TreeConfigProducer; import com.yahoo.io.IOUtils; import com.yahoo.log.InvalidLogFormatException; import com.yahoo.log.LogMessage; diff --git a/config-model/src/main/java/com/yahoo/vespa/model/container/ApplicationContainerCluster.java b/config-model/src/main/java/com/yahoo/vespa/model/container/ApplicationContainerCluster.java index 584207caeac..07983c7c85a 100644 --- a/config-model/src/main/java/com/yahoo/vespa/model/container/ApplicationContainerCluster.java +++ b/config-model/src/main/java/com/yahoo/vespa/model/container/ApplicationContainerCluster.java @@ -39,6 +39,7 @@ import com.yahoo.vespa.model.container.component.Handler; import com.yahoo.vespa.model.container.component.SystemBindingPattern; import com.yahoo.vespa.model.container.configserver.ConfigserverCluster; import com.yahoo.vespa.model.utils.FileSender; + import java.util.ArrayList; import java.util.Collection; import java.util.Collections; @@ -48,7 +49,6 @@ import java.util.Optional; import java.util.Set; import java.util.stream.Collectors; -import static com.yahoo.config.model.api.ApplicationClusterEndpoint.RoutingMethod.sharedLayer4; import static com.yahoo.vespa.model.container.docproc.DocprocChains.DOCUMENT_TYPE_MANAGER_CLASS; /** @@ -98,7 +98,7 @@ public final class ApplicationContainerCluster extends ContainerCluster<Applicat private Integer memoryPercentage = null; - private List<ApplicationClusterEndpoint> endpointList = List.of(); + private List<ApplicationClusterEndpoint> endpoints = List.of(); public ApplicationContainerCluster(TreeConfigProducer<?> parent, String configSubId, String clusterId, DeployState deployState) { super(parent, configSubId, clusterId, deployState, true, 10); @@ -132,7 +132,7 @@ public final class ApplicationContainerCluster 
extends ContainerCluster<Applicat super.doPrepare(deployState); addAndSendApplicationBundles(deployState); sendUserConfiguredFiles(deployState); - createEndpointList(deployState); + createEndpoints(deployState); } private void addAndSendApplicationBundles(DeployState deployState) { @@ -198,7 +198,7 @@ public final class ApplicationContainerCluster extends ContainerCluster<Applicat } /** Create list of endpoints, these will be consumed later by LbServicesProducer */ - private void createEndpointList(DeployState deployState) { + private void createEndpoints(DeployState deployState) { if (!deployState.isHosted()) return; if (deployState.getProperties().applicationId().instance().isTester()) return; List<ApplicationClusterEndpoint> endpoints = new ArrayList<>(); @@ -224,25 +224,26 @@ public final class ApplicationContainerCluster extends ContainerCluster<Applicat .dnsName(l4Name) .hosts(hosts) .clusterId(getName()) + .authMethod(ApplicationClusterEndpoint.AuthMethod.mtls) .build()); } } // Include all endpoints provided by controller endpointsFromController.stream() - .filter(ce -> ce.clusterId().equals(getName())) - .filter(ce -> ce.routingMethod() == sharedLayer4) - .forEach(ce -> ce.names().forEach( - name -> endpoints.add(ApplicationClusterEndpoint.builder() - .scope(ce.scope()) - .weight(Long.valueOf(ce.weight().orElse(1)).intValue()) // Default to weight=1 if not set - .routingMethod(ce.routingMethod()) - .dnsName(ApplicationClusterEndpoint.DnsName.from(name)) - .hosts(hosts) - .clusterId(getName()) - .build()) - )); - endpointList = List.copyOf(endpoints); + .filter(ce -> ce.clusterId().equals(getName())) + .forEach(ce -> ce.names().forEach( + name -> endpoints.add(ApplicationClusterEndpoint.builder() + .scope(ce.scope()) + .weight(ce.weight().orElse(1)) // Default to weight=1 if not set + .routingMethod(ce.routingMethod()) + .dnsName(ApplicationClusterEndpoint.DnsName.from(name)) + .hosts(hosts) + .clusterId(getName()) + .authMethod(ce.authMethod()) + .build()) 
+ )); + this.endpoints = List.copyOf(endpoints); } @Override @@ -364,7 +365,7 @@ public final class ApplicationContainerCluster extends ContainerCluster<Applicat @Override public List<ApplicationClusterEndpoint> endpoints() { - return endpointList; + return endpoints; } @Override diff --git a/config-model/src/main/java/com/yahoo/vespa/model/container/ContainerModelEvaluation.java b/config-model/src/main/java/com/yahoo/vespa/model/container/ContainerModelEvaluation.java index c227700733e..906ef739ef1 100644 --- a/config-model/src/main/java/com/yahoo/vespa/model/container/ContainerModelEvaluation.java +++ b/config-model/src/main/java/com/yahoo/vespa/model/container/ContainerModelEvaluation.java @@ -3,6 +3,7 @@ package com.yahoo.vespa.model.container; import ai.vespa.models.evaluation.ModelsEvaluator; import com.yahoo.osgi.provider.model.ComponentModel; +import com.yahoo.schema.derived.FileDistributedOnnxModels; import com.yahoo.schema.derived.RankProfileList; import com.yahoo.vespa.config.search.RankProfilesConfig; import com.yahoo.vespa.config.search.core.OnnxModelsConfig; @@ -42,9 +43,16 @@ public class ContainerModelEvaluation implements /** Global rank profiles, aka models */ private final RankProfileList rankProfileList; + private final FileDistributedOnnxModels onnxModels; // For cluster specific ONNX model settings public ContainerModelEvaluation(ApplicationContainerCluster cluster, RankProfileList rankProfileList) { + this(cluster, rankProfileList, null); + } + + public ContainerModelEvaluation(ApplicationContainerCluster cluster, + RankProfileList rankProfileList, FileDistributedOnnxModels onnxModels) { this.rankProfileList = Objects.requireNonNull(rankProfileList, "rankProfileList cannot be null"); + this.onnxModels = onnxModels; cluster.addSimpleComponent(EVALUATOR_NAME, null, EVALUATION_BUNDLE_NAME); cluster.addComponent(ContainerModelEvaluation.getHandler()); } @@ -61,7 +69,11 @@ public class ContainerModelEvaluation implements @Override public void 
getConfig(OnnxModelsConfig.Builder builder) { - rankProfileList.getConfig(builder); + if (onnxModels != null) { + onnxModels.getConfig(builder); + } else { + rankProfileList.getConfig(builder); + } } public void getConfig(RankingExpressionsConfig.Builder builder) { diff --git a/config-model/src/main/java/com/yahoo/vespa/model/container/http/xml/HttpBuilder.java b/config-model/src/main/java/com/yahoo/vespa/model/container/http/xml/HttpBuilder.java index ae13bed4bb4..d276bf3b850 100644 --- a/config-model/src/main/java/com/yahoo/vespa/model/container/http/xml/HttpBuilder.java +++ b/config-model/src/main/java/com/yahoo/vespa/model/container/http/xml/HttpBuilder.java @@ -22,6 +22,7 @@ import org.w3c.dom.Element; import java.util.ArrayList; import java.util.List; import java.util.Optional; +import java.util.Set; import java.util.logging.Level; /** @@ -33,6 +34,12 @@ public class HttpBuilder extends VespaDomBuilder.DomConfigProducerBuilderBase<Ht static final String REQUEST_CHAIN_TAG_NAME = "request-chain"; static final String RESPONSE_CHAIN_TAG_NAME = "response-chain"; static final List<String> VALID_FILTER_CHAIN_TAG_NAMES = List.of(REQUEST_CHAIN_TAG_NAME, RESPONSE_CHAIN_TAG_NAME); + private final Set<Integer> portBindingOverrides; + + public HttpBuilder(Set<Integer> portBindingOverrides) { + super(); + this.portBindingOverrides = portBindingOverrides; + } @Override protected Http doBuild(DeployState deployState, TreeConfigProducer<AnyConfigProducer> ancestor, Element spec) { @@ -44,7 +51,7 @@ public class HttpBuilder extends VespaDomBuilder.DomConfigProducerBuilderBase<Ht Element filteringElem = XML.getChild(spec, "filtering"); if (filteringElem != null) { filterChains = new FilterChainsBuilder().build(deployState, ancestor, filteringElem); - bindings = readFilterBindings(filteringElem); + bindings = readFilterBindings(filteringElem, this.portBindingOverrides); strictFiltering = XmlHelper.getOptionalAttribute(filteringElem, "strict-mode") .map(Boolean::valueOf); @@ 
-140,7 +147,7 @@ public class HttpBuilder extends VespaDomBuilder.DomConfigProducerBuilderBase<Ht return Optional.of((ApplicationContainerCluster) currentProducer); } - private List<FilterBinding> readFilterBindings(Element filteringSpec) { + private List<FilterBinding> readFilterBindings(Element filteringSpec, Set<Integer> portBindingOverride) { List<FilterBinding> result = new ArrayList<>(); for (Element child: XML.getChildren(filteringSpec)) { @@ -150,7 +157,14 @@ public class HttpBuilder extends VespaDomBuilder.DomConfigProducerBuilderBase<Ht for (Element bindingSpec: XML.getChildren(child, "binding")) { String binding = XML.getValue(bindingSpec); - result.add(FilterBinding.create(toFilterBindingType(tagName), chainId, UserBindingPattern.fromPattern(binding))); + if (portBindingOverride.isEmpty()) { + result.add(FilterBinding.create(toFilterBindingType(tagName), chainId, UserBindingPattern.fromPattern(binding))); + } else { + UserBindingPattern userBindingPattern = UserBindingPattern.fromPattern(binding); + portBindingOverride.stream() + .map(userBindingPattern::withOverriddenPort) + .forEach(pattern -> result.add(FilterBinding.create(toFilterBindingType(tagName), chainId, pattern))); + } } } } diff --git a/config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilder.java b/config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilder.java index 0e72cff1688..459c54a2805 100644 --- a/config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilder.java +++ b/config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilder.java @@ -44,6 +44,7 @@ import com.yahoo.jdisc.http.server.jetty.VoidRequestLog; import com.yahoo.osgi.provider.model.ComponentModel; import com.yahoo.path.Path; import com.yahoo.schema.OnnxModel; +import com.yahoo.schema.derived.FileDistributedOnnxModels; import com.yahoo.schema.derived.RankProfileList; import com.yahoo.search.rendering.RendererRegistry; 
import com.yahoo.security.X509CertificateUtils; @@ -443,7 +444,7 @@ public class ContainerModelBuilder extends ConfigModelBuilder<ContainerModel> { protected void addHttp(DeployState deployState, Element spec, ApplicationContainerCluster cluster, ConfigModelContext context) { Element httpElement = XML.getChild(spec, "http"); if (httpElement != null) { - cluster.setHttp(buildHttp(deployState, cluster, httpElement)); + cluster.setHttp(buildHttp(deployState, cluster, httpElement, context)); } if (isHostedTenantApplication(context)) { addHostedImplicitHttpIfNotPresent(deployState, cluster); @@ -706,8 +707,8 @@ public class ContainerModelBuilder extends ConfigModelBuilder<ContainerModel> { .configureHttpFilterChains(http); } - private Http buildHttp(DeployState deployState, ApplicationContainerCluster cluster, Element httpElement) { - Http http = new HttpBuilder().build(deployState, cluster, httpElement); + private Http buildHttp(DeployState deployState, ApplicationContainerCluster cluster, Element httpElement, ConfigModelContext context) { + Http http = new HttpBuilder(portBindingOverride(deployState, context)).build(deployState, cluster, httpElement); if (networking == Networking.disable) http.removeAllServers(); @@ -751,10 +752,13 @@ public class ContainerModelBuilder extends ConfigModelBuilder<ContainerModel> { RankProfileList profiles = context.vespaModel() != null ? 
context.vespaModel().rankProfileList() : RankProfileList.empty; + // Create a copy of models so each cluster can have its own specific settings + FileDistributedOnnxModels models = profiles.getOnnxModels().clone(); + Element onnxElement = XML.getChild(modelEvaluationElement, "onnx"); Element modelsElement = XML.getChild(onnxElement, "models"); for (Element modelElement : XML.getChildren(modelsElement, "model") ) { - OnnxModel onnxModel = profiles.getOnnxModels().asMap().get(modelElement.getAttribute("name")); + OnnxModel onnxModel = models.asMap().get(modelElement.getAttribute("name")); if (onnxModel == null) { String availableModels = String.join(", ", profiles.getOnnxModels().asMap().keySet()); context.getDeployState().getDeployLogger().logApplicationPackage(WARNING, @@ -774,7 +778,7 @@ public class ContainerModelBuilder extends ConfigModelBuilder<ContainerModel> { } } - cluster.setModelEvaluation(new ContainerModelEvaluation(cluster, profiles)); + cluster.setModelEvaluation(new ContainerModelEvaluation(cluster, profiles, models)); } private String getStringValue(Element element, String name, String defaultValue) { @@ -832,10 +836,9 @@ public class ContainerModelBuilder extends ConfigModelBuilder<ContainerModel> { } private void addUserHandlers(DeployState deployState, ApplicationContainerCluster cluster, Element spec, ConfigModelContext context) { - var portBindingOverride = isHostedTenantApplication(context) ? 
getDataplanePorts(deployState) : Set.<Integer>of(); for (Element component: XML.getChildren(spec, "handler")) { cluster.addComponent( - new DomHandlerBuilder(cluster, portBindingOverride).build(deployState, cluster, component)); + new DomHandlerBuilder(cluster, portBindingOverride(deployState, context)).build(deployState, cluster, component)); } } @@ -1168,11 +1171,14 @@ public class ContainerModelBuilder extends ConfigModelBuilder<ContainerModel> { ContainerDocumentApi.HandlerOptions documentApiOptions = DocumentApiOptionsBuilder.build(documentApiElement); Element ignoreUndefinedFields = XML.getChild(documentApiElement, "ignore-undefined-fields"); - var portBindingOverride = isHostedTenantApplication(context) + return new ContainerDocumentApi(cluster, documentApiOptions, + "true".equals(XML.getValue(ignoreUndefinedFields)), portBindingOverride(deployState, context)); + } + + private Set<Integer> portBindingOverride(DeployState deployState, ConfigModelContext context) { + return isHostedTenantApplication(context) ? getDataplanePorts(deployState) : Set.<Integer>of(); - return new ContainerDocumentApi(cluster, documentApiOptions, - "true".equals(XML.getValue(ignoreUndefinedFields)), portBindingOverride); } private ContainerDocproc buildDocproc(DeployState deployState, ApplicationContainerCluster cluster, Element spec) { diff --git a/config-model/src/main/java/com/yahoo/vespa/model/content/cluster/ContentCluster.java b/config-model/src/main/java/com/yahoo/vespa/model/content/cluster/ContentCluster.java index 4f98102a61f..bb72eda7d04 100644 --- a/config-model/src/main/java/com/yahoo/vespa/model/content/cluster/ContentCluster.java +++ b/config-model/src/main/java/com/yahoo/vespa/model/content/cluster/ContentCluster.java @@ -600,9 +600,6 @@ public class ContentCluster extends TreeConfigProducer<AnyConfigProducer> implem name("health")); builder.consumer( new MetricsmanagerConfig.Consumer.Builder(). 
- name("fleetcontroller")); - builder.consumer( - new MetricsmanagerConfig.Consumer.Builder(). name("statereporter"). addedmetrics("*"). removedtags("thread"). diff --git a/config-model/src/main/java/com/yahoo/vespa/model/content/storagecluster/StorageCluster.java b/config-model/src/main/java/com/yahoo/vespa/model/content/storagecluster/StorageCluster.java index 872fda9d909..e3d35e768b7 100644 --- a/config-model/src/main/java/com/yahoo/vespa/model/content/storagecluster/StorageCluster.java +++ b/config-model/src/main/java/com/yahoo/vespa/model/content/storagecluster/StorageCluster.java @@ -63,12 +63,6 @@ public class StorageCluster extends TreeConfigProducer<StorageNode> @Override public void getConfig(MetricsmanagerConfig.Builder builder) { - ContentCluster.getMetricBuilder("fleetcontroller", builder). - addedmetrics(StorageMetrics.VDS_DATASTORED_ALLDISKS_DOCS.baseName()). - addedmetrics(StorageMetrics.VDS_DATASTORED_ALLDISKS_BYTES.baseName()). - addedmetrics(StorageMetrics.VDS_DATASTORED_ALLDISKS_BUCKETS.baseName()). - addedmetrics(StorageMetrics.VDS_DATASTORED_BUCKET_SPACE_BUCKETS_TOTAL.baseName()); - ContentCluster.getMetricBuilder("log", builder). addedmetrics("vds.filestor.allthreads.put"). addedmetrics("vds.filestor.allthreads.get"). diff --git a/config-model/src/test/cfg/application/onnx_cluster_specific/models/mul.onnx b/config-model/src/test/cfg/application/onnx_cluster_specific/models/mul.onnx new file mode 100644 index 00000000000..087e2c3427f --- /dev/null +++ b/config-model/src/test/cfg/application/onnx_cluster_specific/models/mul.onnx @@ -0,0 +1,16 @@ +mul.py:f + +input1 +input2output"MulmulZ +input1 + + +Z +input2 + + +b +output + + +B
\ No newline at end of file diff --git a/config-model/src/test/cfg/application/onnx_cluster_specific/models/mul.py b/config-model/src/test/cfg/application/onnx_cluster_specific/models/mul.py new file mode 100755 index 00000000000..9fcb8612af9 --- /dev/null +++ b/config-model/src/test/cfg/application/onnx_cluster_specific/models/mul.py @@ -0,0 +1,26 @@ +# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +import onnx +from onnx import helper, TensorProto + +INPUT_1 = helper.make_tensor_value_info('input1', TensorProto.FLOAT, [1]) +INPUT_2 = helper.make_tensor_value_info('input2', TensorProto.FLOAT, [1]) +OUTPUT = helper.make_tensor_value_info('output', TensorProto.FLOAT, [1]) + +nodes = [ + helper.make_node( + 'Mul', + ['input1', 'input2'], + ['output'], + ), +] +graph_def = helper.make_graph( + nodes, + 'mul', + [ + INPUT_1, + INPUT_2 + ], + [OUTPUT], +) +model_def = helper.make_model(graph_def, producer_name='mul.py', opset_imports=[onnx.OperatorSetIdProto(version=12)]) +onnx.save(model_def, 'mul.onnx') diff --git a/config-model/src/test/cfg/application/onnx_cluster_specific/services.xml b/config-model/src/test/cfg/application/onnx_cluster_specific/services.xml new file mode 100644 index 00000000000..06b9a8c3a55 --- /dev/null +++ b/config-model/src/test/cfg/application/onnx_cluster_specific/services.xml @@ -0,0 +1,34 @@ +<?xml version="1.0" encoding="utf-8" ?> +<!-- Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
--> +<services version="1.0"> + + <container id="c1" version="1.0"> + <model-evaluation> + <onnx> + <models> + <model name="mul"> + <intraop-threads>2</intraop-threads> + <gpu-device>0</gpu-device> + </model> + </models> + </onnx> + </model-evaluation> + </container> + + <container id="c2" version="1.0"> + <http> + <server id="c1Server" port="8081" /> + </http> + <model-evaluation> + <onnx> + <models> + <model name="mul"> + <intraop-threads>4</intraop-threads> + <gpu-device>1</gpu-device> + </model> + </models> + </onnx> + </model-evaluation> + </container> + +</services> diff --git a/config-model/src/test/java/com/yahoo/vespa/model/container/ContainerClusterTest.java b/config-model/src/test/java/com/yahoo/vespa/model/container/ContainerClusterTest.java index 2562e1e3124..894fc55c014 100644 --- a/config-model/src/test/java/com/yahoo/vespa/model/container/ContainerClusterTest.java +++ b/config-model/src/test/java/com/yahoo/vespa/model/container/ContainerClusterTest.java @@ -439,7 +439,6 @@ public class ContainerClusterTest { cluster.doPrepare(state); List<ApplicationClusterEndpoint> endpoints = cluster.endpoints(); - assertNames(List.of(), endpoints.stream().filter(e -> e.routingMethod() == shared).toList()); assertNames(expectedSharedL4Names, endpoints.stream().filter(e -> e.routingMethod() == sharedLayer4).toList()); List<ContainerEndpoint> endpointsWithWeight = diff --git a/config-model/src/test/java/com/yahoo/vespa/model/container/http/FilterBindingsTest.java b/config-model/src/test/java/com/yahoo/vespa/model/container/http/FilterBindingsTest.java index 787a8255628..70a859af010 100644 --- a/config-model/src/test/java/com/yahoo/vespa/model/container/http/FilterBindingsTest.java +++ b/config-model/src/test/java/com/yahoo/vespa/model/container/http/FilterBindingsTest.java @@ -14,6 +14,8 @@ import com.yahoo.vespa.model.container.xml.ContainerModelBuilder.Networking; import org.junit.jupiter.api.Test; import org.w3c.dom.Element; +import java.util.Set; + import static 
com.yahoo.collections.CollectionUtil.first; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; @@ -26,7 +28,7 @@ public class FilterBindingsTest extends DomBuilderTest { private static final BindingPattern MY_CHAIN_BINDING = UserBindingPattern.fromHttpPath("/my-chain-binding"); private Http buildHttp(Element xml) { - Http http = new HttpBuilder().build(root.getDeployState(), root, xml); + Http http = new HttpBuilder(Set.of()).build(root.getDeployState(), root, xml); root.freezeModelTopology(); http.validate(); return http; @@ -108,4 +110,21 @@ public class FilterBindingsTest extends DomBuilderTest { } } + @Test + void filter_binding_ports_are_overriden() { + Element xml = parse( + "<http>", + " <filtering>", + " <request-chain id='my-request-chain'>", + " <binding>http://*/my-binding</binding>", + " </request-chain>", + " </filtering>", + "</http>"); + Http http = new HttpBuilder(Set.of(4443)).build(root.getDeployState(), root, xml); + root.freezeModelTopology(); + http.validate(); + FilterBinding binding = first(http.getBindings()); + assertEquals("my-request-chain", binding.chainId().getName()); + assertEquals("http://*:4443/my-binding", binding.binding().patternString()); + } } diff --git a/config-model/src/test/java/com/yahoo/vespa/model/container/http/FilterChainsTest.java b/config-model/src/test/java/com/yahoo/vespa/model/container/http/FilterChainsTest.java index 990896acb01..1c60205039f 100644 --- a/config-model/src/test/java/com/yahoo/vespa/model/container/http/FilterChainsTest.java +++ b/config-model/src/test/java/com/yahoo/vespa/model/container/http/FilterChainsTest.java @@ -9,6 +9,8 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.w3c.dom.Element; +import java.util.Set; + import static com.yahoo.collections.CollectionUtil.first; import static org.junit.jupiter.api.Assertions.assertEquals; import static 
org.junit.jupiter.api.Assertions.assertNotNull; @@ -23,7 +25,7 @@ public class FilterChainsTest extends DomBuilderTest { @BeforeEach public void setupFilterChains() { - http = new HttpBuilder().build(root.getDeployState(), root, servicesXml()); + http = new HttpBuilder(Set.of()).build(root.getDeployState(), root, servicesXml()); root.freezeModelTopology(); } diff --git a/config-model/src/test/java/com/yahoo/vespa/model/container/http/FilterConfigTest.java b/config-model/src/test/java/com/yahoo/vespa/model/container/http/FilterConfigTest.java index 76a3dcb2788..a1f9661de14 100644 --- a/config-model/src/test/java/com/yahoo/vespa/model/container/http/FilterConfigTest.java +++ b/config-model/src/test/java/com/yahoo/vespa/model/container/http/FilterConfigTest.java @@ -8,6 +8,8 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.w3c.dom.Element; +import java.util.Set; + import static com.yahoo.collections.CollectionUtil.first; import static com.yahoo.vespa.model.container.http.FilterConfigProvider.configProviderId; import static org.junit.jupiter.api.Assertions.*; @@ -22,7 +24,7 @@ public class FilterConfigTest extends DomBuilderTest { @BeforeEach public void setupFilterChains() { - http = new HttpBuilder().build(root.getDeployState(), root, servicesXml()); + http = new HttpBuilder(Set.of()).build(root.getDeployState(), root, servicesXml()); root.freezeModelTopology(); } diff --git a/config-model/src/test/java/com/yahoo/vespa/model/container/xml/AccessControlTest.java b/config-model/src/test/java/com/yahoo/vespa/model/container/xml/AccessControlTest.java index 697d2d422e8..740986bb000 100644 --- a/config-model/src/test/java/com/yahoo/vespa/model/container/xml/AccessControlTest.java +++ b/config-model/src/test/java/com/yahoo/vespa/model/container/xml/AccessControlTest.java @@ -225,7 +225,7 @@ public class AccessControlTest extends ContainerModelBuilderTestBase { "http://*:4443/metrics/v2/*"))); Set<String> actualCustomChainBindings = 
getFilterBindings(http, ComponentId.fromString("my-custom-request-chain")); - assertTrue(actualCustomChainBindings.containsAll(List.of("http://*/custom-handler/*", "http://*/"))); + assertTrue(actualCustomChainBindings.containsAll(List.of("http://*:4443/custom-handler/*", "http://*:4443/"))); } @Test @@ -262,7 +262,7 @@ public class AccessControlTest extends ContainerModelBuilderTestBase { "http://*:4443/custom-handler/*"))); Set<String> actualCustomChainBindings = getFilterBindings(http, ComponentId.fromString("my-custom-response-chain")); - assertTrue(actualCustomChainBindings.contains("http://*/custom-handler/*")); + assertTrue(actualCustomChainBindings.contains("http://*:4443/custom-handler/*")); } @Test diff --git a/config-model/src/test/java/com/yahoo/vespa/model/content/ContentClusterTest.java b/config-model/src/test/java/com/yahoo/vespa/model/content/ContentClusterTest.java index 1360ca259dd..2726d64eafc 100644 --- a/config-model/src/test/java/com/yahoo/vespa/model/content/ContentClusterTest.java +++ b/config-model/src/test/java/com/yahoo/vespa/model/content/ContentClusterTest.java @@ -783,38 +783,35 @@ public class ContentClusterTest extends ContentBaseTest { cluster.getConfig(builder); MetricsmanagerConfig config = new MetricsmanagerConfig(builder); + assertEquals(5, config.consumer().size()); - assertEquals(6, config.consumer().size()); - assertEquals("status", config.consumer(0).name()); - assertEquals("*", config.consumer(0).addedmetrics(0)); - assertEquals("partofsum", config.consumer(0).removedtags(0)); + var status = config.consumer(0); + assertEquals("status", status.name()); + assertEquals("*", status.addedmetrics(0)); + assertEquals("partofsum", status.removedtags(0)); - assertEquals("log", config.consumer(1).name()); - assertEquals("logdefault", config.consumer(1).tags().get(0)); - assertEquals("loadtype", config.consumer(1).removedtags(0)); + var log = config.consumer(1); + assertEquals("log", log.name()); + assertEquals("logdefault", 
log.tags().get(0)); + assertEquals("loadtype", log.removedtags(0)); - assertEquals("yamas", config.consumer(2).name()); - assertEquals("yamasdefault", config.consumer(2).tags().get(0)); - assertEquals("loadtype", config.consumer(2).removedtags(0)); + var yamas = config.consumer(2); + assertEquals("yamas", yamas.name()); + assertEquals("yamasdefault", yamas.tags().get(0)); + assertEquals("loadtype", yamas.removedtags(0)); assertEquals("health", config.consumer(3).name()); - assertEquals("statereporter", config.consumer(5).name()); - assertEquals("*", config.consumer(5).addedmetrics(0)); - assertEquals("thread", config.consumer(5).removedtags(0)); - assertEquals("partofsum", config.consumer(5).removedtags(1)); - assertEquals(0, config.consumer(5).tags().size()); + var stateReporter = config.consumer(4); + assertEquals("statereporter", stateReporter.name()); + assertEquals("*", stateReporter.addedmetrics(0)); + assertEquals("thread", stateReporter.removedtags(0)); + assertEquals("partofsum", stateReporter.removedtags(1)); + assertEquals(0, stateReporter.tags().size()); cluster.getStorageCluster().getConfig(builder); config = new MetricsmanagerConfig(builder); - assertEquals(6, config.consumer().size()); - - assertEquals("fleetcontroller", config.consumer(4).name()); - assertEquals(4, config.consumer(4).addedmetrics().size()); - assertEquals("vds.datastored.alldisks.docs", config.consumer(4).addedmetrics(0)); - assertEquals("vds.datastored.alldisks.bytes", config.consumer(4).addedmetrics(1)); - assertEquals("vds.datastored.alldisks.buckets", config.consumer(4).addedmetrics(2)); - assertEquals("vds.datastored.bucket_space.buckets_total", config.consumer(4).addedmetrics(3)); + assertEquals(5, config.consumer().size()); } public MetricsmanagerConfig.Consumer getConsumer(String consumer, MetricsmanagerConfig config) { @@ -872,13 +869,6 @@ public class ContentClusterTest extends ContentBaseTest { String actual = getConsumer("log", 
config).addedmetrics().toString().replaceAll(", ", "\n"); assertEquals(expected, actual); assertEquals("[logdefault]", getConsumer("log", config).tags().toString()); - expected = - "[vds.datastored.alldisks.docs\n" + - "vds.datastored.alldisks.bytes\n" + - "vds.datastored.alldisks.buckets\n" + - "vds.datastored.bucket_space.buckets_total]"; - actual = getConsumer("fleetcontroller", config).addedmetrics().toString().replaceAll(", ", "\n"); - assertEquals(expected, actual); } { diff --git a/config-model/src/test/java/com/yahoo/vespa/model/ml/ModelEvaluationTest.java b/config-model/src/test/java/com/yahoo/vespa/model/ml/ModelEvaluationTest.java index fc70a65b394..137907cb003 100644 --- a/config-model/src/test/java/com/yahoo/vespa/model/ml/ModelEvaluationTest.java +++ b/config-model/src/test/java/com/yahoo/vespa/model/ml/ModelEvaluationTest.java @@ -84,6 +84,30 @@ public class ModelEvaluationTest { } } + @Test + void testContainerSpecificModelSettings() { + Path appDir = Path.fromString("src/test/cfg/application/onnx_cluster_specific"); + try { + ImportedModelTester tester = new ImportedModelTester("mul", appDir); + VespaModel model = tester.createVespaModel(); + OnnxModelsConfig.Model c1Model = getOnnxModelsConfig(model.getContainerClusters().get("c1")); + OnnxModelsConfig.Model c2Model = getOnnxModelsConfig(model.getContainerClusters().get("c2")); + assertEquals(2, c1Model.stateless_intraop_threads()); + assertEquals(4, c2Model.stateless_intraop_threads()); + assertEquals(0, c1Model.gpu_device()); + assertEquals(1, c2Model.gpu_device()); + } finally { + IOUtils.recursiveDeleteDir(appDir.append(ApplicationPackage.MODELS_GENERATED_DIR).toFile()); + } + + } + + private OnnxModelsConfig.Model getOnnxModelsConfig(ApplicationContainerCluster cluster) { + OnnxModelsConfig.Builder ob = new OnnxModelsConfig.Builder(); + cluster.getConfig(ob); + return new OnnxModelsConfig(ob).model(0); + } + private void assertHasMlModels(VespaModel model, Path appDir) { 
ApplicationContainerCluster cluster = model.getContainerClusters().get("container"); assertNotNull(cluster.getComponentsMap().get(new ComponentId(ModelsEvaluator.class.getName()))); diff --git a/config/pom.xml b/config/pom.xml index 9f3ec28b54b..e9d19e8e9cb 100755 --- a/config/pom.xml +++ b/config/pom.xml @@ -80,11 +80,6 @@ <scope>test</scope> </dependency> <dependency> - <groupId>com.google.guava</groupId> - <artifactId>guava-testlib</artifactId> - <scope>test</scope> - </dependency> - <dependency> <groupId>com.yahoo.vespa</groupId> <artifactId>testutil</artifactId> <version>${project.version}</version> diff --git a/config/src/test/java/com/yahoo/vespa/config/protocol/PayloadTest.java b/config/src/test/java/com/yahoo/vespa/config/protocol/PayloadTest.java index af6aefb26e1..31d9ebd01e8 100644 --- a/config/src/test/java/com/yahoo/vespa/config/protocol/PayloadTest.java +++ b/config/src/test/java/com/yahoo/vespa/config/protocol/PayloadTest.java @@ -1,7 +1,6 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.vespa.config.protocol; -import com.google.common.testing.EqualsTester; import com.yahoo.slime.Slime; import com.yahoo.text.Utf8Array; import com.yahoo.vespa.config.ConfigPayload; @@ -11,6 +10,7 @@ import org.junit.Test; import java.nio.charset.StandardCharsets; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotEquals; /** * @author Ulf Lilleengen @@ -38,36 +38,36 @@ public class PayloadTest { Payload a = Payload.from(foo1); Payload b = Payload.from(foo1); + assertEquals(a, b); Payload c = Payload.from(foo2); + assertNotEquals(a, c); Slime slime = new Slime(); slime.setString(foo1); Payload d = Payload.from(new ConfigPayload(slime)); + assertNotEquals(a, d); slime.setString(foo1); Payload e = Payload.from(new ConfigPayload(slime)); + assertEquals(d, e); slime.setString("foo 2"); Payload f = Payload.from(new ConfigPayload(slime)); + assertNotEquals(c, f); - Payload g, h, i, j; - g = Payload.from(new Utf8Array(foo1.getBytes(StandardCharsets.UTF_8)), CompressionInfo.uncompressed()); - h = Payload.from(new Utf8Array(foo1.getBytes(StandardCharsets.UTF_8)), CompressionInfo.uncompressed()); + Payload g = Payload.from(new Utf8Array(foo1.getBytes(StandardCharsets.UTF_8)), CompressionInfo.uncompressed()); + Payload h = Payload.from(new Utf8Array(foo1.getBytes(StandardCharsets.UTF_8)), CompressionInfo.uncompressed()); + assertEquals(a, g); + assertEquals(g, h); LZ4PayloadCompressor compressor = new LZ4PayloadCompressor(); CompressionInfo info = CompressionInfo.create(CompressionType.LZ4, foo2.length()); Utf8Array compressed = new Utf8Array(compressor.compress(foo2.getBytes())); - i = Payload.from(compressed, info); - j = Payload.from(compressed, info); - - new EqualsTester() - .addEqualityGroup(a, b, g, h) - .addEqualityGroup(c) - .addEqualityGroup(d, e) - .addEqualityGroup(f) - .addEqualityGroup(i, j). 
- testEquals(); + Payload i = Payload.from(compressed, info); + Payload j = Payload.from(compressed, info); + assertEquals(i, j); + assertNotEquals(c, j); } } diff --git a/configserver-flags/pom.xml b/configserver-flags/pom.xml index 02824f2e6e3..02395fc3559 100644 --- a/configserver-flags/pom.xml +++ b/configserver-flags/pom.xml @@ -58,7 +58,7 @@ <groupId>com.google.inject</groupId> <artifactId>guice</artifactId> <scope>provided</scope> - <classifier>no_aop</classifier> + <exclusions> <exclusion> <groupId>junit</groupId> diff --git a/configserver/pom.xml b/configserver/pom.xml index bfef0748989..95ab31155ce 100644 --- a/configserver/pom.xml +++ b/configserver/pom.xml @@ -12,78 +12,6 @@ <packaging>container-plugin</packaging> <version>8-SNAPSHOT</version> <dependencies> - - <!-- BEGIN Jersey deps. - TODO: Vespa > 8, and provision-controller does not import any Jersey related packages: - Remove, and remove all package-info.java files for jersey/jackson packages. --> - - <dependency> - <groupId>com.fasterxml.jackson.jaxrs</groupId> - <artifactId>jackson-jaxrs-json-provider</artifactId> - <version>${jackson2.vespa.version}</version> - <exclusions> - <exclusion> - <!-- Conflicts with javax.activation:javax.activation-api:1.2.0, which is "exported" via jdisc_core. 
--> - <groupId>jakarta.activation</groupId> - <artifactId>jakarta.activation-api</artifactId> - </exclusion> - <exclusion> - <!-- Conflicts with javax.xml.bind:jaxb-api:2.3, which is "exported" via jdisc_core.--> - <groupId>jakarta.xml.bind</groupId> - <artifactId>jakarta.xml.bind-api</artifactId> - </exclusion> - </exclusions> - </dependency> - <dependency> - <groupId>javax.ws.rs</groupId> - <artifactId>javax.ws.rs-api</artifactId> - <scope>provided</scope> - </dependency> - <dependency> - <groupId>org.glassfish.jersey.core</groupId> - <artifactId>jersey-client</artifactId> - <version>${jersey.vespa.version}</version> - </dependency> - <dependency> - <groupId>org.glassfish.jersey.core</groupId> - <artifactId>jersey-server</artifactId> - <version>${jersey.vespa.version}</version> - <exclusions> - <exclusion> - <groupId>org.glassfish.jersey.media</groupId> - <artifactId>jersey-media-jaxb</artifactId> - </exclusion> - </exclusions> - </dependency> - <dependency> - <groupId>org.glassfish.jersey.ext</groupId> - <artifactId>jersey-proxy-client</artifactId> - <version>${jersey.vespa.version}</version> - </dependency> - <dependency> - <groupId>org.glassfish.jersey.media</groupId> - <artifactId>jersey-media-json-jackson</artifactId> - <version>${jersey.vespa.version}</version> - <exclusions> - <!-- Prevent embedding deps provided by jdisc --> - <exclusion> - <groupId>com.fasterxml.jackson.core</groupId> - <artifactId>jackson-annotations</artifactId> - </exclusion> - <exclusion> - <groupId>com.fasterxml.jackson.core</groupId> - <artifactId>jackson-core</artifactId> - </exclusion> - </exclusions> - </dependency> - <dependency> - <groupId>org.glassfish.jersey.media</groupId> - <artifactId>jersey-media-multipart</artifactId> - <version>${jersey.vespa.version}</version> - </dependency> - - <!-- END Jersey deps --> - <dependency> <groupId>org.hamcrest</groupId> <artifactId>hamcrest</artifactId> @@ -95,8 +23,8 @@ <scope>test</scope> </dependency> <dependency> - 
<groupId>com.github.tomakehurst</groupId> - <artifactId>wiremock-jre8-standalone</artifactId> + <groupId>org.wiremock</groupId> + <artifactId>wiremock-standalone</artifactId> <scope>test</scope> </dependency> <dependency> @@ -288,11 +216,7 @@ <artifactId>bundle-plugin</artifactId> <extensions>true</extensions> <configuration> - <!-- TODO: Vespa > 8: remove importPackage when the jackson-jaxrs-json-provider bundle is no longer installed in jdisc --> - <!-- added to ensure using the same class as orchestrator, core-dump-reporter, provision-controller and controller-clients --> - <importPackage>com.fasterxml.jackson.jaxrs.json;version="[2.12.6,3)"</importPackage> - <allowEmbeddedArtifacts>com.fasterxml.jackson.core:jackson-annotations, com.fasterxml.jackson.core:jackson-core, - com.yahoo.vespa:airlift-zstd</allowEmbeddedArtifacts> + <allowEmbeddedArtifacts>com.yahoo.vespa:airlift-zstd</allowEmbeddedArtifacts> </configuration> </plugin> <plugin> diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/ContainerEndpointSerializer.java b/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/ContainerEndpointSerializer.java index b813d56b345..33f47bbc154 100644 --- a/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/ContainerEndpointSerializer.java +++ b/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/ContainerEndpointSerializer.java @@ -11,6 +11,7 @@ import com.yahoo.slime.SlimeUtils; import java.util.ArrayList; import java.util.List; +import java.util.OptionalInt; /** * Contains all methods for de-/serializing ContainerEndpoints to/from JSON. 
@@ -32,48 +33,48 @@ public class ContainerEndpointSerializer { private static final String namesField = "names"; private static final String weightField = "weight"; private static final String routingMethodField = "routingMethod"; + private static final String authMethodField = "authMethod"; private ContainerEndpointSerializer() {} public static ContainerEndpoint endpointFromSlime(Inspector inspector) { - final var clusterId = inspector.field(clusterIdField).asString(); - final var scope = inspector.field(scopeField).asString(); - final var namesInspector = inspector.field(namesField); - final var weight = SlimeUtils.optionalInteger(inspector.field(weightField)); + String clusterId = inspector.field(clusterIdField).asString(); + String scope = inspector.field(scopeField).asString(); + Inspector namesInspector = inspector.field(namesField); + OptionalInt weight = SlimeUtils.optionalInteger(inspector.field(weightField)); // assign default routingmethod. Remove when 7.507 is latest version - // Cannot be used before all endpoints are assigned explicit routingmethod (from controller) - final var routingMethod = SlimeUtils.optionalString(inspector.field(routingMethodField)).orElse(ApplicationClusterEndpoint.RoutingMethod.sharedLayer4.name()); + // Cannot be used before all endpoints are assigned explicit routing method (from controller) + ApplicationClusterEndpoint.RoutingMethod routingMethod = SlimeUtils.optionalString(inspector.field(routingMethodField)) + .map(ContainerEndpointSerializer::routingMethodFrom) + .orElse(ApplicationClusterEndpoint.RoutingMethod.sharedLayer4); + ApplicationClusterEndpoint.AuthMethod authMethod = SlimeUtils.optionalString(inspector.field(authMethodField)) + .map(ContainerEndpointSerializer::authMethodFrom) + .orElse(ApplicationClusterEndpoint.AuthMethod.mtls); if (clusterId.isEmpty()) { throw new IllegalStateException("'clusterId' missing on serialized ContainerEndpoint"); } - if (scope.isEmpty()) { throw new IllegalStateException("'scope' 
missing on serialized ContainerEndpoint"); } - - if (! namesInspector.valid()) { + if (!namesInspector.valid()) { throw new IllegalStateException("'names' missing on serialized ContainerEndpoint"); } - if(routingMethod.isEmpty()) { - throw new IllegalStateException("'routingMethod' missing on serialized ContainerEndpoint"); - } - - final var names = new ArrayList<String>(); + List<String> names = new ArrayList<>(); namesInspector.traverse((ArrayTraverser) (idx, nameInspector) -> { final var containerName = nameInspector.asString(); names.add(containerName); }); - return new ContainerEndpoint(clusterId, ApplicationClusterEndpoint.Scope.valueOf(scope), names, weight, - ApplicationClusterEndpoint.RoutingMethod.valueOf(routingMethod)); + return new ContainerEndpoint(clusterId, scopeFrom(scope), names, weight, routingMethod, authMethod); } public static List<ContainerEndpoint> endpointListFromSlime(Slime slime) { final var inspector = slime.get(); return endpointListFromSlime(inspector); } + public static List<ContainerEndpoint> endpointListFromSlime(Inspector inspector) { final var endpoints = new ArrayList<ContainerEndpoint>(); @@ -88,11 +89,12 @@ public class ContainerEndpointSerializer { public static void endpointToSlime(Cursor cursor, ContainerEndpoint endpoint) { cursor.setString(clusterIdField, endpoint.clusterId()); - cursor.setString(scopeField, endpoint.scope().name()); + cursor.setString(scopeField, asString(endpoint.scope())); endpoint.weight().ifPresent(w -> cursor.setLong(weightField, w)); final var namesInspector = cursor.setArray(namesField); endpoint.names().forEach(namesInspector::addString); - cursor.setString(routingMethodField, endpoint.routingMethod().name()); + cursor.setString(routingMethodField, asString(endpoint.routingMethod())); + cursor.setString(authMethodField, asString(endpoint.authMethod())); } public static Slime endpointListToSlime(List<ContainerEndpoint> endpoints) { @@ -107,4 +109,53 @@ public class ContainerEndpointSerializer { 
return slime; } + private static ApplicationClusterEndpoint.RoutingMethod routingMethodFrom(String s) { + return switch (s) { + case "shared" -> ApplicationClusterEndpoint.RoutingMethod.shared; + case "sharedLayer4" -> ApplicationClusterEndpoint.RoutingMethod.sharedLayer4; + case "exclusive" -> ApplicationClusterEndpoint.RoutingMethod.exclusive; + default -> throw new IllegalArgumentException("Unknown routing method '" + s + "'"); + }; + } + + private static ApplicationClusterEndpoint.AuthMethod authMethodFrom(String s) { + return switch (s) { + case "mtls" -> ApplicationClusterEndpoint.AuthMethod.mtls; + case "token" -> ApplicationClusterEndpoint.AuthMethod.token; + default -> throw new IllegalArgumentException("Unknown auth method '" + s + "'"); + }; + } + + private static ApplicationClusterEndpoint.Scope scopeFrom(String s) { + return switch (s) { + case "global" -> ApplicationClusterEndpoint.Scope.global; + case "application" -> ApplicationClusterEndpoint.Scope.application; + case "zone" -> ApplicationClusterEndpoint.Scope.zone; + default -> throw new IllegalArgumentException("Unknown scope '" + s + "'"); + }; + } + + private static String asString(ApplicationClusterEndpoint.RoutingMethod routingMethod) { + return switch (routingMethod) { + case shared -> "shared"; + case sharedLayer4 -> "sharedLayer4"; + case exclusive -> "exclusive"; + }; + } + + private static String asString(ApplicationClusterEndpoint.AuthMethod authMethod) { + return switch (authMethod) { + case mtls -> "mtls"; + case token -> "token"; + }; + } + + private static String asString(ApplicationClusterEndpoint.Scope scope) { + return switch (scope) { + case global -> "global"; + case application -> "application"; + case zone -> "zone"; + }; + } + } diff --git a/configserver/src/main/java/org/glassfish/jersey/client/package-info.java b/configserver/src/main/java/org/glassfish/jersey/client/package-info.java deleted file mode 100644 index 151d9dbe952..00000000000 --- 
a/configserver/src/main/java/org/glassfish/jersey/client/package-info.java +++ /dev/null @@ -1,5 +0,0 @@ -@ExportPackage(version = @Version(major = 2, minor = 25)) -package org.glassfish.jersey.client; - -import com.yahoo.osgi.annotation.ExportPackage; -import com.yahoo.osgi.annotation.Version; diff --git a/configserver/src/main/java/org/glassfish/jersey/client/proxy/package-info.java b/configserver/src/main/java/org/glassfish/jersey/client/proxy/package-info.java deleted file mode 100644 index 9ac5941eb3d..00000000000 --- a/configserver/src/main/java/org/glassfish/jersey/client/proxy/package-info.java +++ /dev/null @@ -1,5 +0,0 @@ -@ExportPackage(version = @Version(major = 2, minor = 25)) -package org.glassfish.jersey.client.proxy; - -import com.yahoo.osgi.annotation.ExportPackage; -import com.yahoo.osgi.annotation.Version; diff --git a/configserver/src/main/java/org/glassfish/jersey/logging/package-info.java b/configserver/src/main/java/org/glassfish/jersey/logging/package-info.java deleted file mode 100644 index 6d9049e3c43..00000000000 --- a/configserver/src/main/java/org/glassfish/jersey/logging/package-info.java +++ /dev/null @@ -1,5 +0,0 @@ -@ExportPackage(version = @Version(major = 2, minor = 25)) -package org.glassfish.jersey.logging; - -import com.yahoo.osgi.annotation.ExportPackage; -import com.yahoo.osgi.annotation.Version; diff --git a/configserver/src/main/java/org/glassfish/jersey/media/multipart/file/package-info.java b/configserver/src/main/java/org/glassfish/jersey/media/multipart/file/package-info.java deleted file mode 100644 index 06248ca88c1..00000000000 --- a/configserver/src/main/java/org/glassfish/jersey/media/multipart/file/package-info.java +++ /dev/null @@ -1,5 +0,0 @@ -@ExportPackage(version = @Version(major = 2, minor = 25)) -package org.glassfish.jersey.media.multipart.file; - -import com.yahoo.osgi.annotation.ExportPackage; -import com.yahoo.osgi.annotation.Version; diff --git 
a/configserver/src/main/java/org/glassfish/jersey/media/multipart/package-info.java b/configserver/src/main/java/org/glassfish/jersey/media/multipart/package-info.java deleted file mode 100644 index 1faa237a7ea..00000000000 --- a/configserver/src/main/java/org/glassfish/jersey/media/multipart/package-info.java +++ /dev/null @@ -1,5 +0,0 @@ -@ExportPackage(version = @Version(major = 2, minor = 25)) -package org.glassfish.jersey.media.multipart; - -import com.yahoo.osgi.annotation.ExportPackage; -import com.yahoo.osgi.annotation.Version; diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/tenant/ContainerEndpointSerializerTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/tenant/ContainerEndpointSerializerTest.java index c8f31697c5e..5e7fe64f998 100644 --- a/configserver/src/test/java/com/yahoo/vespa/config/server/tenant/ContainerEndpointSerializerTest.java +++ b/configserver/src/test/java/com/yahoo/vespa/config/server/tenant/ContainerEndpointSerializerTest.java @@ -8,7 +8,6 @@ import org.junit.Test; import java.util.List; import java.util.OptionalInt; -import java.util.OptionalLong; import static org.junit.Assert.assertEquals; @@ -46,11 +45,9 @@ public class ContainerEndpointSerializerTest { @Test public void writeReadEndpoints() { - final var endpoints = List.of(new ContainerEndpoint("foo", ApplicationClusterEndpoint.Scope.global, List.of("a", "b"), OptionalInt.of(3), ApplicationClusterEndpoint.RoutingMethod.shared)); - final var serialized = ContainerEndpointSerializer.endpointListToSlime(endpoints); - final var deserialized = ContainerEndpointSerializer.endpointListFromSlime(serialized); - - assertEquals(endpoints, deserialized); + List<ContainerEndpoint> endpoints = List.of(new ContainerEndpoint("foo", ApplicationClusterEndpoint.Scope.global, List.of("a", "b"), OptionalInt.of(3), + ApplicationClusterEndpoint.RoutingMethod.shared, ApplicationClusterEndpoint.AuthMethod.token)); + assertEquals(endpoints, 
ContainerEndpointSerializer.endpointListFromSlime(ContainerEndpointSerializer.endpointListToSlime(endpoints))); } } diff --git a/container-core/pom.xml b/container-core/pom.xml index 3df232406ca..549b3ad5953 100644 --- a/container-core/pom.xml +++ b/container-core/pom.xml @@ -290,7 +290,7 @@ <dependency> <groupId>com.google.inject</groupId> <artifactId>guice</artifactId> - <classifier>no_aop</classifier> + <scope>provided</scope> </dependency> <dependency> @@ -373,8 +373,8 @@ <!-- TEST scope --> <dependency> - <groupId>com.github.tomakehurst</groupId> - <artifactId>wiremock-jre8-standalone</artifactId> + <groupId>org.wiremock</groupId> + <artifactId>wiremock-standalone</artifactId> <scope>test</scope> </dependency> <dependency> diff --git a/container-dependencies-enforcer/pom.xml b/container-dependencies-enforcer/pom.xml index 3c991823d5d..fa1212a5b12 100644 --- a/container-dependencies-enforcer/pom.xml +++ b/container-dependencies-enforcer/pom.xml @@ -73,20 +73,19 @@ <include>com.google.guava:failureaccess:[1.0.1, 2):provided</include> <include>com.google.j2objc:j2objc-annotations:[2.8, 3):provided</include> - <include>com.google.inject:guice:jar:no_aop:${guice.vespa.version}:provided</include> + <include>com.google.inject:guice:jar:${guice.vespa.version}:provided</include> <include>com.sun.activation:javax.activation:[1.2.0, 2):provided</include> - <include>com.sun.xml.bind:jaxb-core:${jaxb.vespa.version}:provided</include> - <include>com.sun.xml.bind:jaxb-impl:${jaxb.vespa.version}:provided</include> + <include>com.sun.xml.bind:jaxb-core:${jaxb-core.vespa.version}:provided</include> + <include>com.sun.xml.bind:jaxb-impl:${jaxb-impl.vespa.version}:provided</include> <include>commons-logging:commons-logging:${commons-logging.vespa.version}:provided</include> <include>javax.inject:javax.inject:${javax.inject.vespa.version}:provided</include> <include>javax.servlet:javax.servlet-api:${javax.servlet-api.vespa.version}:provided</include> 
<include>javax.ws.rs:javax.ws.rs-api:${javax.ws.rs-api.vespa.version}:provided</include> - <include>javax.xml.bind:jaxb-api:${jaxb.vespa.version}:provided</include> + <include>javax.xml.bind:jaxb-api:${jaxb-api.vespa.version}:provided</include> <include>org.slf4j:jcl-over-slf4j:${slf4j.vespa.version}:provided</include> <include>org.slf4j:log4j-over-slf4j:${slf4j.vespa.version}:provided</include> <include>org.slf4j:slf4j-api:${slf4j.vespa.version}:provided</include> <include>org.slf4j:slf4j-jdk14:${slf4j.vespa.version}:provided</include> - <include>xml-apis:xml-apis:${xml-apis.vespa.version}:provided</include> <!-- Vespa provided dependencies --> <include>com.yahoo.vespa:annotations:*:provided</include> @@ -163,6 +162,7 @@ <include>io.prometheus:simpleclient_tracer_common:${prometheus.client.vespa.version}:test</include> <include>io.prometheus:simpleclient_tracer_otel:${prometheus.client.vespa.version}:test</include> <include>io.prometheus:simpleclient_tracer_otel_agent:${prometheus.client.vespa.version}:test</include> + <include>jakarta.inject:jakarta.inject-api:${jakarta.inject.vespa.version}:test</include> <include>junit:junit:${junit4.vespa.version}:test</include> <include>net.java.dev.jna:jna:${jna.vespa.version}:test</include> <include>net.openhft:zero-allocation-hashing:jar:${zero-allocation-hashing.vespa.version}:test</include> @@ -203,8 +203,6 @@ <include>org.hdrhistogram:HdrHistogram:${hdrhistogram.vespa.version}:test</include> <include>org.json:json:${org.json.vespa.version}:test</include> <!-- TODO: Remove on Vespa 9 --> <include>org.lz4:lz4-java:${org.lz4.vespa.version}:test</include> - <include>org.osgi:org.osgi.compendium:[4.1.0, 5):test</include> - <include>org.osgi:org.osgi.core:[4.1.0, 5):test</include> <include>xerces:xercesImpl:${xerces.vespa.version}:test</include> </allowed> </enforceDependencies> diff --git a/container-dependency-versions/pom.xml b/container-dependency-versions/pom.xml index 72af951d965..cf4d0bbe851 100644 --- 
a/container-dependency-versions/pom.xml +++ b/container-dependency-versions/pom.xml @@ -83,12 +83,6 @@ <version>${guice.vespa.version}</version> </dependency> <dependency> - <groupId>com.google.inject</groupId> - <artifactId>guice</artifactId> - <version>${guice.vespa.version}</version> - <classifier>no_aop</classifier> - </dependency> - <dependency> <groupId>commons-logging</groupId> <artifactId>commons-logging</artifactId> <version>${commons-logging.vespa.version}</version> @@ -116,19 +110,19 @@ <!-- TODO Vespa 9: stop exporting/providing and move to parent? --> <groupId>javax.xml.bind</groupId> <artifactId>jaxb-api</artifactId> - <version>${jaxb.vespa.version}</version> + <version>${jaxb-api.vespa.version}</version> </dependency> <dependency> <!-- TODO Vespa 9: stop exporting/providing and move to parent? --> <groupId>com.sun.xml.bind</groupId> <artifactId>jaxb-core</artifactId> - <version>${jaxb.vespa.version}</version> + <version>${jaxb-core.vespa.version}</version> </dependency> <dependency> <!-- TODO Vespa 9: stop exporting/providing and move to parent? 
--> <groupId>com.sun.xml.bind</groupId> <artifactId>jaxb-impl</artifactId> - <version>${jaxb.vespa.version}</version> + <version>${jaxb-impl.vespa.version}</version> </dependency> <dependency> <!-- Needed by jaxb-api, and possibly guice --> @@ -159,11 +153,6 @@ <artifactId>slf4j-jdk14</artifactId> <version>${slf4j.vespa.version}</version> </dependency> - <dependency> - <groupId>xml-apis</groupId> - <artifactId>xml-apis</artifactId> - <version>${xml-apis.vespa.version}</version> - </dependency> </dependencies> </dependencyManagement> diff --git a/container-dev/pom.xml b/container-dev/pom.xml index 0120a6c82fe..76ed8b1e3d4 100644 --- a/container-dev/pom.xml +++ b/container-dev/pom.xml @@ -33,6 +33,10 @@ <groupId>org.jvnet.hudson</groupId> <artifactId>annotation-indexer</artifactId> </exclusion> + <exclusion> + <groupId>javax.activation</groupId> + <artifactId>javax.activation-api</artifactId> + </exclusion> </exclusions> </dependency> <dependency> @@ -51,7 +55,12 @@ <dependency> <groupId>com.google.inject</groupId> <artifactId>guice</artifactId> - <classifier>no_aop</classifier> + <exclusions> + <exclusion> + <groupId>jakarta.inject</groupId> + <artifactId>jakarta.inject-api</artifactId> + </exclusion> + </exclusions> </dependency> <dependency> <groupId>net.java.dev.jna</groupId> diff --git a/container-messagebus/pom.xml b/container-messagebus/pom.xml index 2ad5633b7dc..38a2c8e2b78 100644 --- a/container-messagebus/pom.xml +++ b/container-messagebus/pom.xml @@ -19,7 +19,7 @@ <dependency> <groupId>com.google.inject</groupId> <artifactId>guice</artifactId> - <classifier>no_aop</classifier> + <scope>provided</scope> </dependency> <dependency> diff --git a/container-search-and-docproc/pom.xml b/container-search-and-docproc/pom.xml index 71d547ecacd..3137fd449a4 100644 --- a/container-search-and-docproc/pom.xml +++ b/container-search-and-docproc/pom.xml @@ -101,7 +101,7 @@ <dependency> <groupId>com.google.inject</groupId> <artifactId>guice</artifactId> - 
<classifier>no_aop</classifier> + <scope>provided</scope> </dependency> <dependency> diff --git a/container-search/abi-spec.json b/container-search/abi-spec.json index cdb660f294a..57d455a02f0 100644 --- a/container-search/abi-spec.json +++ b/container-search/abi-spec.json @@ -2169,13 +2169,14 @@ ], "methods" : [ "public void <init>(com.yahoo.search.cluster.NodeManager, boolean)", + "public synchronized void reconfigure(java.util.Collection)", "public void start()", "public com.yahoo.search.cluster.MonitorConfiguration getConfiguration()", "public boolean isClosed()", "public void add(java.lang.Object, boolean)", "public synchronized void failed(java.lang.Object, com.yahoo.search.result.ErrorMessage)", "public synchronized void responded(java.lang.Object)", - "public void ping(java.util.concurrent.Executor)", + "public synchronized void ping(java.util.concurrent.Executor)", "public java.util.Iterator nodeMonitorIterator()", "public java.util.List nodeMonitors()", "public void shutdown()" diff --git a/container-search/src/main/java/com/yahoo/search/cluster/ClusterMonitor.java b/container-search/src/main/java/com/yahoo/search/cluster/ClusterMonitor.java index 332bf4ea2c4..d81f9079a02 100644 --- a/container-search/src/main/java/com/yahoo/search/cluster/ClusterMonitor.java +++ b/container-search/src/main/java/com/yahoo/search/cluster/ClusterMonitor.java @@ -3,16 +3,22 @@ package com.yahoo.search.cluster; import com.yahoo.concurrent.ThreadFactoryFactory; import com.yahoo.search.result.ErrorMessage; +import com.yahoo.yolean.UncheckedInterruptedException; +import java.util.Collection; import java.util.Collections; import java.util.Iterator; +import java.util.LinkedHashMap; +import java.util.LinkedHashSet; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.concurrent.Executor; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; import 
java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.Consumer; import java.util.logging.Level; import java.util.logging.Logger; @@ -36,7 +42,16 @@ public class ClusterMonitor<T> { private final AtomicBoolean closed = new AtomicBoolean(false); /** A map from Node to corresponding MonitoredNode */ - private final Map<T, TrafficNodeMonitor<T>> nodeMonitors = Collections.synchronizedMap(new java.util.LinkedHashMap<>()); + private final Map<T, TrafficNodeMonitor<T>> nodeMonitors = Collections.synchronizedMap(new LinkedHashMap<>()); + + // Used during reconfiguration to ensure async RPC calls are complete. + private final Set<T> nodesToRemove = new LinkedHashSet<>(); + + // Used during reconfiguration to ensure all nodes have data. + private final Set<T> nodesToUpdate = new LinkedHashSet<>(); + + // Used for reconfiguration, and during shutdown. + private boolean skipNextWait = false; public ClusterMonitor(NodeManager<T> manager, boolean startPingThread) { nodeManager = manager; @@ -46,6 +61,22 @@ public class ClusterMonitor<T> { } } + /** Updates the monitored set of nodes, and waits for 1. data on new nodes, and 2. RPC completion of removed nodes. */ + public synchronized void reconfigure(Collection<T> nodes) { + if ( ! monitorThread.isAlive()) throw new IllegalStateException("monitor thread must be alive for reconfiguration"); + + nodesToUpdate.addAll(nodes); + nodesToRemove.addAll(nodeMonitors.keySet()); + nodesToRemove.removeAll(nodes); + for (T node : nodes) if ( ! nodeMonitors.containsKey(node)) add(node, true); + + synchronized (nodeManager) { skipNextWait = true; nodeManager.notifyAll(); } + try { while ( ! nodesToRemove.isEmpty() || ! nodesToUpdate.isEmpty()) wait(1); } + catch (InterruptedException e) { throw new UncheckedInterruptedException(e, true); } + + nodeManager.pingIterationCompleted(); + } + public void start() { if ( ! 
monitorThread.isAlive()) { monitorThread.start(); @@ -74,30 +105,48 @@ public class ClusterMonitor<T> { /** Called from ClusterSearcher/NodeManager when a node failed */ public synchronized void failed(T node, ErrorMessage error) { - if (closed.get()) return; // Do not touch state if close has started. - TrafficNodeMonitor<T> monitor = nodeMonitors.get(node); - Boolean wasWorking = monitor.isKnownWorking(); - monitor.failed(error); - if (wasWorking != monitor.isKnownWorking()) - nodeManager.failed(node); + updateMonitoredNode(node, monitor -> monitor.failed(error), nodeManager::failed); } /** Called when a node responded */ public synchronized void responded(T node) { - if (closed.get()) return; // Do not touch state if close has started. + updateMonitoredNode(node, TrafficNodeMonitor::responded, nodeManager::working); + } + + private void updateMonitoredNode(T node, Consumer<TrafficNodeMonitor<T>> monitorUpdate, Consumer<T> nodeUpdate) { TrafficNodeMonitor<T> monitor = nodeMonitors.get(node); - Boolean wasWorking = monitor.isKnownWorking(); - monitor.responded(); - if (wasWorking != monitor.isKnownWorking()) - nodeManager.working(node); + + // Don't touch state during shutdown. + if (closed.get()) monitor = null; + + // Node was removed during reconfiguration, and should no longer be monitored. + if (nodesToRemove.remove(node)) { + nodeMonitors.remove(node); + monitor = null; + } + + // Update monitor state only when it actually changes. + if (monitor != null) { + Boolean wasWorking = monitor.isKnownWorking(); + monitorUpdate.accept(monitor); + if (wasWorking != monitor.isKnownWorking()) + nodeUpdate.accept(node); + } + + // If the node was added in a recent reconfiguration, we now have its required data. 
+ nodesToUpdate.remove(node); } /** * Ping all nodes which needs pinging to discover state changes */ - public void ping(Executor executor) { + public synchronized void ping(Executor executor) { for (var monitor : nodeMonitors()) { if (closed.get()) return; // Do nothing to change state if close has started. + if (nodesToRemove.remove(monitor.getNode())) { + nodeMonitors.remove(monitor.getNode()); + continue; + } nodeManager.ping(this, monitor.getNode(), executor); } nodeManager.pingIterationCompleted(); @@ -120,6 +169,7 @@ public class ClusterMonitor<T> { nodeMonitors.clear(); } synchronized (nodeManager) { + skipNextWait = true; nodeManager.notifyAll(); } try { @@ -148,7 +198,9 @@ public class ClusterMonitor<T> { log.finest("Activating ping"); ping(pingExecutor); synchronized (nodeManager) { - nodeManager.wait(configuration.getCheckInterval()); + if ( ! skipNextWait) + nodeManager.wait(configuration.getCheckInterval()); + skipNextWait = false; } } catch (Throwable e) { diff --git a/container-search/src/main/java/com/yahoo/search/dispatch/Dispatcher.java b/container-search/src/main/java/com/yahoo/search/dispatch/Dispatcher.java index 6f6b0fc2b79..43d0e08886d 100644 --- a/container-search/src/main/java/com/yahoo/search/dispatch/Dispatcher.java +++ b/container-search/src/main/java/com/yahoo/search/dispatch/Dispatcher.java @@ -60,20 +60,19 @@ public class Dispatcher extends AbstractComponent { private final DispatchConfig dispatchConfig; private final RpcConnectionPool rpcResourcePool; private final SearchCluster searchCluster; + private final ClusterMonitor<Node> clusterMonitor; private volatile VolatileItems volatileItems; private static class VolatileItems { final LoadBalancer loadBalancer; final InvokerFactory invokerFactory; - final ClusterMonitor<Node> clusterMonitor; final AtomicInteger inflight = new AtomicInteger(1); // Initial reference. 
Runnable cleanup = () -> { }; - VolatileItems(LoadBalancer loadBalancer, InvokerFactory invokerFactory, ClusterMonitor<Node> clusterMonitor) { + VolatileItems(LoadBalancer loadBalancer, InvokerFactory invokerFactory) { this.loadBalancer = loadBalancer; this.invokerFactory = invokerFactory; - this.clusterMonitor = clusterMonitor; } private void countDown() { @@ -121,14 +120,14 @@ public class Dispatcher extends AbstractComponent { DispatchNodesConfig nodesConfig, VipStatus vipStatus, InvokerFactoryFactory invokerFactories) { this(dispatchConfig, rpcConnectionPool, new SearchCluster(clusterId.stringValue(), dispatchConfig.minActivedocsPercentage(), - toNodes(nodesConfig), vipStatus, new RpcPingFactory(rpcConnectionPool)), + toNodes(clusterId.stringValue(), nodesConfig), vipStatus, new RpcPingFactory(rpcConnectionPool)), invokerFactories); } Dispatcher(DispatchConfig dispatchConfig, RpcConnectionPool rpcConnectionPool, SearchCluster searchCluster, InvokerFactoryFactory invokerFactories) { this(dispatchConfig, rpcConnectionPool, searchCluster, new ClusterMonitor<>(searchCluster, false), invokerFactories); - this.volatileItems.clusterMonitor.start(); // Populate nodes to monitor before starting it. + this.clusterMonitor.start(); // Populate nodes to monitor before starting it. } Dispatcher(DispatchConfig dispatchConfig, RpcConnectionPool rpcConnectionPool, @@ -137,7 +136,8 @@ public class Dispatcher extends AbstractComponent { this.rpcResourcePool = rpcConnectionPool; this.searchCluster = searchCluster; this.invokerFactories = invokerFactories; - this.volatileItems = update(clusterMonitor); + this.clusterMonitor = clusterMonitor; + this.volatileItems = update(); searchCluster.addMonitoring(clusterMonitor); } @@ -171,7 +171,7 @@ public class Dispatcher extends AbstractComponent { * 3. 
The load balancer is owned by the volatile snapshot, and is swapped atomically with it; * it is used internally by the dispatcher to select search nodes for queries, and is discarded with its snapshot. * 4. The cluster monitor is a subordinate to the search cluster, and does whatever that tells it to, at any time; - * it is technically owned by the volatile snapshot, but mostly to show it is swapped together with that. + * it is technically owned by the dispatcher, but is updated by the search cluster, when that is updated. * 5. The search cluster is owned by the dispatcher, and is updated on node set changes; * its responsibility is to keep track of the state of the backend, and to provide a view of it to the dispatcher, * as well as keep the container vip status updated accordingly; it should therefore preserve as much as possible @@ -180,7 +180,7 @@ * under the assumption that this is the common case, i.e., new nodes have no documents yet. */ void updateWithNewConfig(DispatchNodesConfig nodesConfig) { - try (var items = volatileItems()) { // Marking a reference to the old snapshot, which we want to have cleaned up. + try (var items = volatileItems()) { // Mark a reference to the old snapshot, which we want to have cleaned up. items.get().countDown(); // Decrement for its initial creation reference, so it may reach 0. // Let the RPC pool know about the new nodes, and set up the delayed cleanup that we need to do. @@ -192,21 +192,16 @@ }; // Update the nodes the search cluster keeps track of, and what nodes are monitored. 
- ClusterMonitor<Node> newMonitor = searchCluster.updateNodes(toNodes(nodesConfig), dispatchConfig.minActivedocsPercentage()); + searchCluster.updateNodes(toNodes(searchCluster.name(), nodesConfig), clusterMonitor, dispatchConfig.minActivedocsPercentage()); // Update the snapshot to use the new nodes set in the search cluster; the RPC pool is ready for this. - this.volatileItems = update(newMonitor); - - // Wait for the old cluster monitor to die; it may be pinging nodes we want to shut down RPC connections to. - items.get().clusterMonitor.shutdown(); + this.volatileItems = update(); } // Close the old snapshot, which may trigger the RPC cleanup now, or when the last invoker is closed, by a search thread. } - private VolatileItems update(ClusterMonitor<Node> clusterMonitor) { - var items = new VolatileItems(new LoadBalancer(searchCluster.groupList().groups(), toLoadBalancerPolicy(dispatchConfig.distributionPolicy())), - invokerFactories.create(rpcResourcePool, searchCluster.groupList(), dispatchConfig), - clusterMonitor); - return items; + private VolatileItems update() { + return new VolatileItems(new LoadBalancer(searchCluster.groupList().groups(), toLoadBalancerPolicy(dispatchConfig.distributionPolicy())), + invokerFactories.create(rpcResourcePool, searchCluster.groupList(), dispatchConfig)); } private void initialWarmup(double warmupTime) { @@ -234,9 +229,9 @@ public class Dispatcher extends AbstractComponent { case LATENCY_AMORTIZED_OVER_TIME -> LoadBalancer.Policy.LATENCY_AMORTIZED_OVER_TIME; }; } - private static List<Node> toNodes(DispatchNodesConfig nodesConfig) { + private static List<Node> toNodes(String clusterName, DispatchNodesConfig nodesConfig) { return nodesConfig.node().stream() - .map(n -> new Node(n.key(), n.host(), n.group())) + .map(n -> new Node(clusterName, n.key(), n.host(), n.group())) .toList(); } @@ -255,7 +250,7 @@ public class Dispatcher extends AbstractComponent { @Override public void deconstruct() { // The clustermonitor must be 
shutdown first as it uses the invokerfactory through the searchCluster. - volatileItems.clusterMonitor.shutdown(); + clusterMonitor.shutdown(); if (rpcResourcePool != null) { rpcResourcePool.close(); } diff --git a/container-search/src/main/java/com/yahoo/search/dispatch/ReconfigurableDispatcher.java b/container-search/src/main/java/com/yahoo/search/dispatch/ReconfigurableDispatcher.java index c86c21d677f..2962fd3a3ec 100644 --- a/container-search/src/main/java/com/yahoo/search/dispatch/ReconfigurableDispatcher.java +++ b/container-search/src/main/java/com/yahoo/search/dispatch/ReconfigurableDispatcher.java @@ -1,5 +1,6 @@ package com.yahoo.search.dispatch; +import ai.vespa.cloud.SystemInfo; import com.yahoo.component.ComponentId; import com.yahoo.component.annotation.Inject; import com.yahoo.config.subscription.ConfigSubscriber; @@ -20,12 +21,12 @@ public class ReconfigurableDispatcher extends Dispatcher { private final ConfigSubscriber subscriber; @Inject - public ReconfigurableDispatcher(ComponentId clusterId, DispatchConfig dispatchConfig, QrConfig qrConfig, VipStatus vipStatus) { + public ReconfigurableDispatcher(ComponentId clusterId, DispatchConfig dispatchConfig, SystemInfo systemInfo, VipStatus vipStatus) { super(clusterId, dispatchConfig, new DispatchNodesConfig.Builder().build(), vipStatus); this.subscriber = new ConfigSubscriber(); CountDownLatch configured = new CountDownLatch(1); this.subscriber.subscribe(config -> { updateWithNewConfig(config); configured.countDown(); }, - DispatchNodesConfig.class, configId(clusterId, qrConfig)); + DispatchNodesConfig.class, configId(clusterId, systemInfo)); try { if ( ! 
configured.await(1, TimeUnit.MINUTES)) throw new IllegalStateException("timed out waiting for initial dispatch nodes config for " + clusterId.getName()); @@ -41,8 +42,8 @@ public class ReconfigurableDispatcher extends Dispatcher { super.deconstruct(); } - private static String configId(ComponentId clusterId, QrConfig qrConfig) { - return qrConfig.clustername() + "/component/" + clusterId.getName(); + private static String configId(ComponentId clusterId, SystemInfo systemInfo) { + return systemInfo.clusterName() + "/component/" + clusterId.getName(); } } diff --git a/container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/Node.java b/container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/Node.java index aeb04bfb141..31e02f910ee 100644 --- a/container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/Node.java +++ b/container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/Node.java @@ -12,6 +12,7 @@ import java.util.concurrent.atomic.AtomicLong; */ public class Node { + private final String clusterName; private final int key; private final String hostname; private final int group; @@ -25,7 +26,8 @@ public class Node { private volatile boolean working = true; private volatile boolean isBlockingWrites = false; - public Node(int key, String hostname, int group) { + public Node(String clusterName, int key, String hostname, int group) { + this.clusterName = clusterName; this.key = key; this.hostname = hostname; this.group = group; @@ -33,7 +35,7 @@ public class Node { /** Give a monotonically increasing sequence number.*/ public long createPingSequenceId() { return pingSequence.incrementAndGet(); } - /** Checks if this pong is received in line and accepted, or out of band and should be ignored..*/ + /** Checks if this pong is received in line and accepted, or out of band and should be ignored. */ public boolean isLastReceivedPong(long pingId ) { long last = lastPong.get(); while ((pingId > last) && ! 
lastPong.compareAndSet(last, pingId)) { @@ -103,8 +105,8 @@ public class Node { @Override public String toString() { - return "search node key = " + key + " hostname = "+ hostname + " path = " + pathIndex + " in group " + group + - " statusIsKnown = " + statusIsKnown + " working = " + working + + return "search node in cluster = " + clusterName + " key = " + key + " hostname = "+ hostname + + " path = " + pathIndex + " in group " + group + " statusIsKnown = " + statusIsKnown + " working = " + working + " activeDocs = " + getActiveDocuments() + " targetActiveDocs = " + getTargetActiveDocuments(); } diff --git a/container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/SearchCluster.java b/container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/SearchCluster.java index 3c8950f1f7f..f7a77ebf963 100644 --- a/container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/SearchCluster.java +++ b/container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/SearchCluster.java @@ -6,11 +6,10 @@ import com.yahoo.net.HostName; import com.yahoo.prelude.Pong; import com.yahoo.search.cluster.ClusterMonitor; import com.yahoo.search.cluster.NodeManager; -import com.yahoo.yolean.UncheckedInterruptedException; +import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; -import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Optional; @@ -18,6 +17,7 @@ import java.util.concurrent.Executor; import java.util.logging.Logger; import static java.util.stream.Collectors.groupingBy; +import static java.util.stream.Collectors.toMap; /** * A model of a search cluster we might want to dispatch queries to. @@ -42,7 +42,7 @@ public class SearchCluster implements NodeManager<Node> { * if it only queries this cluster when the local node cannot be used, to avoid unnecessary * cross-node network traffic. 
*/ - private final Node localCorpusDispatchTarget; + private volatile Node localCorpusDispatchTarget; public SearchCluster(String clusterId, double minActivedocsPercentage, Collection<Node> nodes, VipStatus vipStatus, PingFactory pingFactory) { @@ -61,22 +61,19 @@ public class SearchCluster implements NodeManager<Node> { public String name() { return clusterId; } /** Sets the new nodes to monitor to be the new nodes, but keep any existing node instances which equal the new ones. */ - public ClusterMonitor<Node> updateNodes(Collection<Node> newNodes, double minActivedocsPercentage) { - Collection<Node> retainedNodes = groups.nodes(); - Collection<Node> currentNodes = new HashSet<>(newNodes); - retainedNodes.retainAll(currentNodes); // Throw away all old nodes which are not in the new set. - currentNodes.removeIf(retainedNodes::contains); // Throw away all new nodes for which we have more information in an old object. - Collection<Node> addedNodes = List.copyOf(currentNodes); - currentNodes.addAll(retainedNodes); // Keep the old nodes that were replaced in the new set. 
+ public void updateNodes(Collection<Node> newNodes, ClusterMonitor<Node> monitor, double minActivedocsPercentage) { + List<Node> currentNodes = new ArrayList<>(newNodes); + List<Node> addedNodes = new ArrayList<>(); + Map<Node, Node> retainedNodes = groups.nodes().stream().collect(toMap(node -> node, node -> node)); + for (int i = 0; i < currentNodes.size(); i++) { + Node retained = retainedNodes.get(currentNodes.get(i)); + if (retained != null) currentNodes.set(i, retained); + else addedNodes.add(currentNodes.get(i)); + } SearchGroupsImpl groups = toGroups(currentNodes, minActivedocsPercentage); - ClusterMonitor<Node> monitor = new ClusterMonitor<>(this, false); - for (Node node : groups.nodes()) monitor.add(node, true); - monitor.start(); - try { while (addedNodes.stream().anyMatch(node -> node.isWorking() == null)) { Thread.sleep(1); } } - catch (InterruptedException e) { throw new UncheckedInterruptedException(e, true); } - pingIterationCompleted(groups); + this.localCorpusDispatchTarget = findLocalCorpusDispatchTarget(HostName.getLocalhost(), groups); + monitor.reconfigure(groups.nodes()); this.groups = groups; - return monitor; } public void addMonitoring(ClusterMonitor<Node> clusterMonitor) { @@ -139,6 +136,7 @@ public class SearchCluster implements NodeManager<Node> { } private void updateWorkingState(Node node, boolean isWorking) { + log.fine(() -> "Updating working state of " + node + " to " + isWorking); node.setWorking(isWorking); updateVipStatusOnNodeChange(node, isWorking); } @@ -214,6 +212,7 @@ public class SearchCluster implements NodeManager<Node> { /** Used by the cluster monitor to manage node status */ @Override public void ping(ClusterMonitor<Node> clusterMonitor, Node node, Executor executor) { + log.fine(() -> "Pinging " + node); Pinger pinger = pingFactory.createPinger(node, clusterMonitor, new PongCallback(node, clusterMonitor)); pinger.ping(); } @@ -300,6 +299,7 @@ public class SearchCluster implements NodeManager<Node> { @Override public 
void handle(Pong pong) { + log.fine(() -> "Got pong from " + node + ": " + pong); if (pong.badResponse()) { clusterMonitor.failed(node, pong.error().get()); } else { diff --git a/container-search/src/test/java/com/yahoo/prelude/fastsearch/test/FastSearcherTestCase.java b/container-search/src/test/java/com/yahoo/prelude/fastsearch/test/FastSearcherTestCase.java index 7a63eb07641..fb483a8eb7b 100644 --- a/container-search/src/test/java/com/yahoo/prelude/fastsearch/test/FastSearcherTestCase.java +++ b/container-search/src/test/java/com/yahoo/prelude/fastsearch/test/FastSearcherTestCase.java @@ -30,7 +30,6 @@ import org.junit.jupiter.api.Test; import java.util.ArrayList; import java.util.Collections; import java.util.List; -import java.util.Map; import java.util.logging.Level; import java.util.logging.Logger; @@ -83,7 +82,7 @@ public class FastSearcherTestCase { @Test void testSinglePassGroupingIsForcedWithSingleNodeGroups() { FastSearcher fastSearcher = new FastSearcher("container.0", - MockDispatcher.create(List.of(new Node(0, "host0", 0))), + MockDispatcher.create(List.of(new Node("test", 0, "host0", 0))), new SummaryParameters(null), new ClusterParams("testhittype"), documentdbInfoConfig("test"), @@ -106,7 +105,7 @@ public class FastSearcherTestCase { @Test void testRankProfileValidation() { FastSearcher fastSearcher = new FastSearcher("container.0", - MockDispatcher.create(List.of(new Node(0, "host0", 0))), + MockDispatcher.create(List.of(new Node("test", 0, "host0", 0))), new SummaryParameters(null), new ClusterParams("testhittype"), documentdbInfoConfig("test"), @@ -125,7 +124,7 @@ public class FastSearcherTestCase { .setHasSummaryFeatures(false) .build()); FastSearcher backend = new FastSearcher("container.0", - MockDispatcher.create(Collections.singletonList(new Node(0, "host0", 0))), + MockDispatcher.create(Collections.singletonList(new Node("test", 0, "host0", 0))), new SummaryParameters(null), new ClusterParams("testhittype"), documentDb, @@ -142,7 +141,7 
@@ public class FastSearcherTestCase { @Test void testSinglePassGroupingIsNotForcedWithSingleNodeGroups() { - MockDispatcher dispatcher = MockDispatcher.create(List.of(new Node(0, "host0", 0), new Node(2, "host1", 0))); + MockDispatcher dispatcher = MockDispatcher.create(List.of(new Node("test", 0, "host0", 0), new Node("test", 2, "host1", 0))); FastSearcher fastSearcher = new FastSearcher("container.0", dispatcher, @@ -184,7 +183,7 @@ public class FastSearcherTestCase { searchClusterB.name(clusterName); b.searchcluster(searchClusterB); VipStatus vipStatus = new VipStatus(b.build()); - List<Node> nodes_1 = List.of(new Node(0, "host0", 0)); + List<Node> nodes_1 = List.of(new Node("test", 0, "host0", 0)); RpcResourcePool rpcPool_1 = new RpcResourcePool(MockDispatcher.toDispatchConfig(), MockDispatcher.toNodesConfig(nodes_1)); MockDispatcher dispatch_1 = MockDispatcher.create(nodes_1, rpcPool_1, vipStatus); dispatch_1.clusterMonitor.shutdown(); diff --git a/container-search/src/test/java/com/yahoo/search/dispatch/DispatcherTest.java b/container-search/src/test/java/com/yahoo/search/dispatch/DispatcherTest.java index 1278afe3759..d0f1f46d6ea 100644 --- a/container-search/src/test/java/com/yahoo/search/dispatch/DispatcherTest.java +++ b/container-search/src/test/java/com/yahoo/search/dispatch/DispatcherTest.java @@ -21,6 +21,7 @@ import com.yahoo.search.dispatch.searchcluster.SearchGroups; import com.yahoo.search.searchchain.Execution; import com.yahoo.vespa.config.search.DispatchConfig; import com.yahoo.vespa.config.search.DispatchNodesConfig; +import com.yahoo.yolean.UncheckedInterruptedException; import org.junit.jupiter.api.Test; import java.io.IOException; @@ -36,6 +37,7 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.Future; import java.util.concurrent.Phaser; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import 
java.util.concurrent.atomic.AtomicLong; @@ -77,7 +79,7 @@ public class DispatcherTest { SearchCluster cl = new MockSearchCluster("1", 0, 0) { @Override public Optional<Node> localCorpusDispatchTarget() { - return Optional.of(new Node(1, "test", 1)); + return Optional.of(new Node("test", 1, "test", 1)); } }; MockInvokerFactory invokerFactory = new MockInvokerFactory(cl.groupList(), dispatchConfig, (n, a) -> true); @@ -181,7 +183,11 @@ public class DispatcherTest { pingPhasers.put(1, new Phaser(2)); pingPhasers.put(2, new Phaser(2)); + AtomicBoolean doPing = new AtomicBoolean(); + PingFactory pingFactory = (node, monitor, pongHandler) -> () -> { + try { while ( ! doPing.getAndSet(false)) { monitor.wait(1); } } // Need to avoid hogging monitor lock while waiting for phaser. + catch (InterruptedException e) { throw new UncheckedInterruptedException(e, true); } pingPhasers.get(node.key()).arriveAndAwaitAdvance(); pongHandler.handle(new Pong(2, 2)); pingPhasers.get(node.key()).arriveAndAwaitAdvance(); @@ -255,8 +261,8 @@ public class DispatcherTest { Dispatcher dispatcher = new Dispatcher(dispatchConfig, rpcPool, cluster, invokerFactories); ExecutorService executor = Executors.newFixedThreadPool(1); - // Set two groups with a single node each. The first cluster-monitor has nothing to do, and is shut down immediately. - // There are also no invokers, so the whole reconfiguration completes once the new cluster monitor has seen all nodes. + // Set two groups with a single node each. + // There are no invokers, so the whole reconfiguration completes once the cluster monitor has seen all the new nodes. Future<?> reconfiguration = executor.submit(() -> { dispatcher.updateWithNewConfig(new DispatchNodesConfig.Builder() .node(new DispatchNodesConfig.Node.Builder().key(0).group(0).port(123).host("host0")) @@ -265,8 +271,10 @@ public class DispatcherTest { }); // Let pings return, to allow the search cluster to reconfigure. 
+ doPing.set(true); pingPhasers.get(0).arriveAndAwaitAdvance(); pingPhasers.get(0).arriveAndAwaitAdvance(); + doPing.set(true); pingPhasers.get(1).arriveAndAwaitAdvance(); pingPhasers.get(1).arriveAndAwaitAdvance(); // We need to wait for the cluster to have at least one group, lest dispatch will fail below. @@ -287,9 +295,10 @@ public class DispatcherTest { search1.search(new Query(), null); // Wait for the current cluster monitor to be mid-ping-round. + doPing.set(true); pingPhasers.get(0).arriveAndAwaitAdvance(); - // Then reconfigure the dispatcher with new nodes, replacing node0 with node2. + // Reconfigure the dispatcher with new nodes, removing node0 and adding node2. reconfiguration = executor.submit(() -> { dispatcher.updateWithNewConfig(new DispatchNodesConfig.Builder() .node(new DispatchNodesConfig.Node.Builder().key(2).group(0).port(123).host("host2")) @@ -297,16 +306,23 @@ public class DispatcherTest { .build()); }); // Reconfiguration starts, but groups are only updated once the search cluster has knowledge about all of them. + pingPhasers.get(0).arriveAndAwaitAdvance(); // Ping for node to remove completes. + doPing.set(true); + pingPhasers.get(1).arriveAndAwaitAdvance(); // Ping for node to keep completes. pingPhasers.get(1).arriveAndAwaitAdvance(); + // New round of pings starts, with nodes 1 and 2. + doPing.set(true); pingPhasers.get(1).arriveAndAwaitAdvance(); - pingPhasers.get(2).arriveAndAwaitAdvance(); + pingPhasers.get(1).arriveAndAwaitAdvance(); + // Cluster has not yet updated its group reference. assertEquals(1, cluster.group(0).workingNodes()); // Node0 is still working. assertSame(node0, cluster.group(0).nodes().get(0)); + + doPing.set(true); + pingPhasers.get(2).arriveAndAwaitAdvance(); pingPhasers.get(2).arriveAndAwaitAdvance(); - // Old cluster monitor is waiting for that ping to complete before it can shut down, and let reconfiguration complete. 
- pingPhasers.get(0).arriveAndAwaitAdvance(); reconfiguration.get(); Node node2 = cluster.group(0).nodes().get(0); assertNotSame(node0, node2); diff --git a/container-search/src/test/java/com/yahoo/search/dispatch/InterleavedSearchInvokerTest.java b/container-search/src/test/java/com/yahoo/search/dispatch/InterleavedSearchInvokerTest.java index 688cdffe22d..500201df26f 100644 --- a/container-search/src/test/java/com/yahoo/search/dispatch/InterleavedSearchInvokerTest.java +++ b/container-search/src/test/java/com/yahoo/search/dispatch/InterleavedSearchInvokerTest.java @@ -238,8 +238,8 @@ public class InterleavedSearchInvokerTest { @Test void requireThatTopKProbabilityOverrideIsDisabledOnContentSkew() throws IOException { - Node node0 = new Node(0, "host0", 0); - Node node1 = new Node(1, "host1", 0); + Node node0 = new Node("test", 0, "host0", 0); + Node node1 = new Node("test", 1, "host1", 0); Group group = new Group(0, List.of(node0, node1)); node0.setActiveDocuments(1000000); @@ -250,8 +250,8 @@ public class InterleavedSearchInvokerTest { @Test void requireThatTopKProbabilityOverrideIsDisabledOnLittleContent() throws IOException { - Node node0 = new Node(0, "host0", 0); - Node node1 = new Node(1, "host1", 0); + Node node0 = new Node("test", 0, "host0", 0); + Node node1 = new Node("test", 1, "host1", 0); Group group = new Group(0, List.of(node0, node1)); node0.setActiveDocuments(10); diff --git a/container-search/src/test/java/com/yahoo/search/dispatch/LoadBalancerTest.java b/container-search/src/test/java/com/yahoo/search/dispatch/LoadBalancerTest.java index 4956698cc2f..b57d97ebb84 100644 --- a/container-search/src/test/java/com/yahoo/search/dispatch/LoadBalancerTest.java +++ b/container-search/src/test/java/com/yahoo/search/dispatch/LoadBalancerTest.java @@ -28,7 +28,7 @@ public class LoadBalancerTest { private static final double delta = 0.0000001; @Test void requireThatLoadBalancerServesSingleNodeSetups() { - Node n1 = new Node(0, "test-node1", 0); + Node n1 = 
new Node("test", 0, "test-node1", 0); LoadBalancer lb = new LoadBalancer(List.of(new Group(0, List.of(n1))), LoadBalancer.Policy.ROUNDROBIN); Optional<Group> grp = lb.takeGroup(null); @@ -40,8 +40,8 @@ public class LoadBalancerTest { @Test void requireThatLoadBalancerServesMultiGroupSetups() { - Node n1 = new Node(0, "test-node1", 0); - Node n2 = new Node(1, "test-node2", 1); + Node n1 = new Node("test", 0, "test-node1", 0); + Node n2 = new Node("test", 1, "test-node2", 1); LoadBalancer lb = new LoadBalancer(List.of(new Group(0, List.of(n1)), new Group(1,List.of(n2))), LoadBalancer.Policy.ROUNDROBIN); Optional<Group> grp = lb.takeGroup(null); @@ -53,10 +53,10 @@ public class LoadBalancerTest { @Test void requireThatLoadBalancerServesClusteredGroups() { - Node n1 = new Node(0, "test-node1", 0); - Node n2 = new Node(1, "test-node2", 0); - Node n3 = new Node(0, "test-node3", 1); - Node n4 = new Node(1, "test-node4", 1); + Node n1 = new Node("test", 0, "test-node1", 0); + Node n2 = new Node("test", 1, "test-node2", 0); + Node n3 = new Node("test", 0, "test-node3", 1); + Node n4 = new Node("test", 1, "test-node4", 1); LoadBalancer lb = new LoadBalancer(List.of(new Group(0, List.of(n1,n2)), new Group(1,List.of(n3,n4))), LoadBalancer.Policy.ROUNDROBIN); Optional<Group> grp = lb.takeGroup(null); @@ -65,8 +65,8 @@ public class LoadBalancerTest { @Test void requireThatLoadBalancerReturnsDifferentGroups() { - Node n1 = new Node(0, "test-node1", 0); - Node n2 = new Node(1, "test-node2", 1); + Node n1 = new Node("test", 0, "test-node1", 0); + Node n2 = new Node("test", 1, "test-node2", 1); LoadBalancer lb = new LoadBalancer(List.of(new Group(0, List.of(n1)), new Group(1,List.of(n2))), LoadBalancer.Policy.ROUNDROBIN); // get first group diff --git a/container-search/src/test/java/com/yahoo/search/dispatch/MockInvoker.java b/container-search/src/test/java/com/yahoo/search/dispatch/MockInvoker.java index aca84386af7..b47015c08c6 100644 --- 
a/container-search/src/test/java/com/yahoo/search/dispatch/MockInvoker.java +++ b/container-search/src/test/java/com/yahoo/search/dispatch/MockInvoker.java @@ -3,17 +3,13 @@ package com.yahoo.search.dispatch; import com.yahoo.prelude.fastsearch.FastHit; import com.yahoo.search.Query; -import com.yahoo.search.Result; -import com.yahoo.search.dispatch.searchcluster.Group; import com.yahoo.search.dispatch.searchcluster.Node; import com.yahoo.search.result.Coverage; import com.yahoo.search.result.Hit; import com.yahoo.search.searchchain.Execution; -import java.io.IOException; import java.util.List; import java.util.Optional; -import java.util.OptionalInt; class MockInvoker extends SearchInvoker { @@ -23,7 +19,7 @@ class MockInvoker extends SearchInvoker { int hitsRequested; protected MockInvoker(int key, Coverage coverage) { - super(Optional.of(new Node(key, "?", 0))); + super(Optional.of(new Node("test", key, "?", 0))); this.coverage = coverage; } diff --git a/container-search/src/test/java/com/yahoo/search/dispatch/rpc/RpcSearchInvokerTest.java b/container-search/src/test/java/com/yahoo/search/dispatch/rpc/RpcSearchInvokerTest.java index 7c1e7372507..b877bac5d74 100644 --- a/container-search/src/test/java/com/yahoo/search/dispatch/rpc/RpcSearchInvokerTest.java +++ b/container-search/src/test/java/com/yahoo/search/dispatch/rpc/RpcSearchInvokerTest.java @@ -33,7 +33,7 @@ public class RpcSearchInvokerTest { var lengthHolder = new AtomicInteger(); var mockClient = parameterCollectorClient(compressionTypeHolder, payloadHolder, lengthHolder); var mockPool = new RpcResourcePool(ImmutableMap.of(7, mockClient.createConnection("foo", 123))); - var invoker = new RpcSearchInvoker(mockSearcher(), compressor, new Node(7, "seven", 1), mockPool, 1000); + var invoker = new RpcSearchInvoker(mockSearcher(), compressor, new Node("test", 7, "seven", 1), mockPool, 1000); Query q = new Query("search/?query=test&hits=10&offset=3"); RpcSearchInvoker.RpcContext context = 
(RpcSearchInvoker.RpcContext) invoker.sendSearchRequest(q, null); @@ -47,7 +47,7 @@ public class RpcSearchInvokerTest { assertEquals(3, request.getOffset()); assertTrue(request.getQueryTreeBlob().size() > 0); - var invoker2 = new RpcSearchInvoker(mockSearcher(), compressor, new Node(8, "eight", 1), mockPool, 1000); + var invoker2 = new RpcSearchInvoker(mockSearcher(), compressor, new Node("test", 8, "eight", 1), mockPool, 1000); RpcSearchInvoker.RpcContext context2 = (RpcSearchInvoker.RpcContext) invoker2.sendSearchRequest(q, context); assertSame(context, context2); assertEquals(lengthHolder.get(), context.compressedPayload.uncompressedSize()); @@ -62,7 +62,7 @@ public class RpcSearchInvokerTest { var lengthHolder = new AtomicInteger(); var mockClient = parameterCollectorClient(compressionTypeHolder, payloadHolder, lengthHolder); var mockPool = new RpcResourcePool(ImmutableMap.of(7, mockClient.createConnection("foo", 123))); - var invoker = new RpcSearchInvoker(mockSearcher(), compressor, new Node(7, "seven", 1), mockPool, maxHits); + var invoker = new RpcSearchInvoker(mockSearcher(), compressor, new Node("test", 7, "seven", 1), mockPool, maxHits); Query q = new Query("search/?query=test&hits=10&offset=3"); invoker.sendSearchRequest(q, null); diff --git a/container-search/src/test/java/com/yahoo/search/dispatch/searchcluster/MockSearchCluster.java b/container-search/src/test/java/com/yahoo/search/dispatch/searchcluster/MockSearchCluster.java index cd0791a3881..6900cc5dd52 100644 --- a/container-search/src/test/java/com/yahoo/search/dispatch/searchcluster/MockSearchCluster.java +++ b/container-search/src/test/java/com/yahoo/search/dispatch/searchcluster/MockSearchCluster.java @@ -79,7 +79,7 @@ public class MockSearchCluster extends SearchCluster { for (int group = 0; group < numGroups; group++) { List<Node> groupNodes = new ArrayList<>(); for (int i = 0; i < nodesPerGroup; i++) { - Node node = new Node(distributionKey, "host" + distributionKey, group); + Node node = 
new Node("test", distributionKey, "host" + distributionKey, group); node.setWorking(true); groupNodes.add(node); distributionKey++; diff --git a/container-search/src/test/java/com/yahoo/search/dispatch/searchcluster/SearchClusterTest.java b/container-search/src/test/java/com/yahoo/search/dispatch/searchcluster/SearchClusterTest.java index bfe1aed1084..c3ddeac5365 100644 --- a/container-search/src/test/java/com/yahoo/search/dispatch/searchcluster/SearchClusterTest.java +++ b/container-search/src/test/java/com/yahoo/search/dispatch/searchcluster/SearchClusterTest.java @@ -6,12 +6,14 @@ import com.yahoo.container.handler.ClustersStatus; import com.yahoo.container.handler.VipStatus; import com.yahoo.net.HostName; import com.yahoo.prelude.Pong; +import com.yahoo.search.cluster.BaseNodeMonitor; import com.yahoo.search.cluster.ClusterMonitor; import com.yahoo.search.result.ErrorMessage; import org.junit.jupiter.api.Test; import java.util.ArrayList; import java.util.Arrays; +import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; @@ -19,8 +21,6 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; -import java.util.function.Function; -import java.util.stream.Collectors; import java.util.stream.IntStream; import static java.util.function.Function.identity; @@ -58,7 +58,7 @@ public class SearchClusterTest { for (String name : nodeNames) { int key = nodes.size() % nodesPerGroup; int group = nodes.size() / nodesPerGroup; - nodes.add(new Node(key, name, group)); + nodes.add(new Node("test", key, name, group)); numDocsPerNode.add(new AtomicInteger(1)); pingCounts.add(new AtomicInteger(0)); } @@ -326,7 +326,7 @@ public class SearchClusterTest { @Test void requireThatPingSequenceIsUpHeld() { - Node node = new Node(1, "n", 1); + Node node = new Node("test", 1, "n", 1); assertEquals(1, node.createPingSequenceId()); 
assertEquals(2, node.createPingSequenceId()); assertEquals(0, node.getLastReceivedPongId()); @@ -348,7 +348,7 @@ public class SearchClusterTest { @Test void requireThatSingleNodeGroupIsInBalance() { - Group group = new Group(0, List.of(new Node(1, "n", 1))); + Group group = new Group(0, List.of(new Node("test", 1, "n", 1))); group.nodes().forEach(node -> node.setWorking(true)); assertTrue(group.isBalanced()); group.aggregateNodeValues(); @@ -360,7 +360,7 @@ public class SearchClusterTest { @Test void requireThatMultiNodeGroupDetectsBalance() { - Group group = new Group(0, List.of(new Node(1, "n1", 1), new Node(2, "n2", 1))); + Group group = new Group(0, List.of(new Node("test", 1, "n1", 1), new Node("test", 2, "n2", 1))); assertTrue(group.isBalanced()); group.nodes().forEach(node -> node.setWorking(true)); assertTrue(group.isBalanced()); @@ -386,33 +386,48 @@ public class SearchClusterTest { @Test void requireThatPreciselyTheRetainedNodesAreKeptWhenNodesAreUpdated() { try (State state = new State("query", 2, IntStream.range(0, 6).mapToObj(i -> "node-" + i).toList())) { - List<Node> referenceNodes = List.of(new Node(0, "node-0", 0), - new Node(1, "node-1", 0), - new Node(0, "node-2", 1), - new Node(1, "node-3", 1), - new Node(0, "node-4", 2), - new Node(1, "node-5", 2)); + state.clusterMonitor.start(); + List<Node> referenceNodes = List.of(new Node("test", 0, "node-0", 0), + new Node("test", 1, "node-1", 0), + new Node("test", 0, "node-2", 1), + new Node("test", 1, "node-3", 1), + new Node("test", 0, "node-4", 2), + new Node("test", 1, "node-5", 2)); SearchGroups oldGroups = state.searchCluster.groupList(); assertEquals(Set.copyOf(referenceNodes), oldGroups.nodes()); - - List<Node> updatedNodes = List.of(new Node(0, "node-1", 0), // Swap node-0 and node-1 - new Node(1, "node-0", 0), // Swap node-1 and node-0 - new Node(0, "node-4", 1), // Swap node-2 and node-4 - new Node(1, "node-3", 1), - new Node(0, "node-2", 2), // Swap node-4 and node-2 - new Node(1, "node-6", 
2)); // Replace node-6 - state.searchCluster.updateNodes(updatedNodes, 100.0); + List<BaseNodeMonitor<Node>> oldMonitors = state.clusterMonitor.nodeMonitors(); + + List<Node> updatedNodes = List.of(new Node("test", 0, "node-1", 0), // Swap node-0 and node-1 + new Node("test", 1, "node-0", 0), // Swap node-1 and node-0 + new Node("test", 0, "node-4", 1), // Swap node-2 and node-4 + new Node("test", 1, "node-3", 1), + new Node("test", 0, "node-2", 2), // Swap node-4 and node-2 + new Node("test", 1, "node-6", 2)); // Replace node-6 + state.searchCluster.updateNodes(updatedNodes, state.clusterMonitor, 100.0); SearchGroups newGroups = state.searchCluster.groupList(); assertEquals(Set.copyOf(updatedNodes), newGroups.nodes()); - Map<Node, Node> oldNodesByIdentity = newGroups.nodes().stream().collect(toMap(identity(), identity())); + Map<Node, Node> oldNodesByIdentity = oldGroups.nodes().stream().collect(toMap(identity(), identity())); Map<Node, Node> newNodesByIdentity = newGroups.nodes().stream().collect(toMap(identity(), identity())); assertSame(updatedNodes.get(0), newNodesByIdentity.get(updatedNodes.get(0))); assertSame(updatedNodes.get(1), newNodesByIdentity.get(updatedNodes.get(1))); assertSame(updatedNodes.get(2), newNodesByIdentity.get(updatedNodes.get(2))); - assertSame(oldNodesByIdentity.get(referenceNodes.get(3)), newNodesByIdentity.get(updatedNodes.get(3))); + assertSame(oldNodesByIdentity.get(updatedNodes.get(3)), newNodesByIdentity.get(updatedNodes.get(3))); assertSame(updatedNodes.get(4), newNodesByIdentity.get(updatedNodes.get(4))); assertSame(updatedNodes.get(5), newNodesByIdentity.get(updatedNodes.get(5))); + + // Also verify search-path index within group follows node order, as given by config. 
+ int[] pathIndexWithinGroup = new int[3]; + for (Node node : updatedNodes) + assertEquals(pathIndexWithinGroup[node.group()]++, newNodesByIdentity.get(node).pathIndex(), + "search path index within group should follow updated node order for: " + node); + + // Precisely the one retained node keeps its monitor through reconfiguration. + Set<BaseNodeMonitor<Node>> retainedMonitors = new HashSet<>(state.clusterMonitor.nodeMonitors()); + assertEquals(6, retainedMonitors.size()); + retainedMonitors.retainAll(oldMonitors); + assertEquals(1, retainedMonitors.size()); + assertSame(oldNodesByIdentity.get(updatedNodes.get(3)), retainedMonitors.iterator().next().getNode()); } } diff --git a/container-test/pom.xml b/container-test/pom.xml index a22d0b59ace..9c6fe8025a2 100644 --- a/container-test/pom.xml +++ b/container-test/pom.xml @@ -115,6 +115,11 @@ <groupId>org.lz4</groupId> <artifactId>lz4-java</artifactId> </dependency> + <dependency> + <groupId>jakarta.inject</groupId> + <artifactId>jakarta.inject-api</artifactId> + <version>${jakarta.inject.vespa.version}</version> + </dependency> <!-- START JETTY embedded jars --> <dependency> diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/billing/Bill.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/billing/Bill.java index 9c29f5c30f4..664669d8e55 100644 --- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/billing/Bill.java +++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/billing/Bill.java @@ -122,7 +122,7 @@ public class Bill { public BigDecimal sumAdditionalCost() { // anything that is not covered by the cost for resources is "additional" costs - var resourceCosts = sumCpuCost().add(sumMemoryCost()).add(sumDiskCost()); + var resourceCosts = sumCpuCost().add(sumMemoryCost()).add(sumDiskCost()).add(sumGpuCost()); return sum().subtract(resourceCosts); } diff --git 
a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/ContainerEndpoint.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/ContainerEndpoint.java index 4746fa2da26..a0d4fd03117 100644 --- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/ContainerEndpoint.java +++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/ContainerEndpoint.java @@ -5,7 +5,6 @@ import com.yahoo.config.provision.zone.AuthMethod; import com.yahoo.config.provision.zone.RoutingMethod; import java.util.List; -import java.util.Map; import java.util.Objects; import java.util.OptionalInt; @@ -19,21 +18,21 @@ import java.util.OptionalInt; * used for routing, such as a Host header value that is not necessarily a proper DNS name * @param weight The relative weight of this endpoint * @param routingMethod The routing method used by this endpoint - * @param authMethods Supported authentication methods for each endpoint name + * @param authMethod The authentication method supported by this endpoint * * @author mpolden */ public record ContainerEndpoint(String clusterId, String scope, List<String> names, OptionalInt weight, - RoutingMethod routingMethod, Map<String, AuthMethod> authMethods) { + RoutingMethod routingMethod, AuthMethod authMethod) { public ContainerEndpoint(String clusterId, String scope, List<String> names, OptionalInt weight, - RoutingMethod routingMethod, Map<String, AuthMethod> authMethods) { + RoutingMethod routingMethod, AuthMethod authMethod) { this.clusterId = nonEmpty(clusterId, "clusterId must be non-empty"); this.scope = Objects.requireNonNull(scope, "scope must be non-null"); this.names = List.copyOf(Objects.requireNonNull(names, "names must be non-null")); this.weight = Objects.requireNonNull(weight, "weight must be non-null"); this.routingMethod = Objects.requireNonNull(routingMethod, "routingMethod must 
be non-null"); - this.authMethods = Objects.requireNonNull(Map.copyOf(authMethods), "authMethods must be non-null"); + this.authMethod = Objects.requireNonNull(authMethod, "authMethod must be non-null"); } private static String nonEmpty(String s, String message) { diff --git a/controller-server/pom.xml b/controller-server/pom.xml index 5e738a2dd4a..0fcd55eb7d3 100644 --- a/controller-server/pom.xml +++ b/controller-server/pom.xml @@ -91,7 +91,7 @@ <dependency> <groupId>com.google.inject</groupId> <artifactId>guice</artifactId> - <classifier>no_aop</classifier> + <scope>provided</scope> </dependency> @@ -190,8 +190,8 @@ </dependency> <dependency> - <groupId>com.github.tomakehurst</groupId> - <artifactId>wiremock-jre8-standalone</artifactId> + <groupId>org.wiremock</groupId> + <artifactId>wiremock-standalone</artifactId> <scope>test</scope> </dependency> diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/EndpointCertificateMaintainer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/EndpointCertificateMaintainer.java index dea5d048fc5..e3f3d0e7929 100644 --- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/EndpointCertificateMaintainer.java +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/EndpointCertificateMaintainer.java @@ -3,18 +3,26 @@ package com.yahoo.vespa.hosted.controller.maintenance; import com.google.common.collect.Sets; import com.yahoo.component.annotation.Inject; +import com.yahoo.config.application.api.DeploymentSpec; import com.yahoo.config.provision.ApplicationId; import com.yahoo.config.provision.InstanceName; import com.yahoo.container.jdisc.secretstore.SecretNotFoundException; import com.yahoo.container.jdisc.secretstore.SecretStore; import com.yahoo.transaction.Mutex; import com.yahoo.transaction.NestedTransaction; +import com.yahoo.vespa.flags.BooleanFlag; +import com.yahoo.vespa.flags.FetchVector; +import 
com.yahoo.vespa.flags.Flags; +import com.yahoo.vespa.flags.PermanentFlags; +import com.yahoo.vespa.flags.StringFlag; import com.yahoo.vespa.hosted.controller.Application; import com.yahoo.vespa.hosted.controller.Controller; import com.yahoo.vespa.hosted.controller.api.integration.certificates.EndpointCertificate; import com.yahoo.vespa.hosted.controller.api.integration.certificates.EndpointCertificateDetails; import com.yahoo.vespa.hosted.controller.api.integration.certificates.EndpointCertificateProvider; import com.yahoo.vespa.hosted.controller.api.integration.certificates.EndpointCertificateRequest; +import com.yahoo.vespa.hosted.controller.application.Endpoint; +import com.yahoo.vespa.hosted.controller.application.GeneratedEndpoint; import com.yahoo.vespa.hosted.controller.certificate.UnassignedCertificate; import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType; import com.yahoo.vespa.hosted.controller.api.integration.secrets.EndpointSecretManager; @@ -34,9 +42,11 @@ import java.util.HashSet; import java.util.List; import java.util.Optional; import java.util.OptionalInt; +import java.util.Set; import java.util.logging.Level; import java.util.logging.Logger; import java.util.stream.Collectors; +import java.util.stream.Stream; /** * Updates refreshed endpoint certificates and triggers redeployment, and deletes unused certificates. 
@@ -56,6 +66,9 @@ public class EndpointCertificateMaintainer extends ControllerMaintainer { private final EndpointSecretManager endpointSecretManager; private final EndpointCertificateProvider endpointCertificateProvider; final Comparator<EligibleJob> oldestFirst = Comparator.comparing(e -> e.deployment.at()); + final BooleanFlag assignRandomizedId; + private final StringFlag endpointCertificateAlgo; + private final BooleanFlag useAlternateCertProvider; @Inject public EndpointCertificateMaintainer(Controller controller, Duration interval) { @@ -66,6 +79,9 @@ public class EndpointCertificateMaintainer extends ControllerMaintainer { this.endpointSecretManager = controller.serviceRegistry().secretManager(); this.curator = controller().curator(); this.endpointCertificateProvider = controller.serviceRegistry().endpointCertificateProvider(); + this.assignRandomizedId = Flags.ASSIGN_RANDOMIZED_ID.bindTo(controller.flagSource()); + this.useAlternateCertProvider = PermanentFlags.USE_ALTERNATIVE_ENDPOINT_CERTIFICATE_PROVIDER.bindTo(controller.flagSource()); + this.endpointCertificateAlgo = PermanentFlags.ENDPOINT_CERTIFICATE_ALGORITHM.bindTo(controller.flagSource()); } @Override @@ -76,6 +92,7 @@ public class EndpointCertificateMaintainer extends ControllerMaintainer { updateRefreshedCertificates(); deleteUnusedCertificates(); deleteOrReportUnmanagedCertificates(); + assignRandomizedIds(); } catch (Exception e) { log.log(Level.SEVERE, "Exception caught while maintaining endpoint certificates", e); return 1.0; @@ -252,6 +269,114 @@ public class EndpointCertificateMaintainer extends ControllerMaintainer { } } + private void assignRandomizedIds() { + List<AssignedCertificate> assignedCertificates = curator.readAssignedCertificates(); + /* + only assign randomized id if: + * instance is present + * randomized id is not already assigned + * feature flag is enabled + */ + assignedCertificates.stream() + .filter(c -> c.instance().isPresent()) + .filter(c -> 
c.certificate().randomizedId().isEmpty()) + .filter(c -> assignRandomizedId.with(FetchVector.Dimension.APPLICATION_ID, c.application().instance(c.instance().get()).serializedForm()).value()) + .forEach(c -> assignRandomizedId(c.application(), c.instance().get())); + } + + /* + Assign randomized id according to these rules: + * Instance is not mentioned in the deployment spec for this application + -> assume this is a manual deployment. Assign a randomized id to the certificate, save using instance only + * Instance is mentioned in deployment spec: + -> If there is a random endpoint assigned to tenant:application -> use this also for the "instance" certificate + -> Otherwise assign a random endpoint and write to the application and the instance. + */ + private void assignRandomizedId(TenantAndApplicationId tenantAndApplicationId, InstanceName instanceName) { + Optional<AssignedCertificate> assignedCertificate = curator.readAssignedCertificate(tenantAndApplicationId, Optional.of(instanceName)); + if (assignedCertificate.isEmpty()) { + log.log(Level.INFO, "Assigned certificate missing for " + tenantAndApplicationId.instance(instanceName).toFullString() + " when assigning randomized id"); + } + // Verify that the assigned certificate still does not have randomized id assigned + if (assignedCertificate.get().certificate().randomizedId().isPresent()) return; + + controller().applications().lockApplicationOrThrow(tenantAndApplicationId, application -> { + DeploymentSpec deploymentSpec = application.get().deploymentSpec(); + if (deploymentSpec.instance(instanceName).isPresent()) { + Optional<AssignedCertificate> applicationLevelAssignedCertificate = curator.readAssignedCertificate(tenantAndApplicationId, Optional.empty()); + assignApplicationRandomId(assignedCertificate.get(), applicationLevelAssignedCertificate); + } else { + assignInstanceRandomId(assignedCertificate.get()); + } + }); + } + + private void assignApplicationRandomId(AssignedCertificate 
instanceLevelAssignedCertificate, Optional<AssignedCertificate> applicationLevelAssignedCertificate) { + TenantAndApplicationId tenantAndApplicationId = instanceLevelAssignedCertificate.application(); + if (applicationLevelAssignedCertificate.isPresent()) { + // Application level assigned certificate with randomized id already exists. Copy randomized id to instance level certificate and request with random names. + EndpointCertificate withRandomNames = requestRandomNames( + tenantAndApplicationId, + instanceLevelAssignedCertificate.instance(), + applicationLevelAssignedCertificate.get().certificate().randomizedId() + .orElseThrow(() -> new IllegalArgumentException("Application certificate already assigned to " + tenantAndApplicationId.toString() + ", but random id is missing")), + Optional.of(instanceLevelAssignedCertificate.certificate())); + AssignedCertificate assignedCertWithRandomNames = instanceLevelAssignedCertificate.with(withRandomNames); + curator.writeAssignedCertificate(assignedCertWithRandomNames); + } else { + // No application level certificate exists, generate new assigned certificate with the randomized id based names only, then request same names also for instance level cert + String randomId = generateRandomId(); + EndpointCertificate applicationLevelEndpointCert = requestRandomNames(tenantAndApplicationId, Optional.empty(), randomId, Optional.empty()); + AssignedCertificate applicationLevelCert = new AssignedCertificate(tenantAndApplicationId, Optional.empty(), applicationLevelEndpointCert); + + EndpointCertificate instanceLevelEndpointCert = requestRandomNames(tenantAndApplicationId, instanceLevelAssignedCertificate.instance(), randomId, Optional.of(instanceLevelAssignedCertificate.certificate())); + instanceLevelAssignedCertificate = instanceLevelAssignedCertificate.with(instanceLevelEndpointCert); + + // Save both in transaction + try (NestedTransaction transaction = new NestedTransaction()) { + 
curator.writeAssignedCertificate(instanceLevelAssignedCertificate, transaction); + curator.writeAssignedCertificate(applicationLevelCert, transaction); + transaction.commit(); + } + } + } + + private void assignInstanceRandomId(AssignedCertificate assignedCertificate) { + String randomId = generateRandomId(); + EndpointCertificate withRandomNames = requestRandomNames(assignedCertificate.application(), assignedCertificate.instance(), randomId, Optional.of(assignedCertificate.certificate())); + AssignedCertificate assignedCertWithRandomNames = assignedCertificate.with(withRandomNames); + curator.writeAssignedCertificate(assignedCertWithRandomNames); + } + + private EndpointCertificate requestRandomNames(TenantAndApplicationId tenantAndApplicationId, Optional<InstanceName> instanceName, String randomId, Optional<EndpointCertificate> previousRequest) { + String dnsSuffix = Endpoint.dnsSuffix(controller().system()); + List<String> newSanDnsEntries = List.of( + "*.%s.z%s".formatted(randomId, dnsSuffix), + "*.%s.g%s".formatted(randomId, dnsSuffix), + "*.%s.a%s".formatted(randomId, dnsSuffix)); + List<String> existingSanDnsEntries = previousRequest.map(EndpointCertificate::requestedDnsSans).orElse(List.of()); + List<String> requestNames = Stream.concat(existingSanDnsEntries.stream(), newSanDnsEntries.stream()).toList(); + String key = instanceName.map(tenantAndApplicationId::instance).map(ApplicationId::toFullString).orElseGet(tenantAndApplicationId::toString); + return endpointCertificateProvider.requestCaSignedCertificate( + key, + requestNames, + previousRequest, + endpointCertificateAlgo.value(), + useAlternateCertProvider.value()) + .withRandomizedId(randomId); + } + + private String generateRandomId() { + List<String> unassignedIds = curator.readUnassignedCertificates().stream().map(UnassignedCertificate::id).toList(); + List<String> assignedIds = 
curator.readAssignedCertificates().stream().map(AssignedCertificate::certificate).map(EndpointCertificate::randomizedId).filter(Optional::isPresent).map(Optional::get).toList(); + Set<String> allIds = Stream.concat(unassignedIds.stream(), assignedIds.stream()).collect(Collectors.toSet()); + String randomId; + do { + randomId = GeneratedEndpoint.createPart(controller().random(true)); + } while (allIds.contains(randomId)); + return randomId; + } + private static String asString(TenantAndApplicationId application, Optional<InstanceName> instanceName) { return application.toString() + instanceName.map(name -> "." + name.value()).orElse(""); } diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/PreparedEndpoints.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/PreparedEndpoints.java index c67d88fa81f..133a0c4c4ca 100644 --- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/PreparedEndpoints.java +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/PreparedEndpoints.java @@ -1,7 +1,6 @@ package com.yahoo.vespa.hosted.controller.routing; import com.yahoo.config.provision.ClusterSpec; -import com.yahoo.config.provision.zone.AuthMethod; import com.yahoo.vespa.hosted.controller.api.identifiers.DeploymentId; import com.yahoo.vespa.hosted.controller.api.integration.certificates.EndpointCertificate; import com.yahoo.vespa.hosted.controller.api.integration.configserver.ContainerEndpoint; @@ -59,12 +58,14 @@ public record PreparedEndpoints(DeploymentId deployment, Function.identity())); Set<ContainerEndpoint> containerEndpoints = new HashSet<>(); endpoints.scope(Endpoint.Scope.zone).groupingBy(Endpoint::cluster).forEach((clusterId, clusterEndpoints) -> { - containerEndpoints.add(new ContainerEndpoint(clusterId.value(), - asString(Endpoint.Scope.zone), - clusterEndpoints.mapToList(Endpoint::dnsName), - OptionalInt.empty(), - 
clusterEndpoints.first().get().routingMethod(), - authMethodsByDnsName(clusterEndpoints))); + clusterEndpoints.groupingBy(Endpoint::authMethod).forEach((authMethod, endpointsByAuthMethod) -> { + containerEndpoints.add(new ContainerEndpoint(clusterId.value(), + asString(Endpoint.Scope.zone), + endpointsByAuthMethod.mapToList(Endpoint::dnsName), + OptionalInt.empty(), + endpointsByAuthMethod.first().get().routingMethod(), + authMethod)); + }); }); endpoints.scope(Endpoint.Scope.global).groupingBy(Endpoint::cluster).forEach((clusterId, clusterEndpoints) -> { for (var endpoint : clusterEndpoints) { @@ -85,7 +86,7 @@ public record PreparedEndpoints(DeploymentId deployment, names, OptionalInt.empty(), endpoint.routingMethod(), - authMethodsByDnsName(EndpointList.of(endpoint)))); + endpoint.authMethod())); } }); endpoints.scope(Endpoint.Scope.application).groupingBy(Endpoint::cluster).forEach((clusterId, clusterEndpoints) -> { @@ -99,16 +100,12 @@ public record PreparedEndpoints(DeploymentId deployment, List.of(endpoint.dnsName()), OptionalInt.of(matchingTarget.get().weight()), endpoint.routingMethod(), - authMethodsByDnsName(EndpointList.of(endpoint)))); + endpoint.authMethod())); } }); return containerEndpoints; } - private static Map<String, AuthMethod> authMethodsByDnsName(EndpointList endpoints) { - return endpoints.asList().stream().collect(Collectors.toMap(Endpoint::dnsName, Endpoint::authMethod)); - } - private static String asString(Endpoint.Scope scope) { return switch (scope) { case application -> "application"; diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTest.java index 1ac811f0b4f..1a886a50589 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTest.java +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTest.java @@ -335,7 +335,7 @@ public class ControllerTest 
{ "rotation-id-01"), OptionalInt.empty(), RoutingMethod.sharedLayer4, - Map.of("beta.app1.tenant1.global.vespa.oath.cloud", AuthMethod.mtls))); + AuthMethod.mtls)); for (Deployment deployment : betaDeployments) { assertEquals(containerEndpoints, @@ -354,7 +354,7 @@ public class ControllerTest { "rotation-id-02"), OptionalInt.empty(), RoutingMethod.sharedLayer4, - Map.of("app1.tenant1.global.vespa.oath.cloud", AuthMethod.mtls))); + AuthMethod.mtls)); for (Deployment deployment : defaultDeployments) { assertEquals(containerEndpoints, tester.configServer().containerEndpoints().get(defaultContext.deploymentIdIn(deployment.zone()))); @@ -744,14 +744,11 @@ public class ControllerTest { ); deploymentEndpoints.forEach((deployment, endpoints) -> { Set<ContainerEndpoint> expected = endpoints.entrySet().stream() - .map(kv -> { - Map<String, AuthMethod> authMethods = kv.getKey().stream().collect(Collectors.toMap(Function.identity(), (v) -> AuthMethod.mtls)); - return new ContainerEndpoint("default", "application", - kv.getKey(), - OptionalInt.of(kv.getValue()), - tester.controller().zoneRegistry().routingMethod(deployment.zoneId()), - authMethods); - }) + .map(kv -> new ContainerEndpoint("default", "application", + kv.getKey(), + OptionalInt.of(kv.getValue()), + tester.controller().zoneRegistry().routingMethod(deployment.zoneId()), + AuthMethod.mtls)) .collect(Collectors.toSet()); assertEquals(expected, tester.configServer().containerEndpoints().get(deployment), diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/EndpointCertificateMaintainerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/EndpointCertificateMaintainerTest.java index 422df76f1fb..918a4bed6f4 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/EndpointCertificateMaintainerTest.java +++ 
b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/EndpointCertificateMaintainerTest.java @@ -6,6 +6,8 @@ import com.yahoo.config.provision.ApplicationId; import com.yahoo.config.provision.CloudAccount; import com.yahoo.config.provision.zone.ZoneId; import com.yahoo.jdisc.test.MockMetric; +import com.yahoo.vespa.flags.Flags; +import com.yahoo.vespa.flags.InMemoryFlagSource; import com.yahoo.vespa.flags.PermanentFlags; import com.yahoo.vespa.hosted.controller.ControllerTester; import com.yahoo.vespa.hosted.controller.api.integration.certificates.EndpointCertificate; @@ -32,6 +34,8 @@ import java.util.Optional; import java.util.OptionalDouble; import java.util.stream.Stream; +import static com.yahoo.vespa.hosted.controller.deployment.DeploymentContext.devUsEast1; +import static com.yahoo.vespa.hosted.controller.deployment.DeploymentContext.perfUsEast3; import static com.yahoo.vespa.hosted.controller.deployment.DeploymentContext.productionUsWest1; import static com.yahoo.vespa.hosted.controller.deployment.DeploymentContext.stagingTest; import static com.yahoo.vespa.hosted.controller.deployment.DeploymentContext.systemTest; @@ -174,6 +178,103 @@ public class EndpointCertificateMaintainerTest { assertNotEquals(List.of(), endpointCertificateProvider.listCertificates()); } + @Test + void certificates_are_not_assigned_random_id_when_flag_disabled() { + var app = ApplicationId.from("tenant", "app", "default"); + DeploymentTester deploymentTester = new DeploymentTester(tester); + deployToAssignCert(deploymentTester, app, List.of(systemTest, stagingTest, productionUsWest1), Optional.empty()); + assertEquals(1, tester.curator().readAssignedCertificates().size()); + + maintainer.maintain(); + assertEquals(1, tester.curator().readAssignedCertificates().size()); + } + + @Test + void production_deployment_certificates_are_assigned_random_id() { + var app = ApplicationId.from("tenant", "app", "default"); + DeploymentTester deploymentTester = new 
DeploymentTester(tester); + deployToAssignCert(deploymentTester, app, List.of(systemTest, stagingTest, productionUsWest1), Optional.empty()); + assertEquals(1, tester.curator().readAssignedCertificates().size()); + + ((InMemoryFlagSource)deploymentTester.controller().flagSource()).withBooleanFlag(Flags.ASSIGN_RANDOMIZED_ID.id(), true); + maintainer.maintain(); + assertEquals(2, tester.curator().readAssignedCertificates().size()); + + // Verify random id is same for application and instance certificates + Optional<AssignedCertificate> applicationCertificate = tester.curator().readAssignedCertificate(TenantAndApplicationId.from(app), Optional.empty()); + assertTrue(applicationCertificate.isPresent()); + Optional<AssignedCertificate> instanceCertificate = tester.curator().readAssignedCertificate(TenantAndApplicationId.from(app), Optional.of(app.instance())); + assertTrue(instanceCertificate.isPresent()); + assertEquals(instanceCertificate.get().certificate().randomizedId(), applicationCertificate.get().certificate().randomizedId()); + + // Verify the 3 wildcard random names are same in all certs + List<String> appWildcardSans = applicationCertificate.get().certificate().requestedDnsSans(); + assertEquals(3, appWildcardSans.size()); + List<String> instanceSans = instanceCertificate.get().certificate().requestedDnsSans(); + List<String> wildcards = instanceSans.stream().filter(appWildcardSans::contains).toList(); + assertEquals(appWildcardSans, wildcards); + } + + @Test + void existing_application_randomid_is_copied_to_new_instance_deployments() { + var instance1 = ApplicationId.from("tenant", "prod", "instance1"); + var instance2 = ApplicationId.from("tenant", "prod", "instance2"); + + DeploymentTester deploymentTester = new DeploymentTester(tester); + deployToAssignCert(deploymentTester, instance1, List.of(systemTest, stagingTest,productionUsWest1),Optional.of("instance1")); + assertEquals(1, tester.curator().readAssignedCertificates().size()); + 
((InMemoryFlagSource)deploymentTester.controller().flagSource()).withBooleanFlag(Flags.ASSIGN_RANDOMIZED_ID.id(), true); + maintainer.maintain(); + + String randomId = tester.curator().readAssignedCertificate(instance1).get().certificate().randomizedId().get(); + + deployToAssignCert(deploymentTester, instance2, List.of(productionUsWest1), Optional.of("instance1,instance2")); + maintainer.maintain(); + assertEquals(3, tester.curator().readAssignedCertificates().size()); + + assertEquals(randomId, tester.curator().readAssignedCertificate(instance1).get().certificate().randomizedId().get()); + } + + @Test + void dev_certificates_are_not_assigned_application_level_certificate() { + var devApp = ApplicationId.from("tenant", "devonly", "foo"); + DeploymentTester deploymentTester = new DeploymentTester(tester); + deployToAssignCert(deploymentTester, devApp, List.of(devUsEast1), Optional.empty()); + assertEquals(1, tester.curator().readAssignedCertificates().size()); + ((InMemoryFlagSource)deploymentTester.controller().flagSource()).withBooleanFlag(Flags.ASSIGN_RANDOMIZED_ID.id(), true); + List<String> originalRequestedSans = tester.curator().readAssignedCertificate(devApp).get().certificate().requestedDnsSans(); + maintainer.maintain(); + assertEquals(1, tester.curator().readAssignedCertificates().size()); + + // Verify certificate is assigned random id and 3 new names + Optional<AssignedCertificate> assignedCertificate = tester.curator().readAssignedCertificate(devApp); + assertTrue(assignedCertificate.get().certificate().randomizedId().isPresent()); + List<String> newRequestedSans = assignedCertificate.get().certificate().requestedDnsSans(); + List<String> randomizedNames = newRequestedSans.stream().filter(san -> !originalRequestedSans.contains(san)).toList(); + assertEquals(3, randomizedNames.size()); + } + + private void deployToAssignCert(DeploymentTester tester, ApplicationId applicationId, List<JobType> jobTypes, Optional<String> instances) { + var 
applicationPackageBuilder = new ApplicationPackageBuilder() + .region("us-west-1"); + instances.map(applicationPackageBuilder::instances); + var applicationPackage = applicationPackageBuilder.build(); + + List<JobType> manualJobs = jobTypes.stream().filter(jt -> jt.environment().isManuallyDeployed()).toList(); + List<JobType> jobs = jobTypes.stream().filter(jt -> ! jt.environment().isManuallyDeployed()).toList(); + + DeploymentContext deploymentContext = tester.newDeploymentContext(applicationId); + deploymentContext.submit(applicationPackage); + manualJobs.forEach(job -> deploymentContext.runJob(job, applicationPackage)); + jobs.forEach(deploymentContext::runJob); + + } + EndpointCertificate certificate(List<String> sans) { + return new EndpointCertificate("keyName", "certName", 0, 0, "root-request-uuid", Optional.of("leaf-request-uuid"), List.of(), "issuer", Optional.empty(), Optional.empty(), Optional.empty()); + } + + + private static AssignedCertificate assignedCertificate(ApplicationId instance, EndpointCertificate certificate) { return new AssignedCertificate(TenantAndApplicationId.from(instance), Optional.of(instance.instance()), certificate); } diff --git a/dependency-versions/pom.xml b/dependency-versions/pom.xml index 32247ad4e86..8ac71d69b57 100644 --- a/dependency-versions/pom.xml +++ b/dependency-versions/pom.xml @@ -36,15 +36,17 @@ <commons-logging.vespa.version>1.2</commons-logging.vespa.version> <!-- This version is exported by jdisc via jcl-over-slf4j. 
--> <error-prone-annotations.vespa.version>2.21.1</error-prone-annotations.vespa.version> <guava.vespa.version>32.1.2-jre</guava.vespa.version> - <guice.vespa.version>4.2.3</guice.vespa.version> + <guice.vespa.version>6.0.0</guice.vespa.version> <jackson2.vespa.version>2.15.2</jackson2.vespa.version> <jackson-databind.vespa.version>2.15.2</jackson-databind.vespa.version> + <jakarta.inject.vespa.version>2.0.1</jakarta.inject.vespa.version> <javax.inject.vespa.version>1</javax.inject.vespa.version> <javax.servlet-api.vespa.version>3.1.0</javax.servlet-api.vespa.version> <javax.ws.rs-api.vespa.version>2.1.1</javax.ws.rs-api.vespa.version> - <jaxb.vespa.version>2.3.0</jaxb.vespa.version> + <jaxb-api.vespa.version>2.3.1</jaxb-api.vespa.version> + <jaxb-core.vespa.version>2.3.0.1</jaxb-core.vespa.version> + <jaxb-impl.vespa.version>2.3.0</jaxb-impl.vespa.version> <slf4j.vespa.version>1.7.36</slf4j.vespa.version> - <xml-apis.vespa.version>1.4.01</xml-apis.vespa.version> <!-- END Dependencies available from the Jdisc container --> @@ -53,7 +55,7 @@ <airline.vespa.version>0.9</airline.vespa.version> <antlr.vespa.version>3.5.3</antlr.vespa.version> - <antlr4.vespa.version>4.13.0</antlr4.vespa.version> + <antlr4.vespa.version>4.13.1</antlr4.vespa.version> <apache.httpclient.vespa.version>4.5.14</apache.httpclient.vespa.version> <apache.httpcore.vespa.version>4.4.16</apache.httpcore.vespa.version> <apache.httpclient5.vespa.version>5.2.1</apache.httpclient5.vespa.version> @@ -63,7 +65,7 @@ <assertj.vespa.version>3.24.2</assertj.vespa.version> <!-- Athenz dependencies. 
Make sure these dependencies match those in Vespa's internal repositories --> - <athenz.vespa.version>1.11.40</athenz.vespa.version> + <athenz.vespa.version>1.11.41</athenz.vespa.version> <aws-sdk.vespa.version>1.12.540</aws-sdk.vespa.version> <!-- Athenz END --> @@ -74,7 +76,8 @@ xargs perl -pi -e 's/major = [0-9]+, minor = [0-9]+, micro = [0-9]+/major = 5, minor = 3, micro = 0/g' --> <bouncycastle.vespa.version>1.76</bouncycastle.vespa.version> - <checker-qual.vespa.version>3.37.0</checker-qual.vespa.version> + <byte-buddy.vespa.version>1.14.7</byte-buddy.vespa.version> + <checker-qual.vespa.version>3.38.0</checker-qual.vespa.version> <commons-codec.vespa.version>1.16.0</commons-codec.vespa.version> <commons-csv.vespa.version>1.10.0</commons-csv.vespa.version> <commons-exec.vespa.version>1.3</commons-exec.vespa.version> @@ -83,18 +86,17 @@ <commons.math3.vespa.version>3.6.1</commons.math3.vespa.version> <commons-compress.vespa.version>1.23.0</commons-compress.vespa.version> <curator.vespa.version>5.5.0</curator.vespa.version> - <dropwizard.metrics.vespa.version>3.2.6</dropwizard.metrics.vespa.version> + <dropwizard.metrics.vespa.version>4.2.19</dropwizard.metrics.vespa.version> <eclipse-collections.vespa.version>11.1.0</eclipse-collections.vespa.version> <felix.vespa.version>7.0.5</felix.vespa.version> - <felix.log.vespa.version>1.0.1</felix.log.vespa.version> + <felix.log.vespa.version>1.3.0</felix.log.vespa.version> <findbugs.vespa.version>3.0.2</findbugs.vespa.version> <!-- Should be kept in sync with guava --> <hamcrest.vespa.version>2.2</hamcrest.vespa.version> <hdrhistogram.vespa.version>2.1.12</hdrhistogram.vespa.version> <icu4j.vespa.version>73.2</icu4j.vespa.version> <java-jjwt.vespa.version>0.11.5</java-jjwt.vespa.version> <java-jwt.vespa.version>4.4.0</java-jwt.vespa.version> - <jaxb.runtime.vespa.version>2.3.8</jaxb.runtime.vespa.version> - <jersey.vespa.version>2.40</jersey.vespa.version> + 
<jaxb.runtime.vespa.version>4.0.3</jaxb.runtime.vespa.version> <jetty.vespa.version>11.0.16</jetty.vespa.version> <jetty-servlet-api.vespa.version>5.0.2</jetty-servlet-api.vespa.version> <jimfs.vespa.version>1.3.0</jimfs.vespa.version> @@ -113,17 +115,17 @@ <netty.vespa.version>4.1.97.Final</netty.vespa.version> <netty-tcnative.vespa.version>2.0.61.Final</netty-tcnative.vespa.version> <onnxruntime.vespa.version>1.15.1</onnxruntime.vespa.version> - <opennlp.vespa.version>1.9.4</opennlp.vespa.version> + <opennlp.vespa.version>2.3.0</opennlp.vespa.version> <opentest4j.vespa.version>1.3.0</opentest4j.vespa.version> <org.json.vespa.version>20230618</org.json.vespa.version> <org.lz4.vespa.version>1.8.0</org.lz4.vespa.version> <prometheus.client.vespa.version>0.16.0</prometheus.client.vespa.version> <protobuf.vespa.version>3.24.2</protobuf.vespa.version> - <questdb.vespa.version>6.3.1</questdb.vespa.version> + <questdb.vespa.version>7.3.1</questdb.vespa.version> <spifly.vespa.version>1.3.6</spifly.vespa.version> <snappy.vespa.version>1.1.10.3</snappy.vespa.version> <surefire.vespa.version>3.1.2</surefire.vespa.version> - <wiremock.vespa.version>2.35.0</wiremock.vespa.version> + <wiremock.vespa.version>3.0.1</wiremock.vespa.version> <xerces.vespa.version>2.12.2</xerces.vespa.version> <zero-allocation-hashing.vespa.version>0.16</zero-allocation-hashing.vespa.version> <zookeeper.client.vespa.version>3.8.0</zookeeper.client.vespa.version> diff --git a/document/pom.xml b/document/pom.xml index 5db432d2447..2b13a4ace4f 100644 --- a/document/pom.xml +++ b/document/pom.xml @@ -65,7 +65,7 @@ <dependency> <groupId>com.google.inject</groupId> <artifactId>guice</artifactId> - <classifier>no_aop</classifier> + <scope>provided</scope> </dependency> <dependency> diff --git a/eval/src/apps/analyze_onnx_model/analyze_onnx_model.cpp b/eval/src/apps/analyze_onnx_model/analyze_onnx_model.cpp index a3aa7cbb32f..03db333d582 100644 --- a/eval/src/apps/analyze_onnx_model/analyze_onnx_model.cpp 
+++ b/eval/src/apps/analyze_onnx_model/analyze_onnx_model.cpp @@ -9,6 +9,7 @@ #include <vespa/vespalib/util/require.h> #include <vespa/vespalib/util/guard.h> #include <vespa/vespalib/util/stringfmt.h> +#include <charconv> using vespalib::make_string_short::fmt; @@ -20,7 +21,12 @@ using vespalib::FilePointer; using namespace vespalib::eval; using namespace vespalib::eval::test; -struct MyError { +struct MyError : public std::exception { + explicit MyError(vespalib::stringref m) : + std::exception(), + msg(m) + {} + const char * what() const noexcept override { return msg.c_str(); } vespalib::string msg; }; @@ -47,17 +53,42 @@ void extract(const vespalib::string &str, const vespalib::string &prefix, vespal dst = str.substr(pos); } } +struct MemoryUsage { + size_t size; + size_t rss; +}; -void report_memory_usage(const vespalib::string &desc) { - vespalib::string vm_size = "unknown"; - vespalib::string vm_rss = "unknown"; - vespalib::string line; +static const vespalib::string UNKNOWN = "unknown"; + +size_t convert(const vespalib::string & s) { + if (s == UNKNOWN) return 0; + size_t v(0); + size_t end = s.find("kB"); + auto [ptr,ec] = std::from_chars(s.data(), s.data()+std::min(s.size(), end), v, 10); + if (ec != std::errc()) { + throw std::runtime_error(fmt("Bad format : '%s' at '%s'", s.c_str(), ptr)); + } + if (end == vespalib::string::npos) { + throw std::runtime_error(fmt("Bad format : %s", s.c_str())); + } + return v * 1024; +} + +MemoryUsage extract_memory_usage() { + vespalib::string vm_size = UNKNOWN; + vespalib::string vm_rss = UNKNOWN; FilePointer file(fopen("/proc/self/status", "r")); + vespalib::string line; while (read_line(file, line)) { extract(line, "VmSize:", vm_size); extract(line, "VmRSS:", vm_rss); } - fprintf(stderr, "vm_size: %s, vm_rss: %s (%s)\n", vm_size.c_str(), vm_rss.c_str(), desc.c_str()); + return {convert(vm_size), convert(vm_rss)}; +} + +void report_memory_usage(const vespalib::string &desc) { + MemoryUsage vm = extract_memory_usage(); 
+ fprintf(stderr, "vm_size: %zu kB, vm_rss: %zu kB (%s)\n", vm.size/1024, vm.rss/1024, desc.c_str()); } struct Options { @@ -118,7 +149,7 @@ void dump_wire_info(const Onnx::WireInfo &wire) { struct MakeInputType { Options &opts; std::map<vespalib::string,int> symbolic_sizes; - MakeInputType(Options &opts_in) : opts(opts_in), symbolic_sizes() {} + explicit MakeInputType(Options &opts_in) : opts(opts_in), symbolic_sizes() {} ValueType operator()(const Onnx::TensorInfo &info) { int d = 0; std::vector<ValueType::Dimension> dim_list; @@ -229,30 +260,32 @@ int probe_types() { if (!JsonFormat::decode(std_in, params)) { throw MyError{"invalid json"}; } + MemoryUsage vm_before = extract_memory_usage(); Slime result; auto &root = result.setObject(); auto &types = root.setObject("outputs"); - Onnx model(params["model"].asString().make_string(), Onnx::Optimize::DISABLE); + Onnx model(params["model"].asString().make_string(), Onnx::Optimize::ENABLE); Onnx::WirePlanner planner; - for (size_t i = 0; i < model.inputs().size(); ++i) { - auto spec = params["inputs"][model.inputs()[i].name].asString().make_string(); + for (const auto & input : model.inputs()) { + auto spec = params["inputs"][input.name].asString().make_string(); auto input_type = ValueType::from_spec(spec); if (input_type.is_error()) { - if (!params["inputs"][model.inputs()[i].name].valid()) { - throw MyError{fmt("missing type for model input '%s'", - model.inputs()[i].name.c_str())}; + if (!params["inputs"][input.name].valid()) { + throw MyError(fmt("missing type for model input '%s'", input.name.c_str())); } else { - throw MyError{fmt("invalid type for model input '%s': '%s'", - model.inputs()[i].name.c_str(), spec.c_str())}; + throw MyError(fmt("invalid type for model input '%s': '%s'",input.name.c_str(), spec.c_str())); } } - bind_input(planner, model.inputs()[i], input_type); + bind_input(planner, input, input_type); } planner.prepare_output_types(model); for (const auto &output: model.outputs()) { auto 
output_type = make_output(planner, output); types.setString(output.name, output_type.to_spec()); } + MemoryUsage vm_after = extract_memory_usage(); + root.setLong("vm_size", vm_after.size - vm_before.size); + root.setLong("vm_rss", vm_after.rss - vm_before.rss); write_compact(result, std_out); return 0; } diff --git a/eval/src/tests/instruction/sparse_join_reduce_plan/sparse_join_reduce_plan_test.cpp b/eval/src/tests/instruction/sparse_join_reduce_plan/sparse_join_reduce_plan_test.cpp index e101487ff59..cfc2277278f 100644 --- a/eval/src/tests/instruction/sparse_join_reduce_plan/sparse_join_reduce_plan_test.cpp +++ b/eval/src/tests/instruction/sparse_join_reduce_plan/sparse_join_reduce_plan_test.cpp @@ -40,7 +40,9 @@ struct Event { res_addr.push_back(make_handle(label)); } } - auto operator<=>(const Event &rhs) const = default; + bool operator==(const Event& rhs) const noexcept { + return lhs_idx == rhs.lhs_idx && rhs_idx == rhs.rhs_idx && res_addr == rhs.res_addr; + } }; struct Trace { @@ -55,7 +57,9 @@ struct Trace { events.emplace_back(lhs_idx, rhs_idx, res_addr); return *this; } - auto operator<=>(const Trace &rhs) const = default; + bool operator==(const Trace& rhs) const noexcept { + return estimate == rhs.estimate && events == rhs.events; + } }; std::ostream & diff --git a/eval/src/tests/instruction/universal_dot_product/universal_dot_product_test.cpp b/eval/src/tests/instruction/universal_dot_product/universal_dot_product_test.cpp index 3f60ad69b86..e3393dc2de7 100644 --- a/eval/src/tests/instruction/universal_dot_product/universal_dot_product_test.cpp +++ b/eval/src/tests/instruction/universal_dot_product/universal_dot_product_test.cpp @@ -4,11 +4,19 @@ #include <vespa/eval/eval/value_codec.h> #include <vespa/eval/eval/interpreted_function.h> #include <vespa/eval/eval/tensor_function.h> +#include <vespa/eval/eval/lazy_params.h> +#include <vespa/eval/eval/make_tensor_function.h> +#include <vespa/eval/eval/optimize_tensor_function.h> +#include 
<vespa/eval/eval/compile_tensor_function.h> #include <vespa/eval/instruction/universal_dot_product.h> #include <vespa/eval/eval/test/reference_operations.h> +#include <vespa/eval/eval/test/reference_evaluation.h> #include <vespa/eval/eval/test/gen_spec.h> +#include <vespa/vespalib/util/benchmark_timer.h> +#include <vespa/vespalib/util/classname.h> #include <vespa/vespalib/util/stringfmt.h> #include <vespa/vespalib/gtest/gtest.h> +#include <optional> using namespace vespalib; using namespace vespalib::eval; @@ -17,6 +25,8 @@ using namespace vespalib::eval::test; using vespalib::make_string_short::fmt; const ValueBuilderFactory &prod_factory = FastValueBuilderFactory::get(); +bool bench = false; +double budget = 1.0; GenSpec::seq_t N_16ths = [] (size_t i) noexcept { return (i + 33.0) / 16.0; }; @@ -43,6 +53,169 @@ const std::vector<std::vector<vespalib::string>> reductions = { {}, {"x"}, {"y"}, {"z"}, {"x", "y"}, {"x", "z"}, {"y", "z"} }; +std::vector<std::string> ns_list = { + {"vespalib::eval::instruction::(anonymous namespace)::"}, + {"vespalib::eval::(anonymous namespace)::"}, + {"vespalib::eval::InterpretedFunction::"}, + {"vespalib::eval::tensor_function::"}, + {"vespalib::eval::operation::"}, + {"vespalib::eval::aggr::"}, + {"vespalib::eval::"} +}; +std::string strip_ns(const vespalib::string &str) { + std::string tmp = str; + for (const auto &ns: ns_list) { + for (bool again = true; again;) { + again = false; + if (auto pos = tmp.find(ns); pos < tmp.size()) { + tmp.erase(pos, ns.size()); + again = true; + } + } + } + return tmp; +} + +TensorSpec make_spec(const vespalib::string ¶m_name, size_t idx) { + return GenSpec::from_desc(param_name).cells_double().seq(N(1 + idx)); +} + +TensorSpec eval_ref(const Function &fun) { + std::vector<TensorSpec> params; + for (size_t i = 0; i < fun.num_params(); ++i) { + params.push_back(make_spec(fun.param_name(i), i)); + } + return ReferenceEvaluation::eval(fun, params); +} + +class Optimize +{ +private: + struct ctor_tag{}; 
+public: + enum class With { NONE, CUSTOM, PROD, SPECIFIC }; + With with; + vespalib::string name; + OptimizeTensorFunctionOptions options; + tensor_function_optimizer optimizer; + Optimize(With with_in, const vespalib::string name_in, + const OptimizeTensorFunctionOptions &options_in, + tensor_function_optimizer optimizer_in, ctor_tag) + : with(with_in), name(name_in), options(options_in), optimizer(optimizer_in) {} + static Optimize none() { return {With::NONE, "none", {}, {}, {}}; } + static Optimize prod() { return {With::PROD, "prod", {}, {}, {}}; } + static Optimize custom(const vespalib::string &name_in, const OptimizeTensorFunctionOptions &options_in) { + return {With::CUSTOM, name_in, options_in, {}, {}}; + } + static Optimize specific(const vespalib::string &name_in, tensor_function_optimizer optimizer_in) { + return {With::SPECIFIC, name_in, {}, optimizer_in, {}}; + } + ~Optimize(); +}; +Optimize::~Optimize() = default; + +Optimize baseline() { + OptimizeTensorFunctionOptions my_options; + my_options.allow_universal_dot_product = false; + return Optimize::custom("baseline", my_options); +} + +Optimize with_universal() { + OptimizeTensorFunctionOptions my_options; + my_options.allow_universal_dot_product = true; + return Optimize::custom("with_universal", my_options); +} + +Optimize universal_only() { + auto my_optimizer = [](const TensorFunction &expr, Stash &stash)->const TensorFunction & + { + return UniversalDotProduct::optimize(expr, stash, true); + }; + return Optimize::specific("universal_only", my_optimizer); +} + +using cost_map_t = std::map<vespalib::string,double>; +std::vector<std::pair<vespalib::string,cost_map_t>> benchmark_results; + +void benchmark(const vespalib::string &desc, const vespalib::string &expr, std::vector<Optimize> list) { + auto fun = Function::parse(expr); + ASSERT_FALSE(fun->has_error()); + auto expected = eval_ref(*fun); + cost_map_t cost_map; + fprintf(stderr, "BENCH: %s (%s)\n", desc.c_str(), expr.c_str()); + for 
(Optimize &optimize: list) { + std::vector<Value::UP> values; + for (size_t i = 0; i < fun->num_params(); ++i) { + auto value = value_from_spec(make_spec(fun->param_name(i), i), prod_factory); + values.push_back(std::move(value)); + } + SimpleObjectParams params({}); + std::vector<ValueType> param_types; + for (auto &&up: values) { + params.params.emplace_back(*up); + param_types.push_back(up->type()); + } + NodeTypes node_types(*fun, param_types); + ASSERT_FALSE(node_types.get_type(fun->root()).is_error()); + Stash stash; + const TensorFunction &plain_fun = make_tensor_function(prod_factory, fun->root(), node_types, stash); + const TensorFunction *optimized = nullptr; + switch (optimize.with) { + case Optimize::With::NONE: + optimized = std::addressof(plain_fun); + break; + case Optimize::With::PROD: + optimized = std::addressof(optimize_tensor_function(prod_factory, plain_fun, stash)); + break; + case Optimize::With::CUSTOM: + optimized = std::addressof(optimize_tensor_function(prod_factory, plain_fun, stash, optimize.options)); + break; + case Optimize::With::SPECIFIC: + size_t count = 0; + optimized = std::addressof(apply_tensor_function_optimizer(plain_fun, optimize.optimizer, stash, &count)); + ASSERT_GT(count, 0); + break; + } + ASSERT_NE(optimized, nullptr); + CTFMetaData ctf_meta; + InterpretedFunction ifun(prod_factory, *optimized, &ctf_meta); + ASSERT_EQ(ctf_meta.steps.size(), ifun.program_size()); + BenchmarkTimer timer(budget); + std::vector<duration> prev_time(ctf_meta.steps.size(), duration::zero()); + std::vector<duration> min_time(ctf_meta.steps.size(), duration::max()); + InterpretedFunction::ProfiledContext pctx(ifun); + for (bool first = true; timer.has_budget(); first = false) { + const Value &profiled_result = ifun.eval(pctx, params); + if (first) { + EXPECT_EQ(spec_from_value(profiled_result), expected); + } + timer.before(); + const Value &result = ifun.eval(pctx.context, params); + timer.after(); + if (first) { + 
EXPECT_EQ(spec_from_value(result), expected); + } + for (size_t i = 0; i < ctf_meta.steps.size(); ++i) { + min_time[i] = std::min(min_time[i], pctx.cost[i].second - prev_time[i]); + prev_time[i] = pctx.cost[i].second; + } + } + double cost_us = timer.min_time() * 1000.0 * 1000.0; + cost_map.emplace(optimize.name, cost_us); + fprintf(stderr, " optimized with: %s: %g us {\n", optimize.name.c_str(), cost_us); + for (size_t i = 0; i < ctf_meta.steps.size(); ++i) { + auto name = strip_ns(ctf_meta.steps[i].class_name); + if (name.find("Inject") > name.size() && name.find("ConstValue") > name.size()) { + fprintf(stderr, " %s: %zu ns\n", name.c_str(), (size_t)count_ns(min_time[i])); + fprintf(stderr, " +-- %s\n", strip_ns(ctf_meta.steps[i].symbol_name).c_str()); + } + } + fprintf(stderr, " }\n"); + } + fprintf(stderr, "\n"); + benchmark_results.emplace_back(desc, std::move(cost_map)); +} + TensorSpec perform_dot_product(const TensorSpec &a, const TensorSpec &b, const std::vector<vespalib::string> &dims) { Stash stash; @@ -86,4 +259,87 @@ TEST(UniversalDotProductTest, generic_dot_product_works_for_various_cases) { fprintf(stderr, "total test cases run: %zu\n", test_cases); } -GTEST_MAIN_RUN_ALL_TESTS() +TEST(UniversalDotProductTest, bench_vector_dot_product) { + if (!bench) { + fprintf(stderr, "benchmarking disabled, run with 'bench' parameter to enable\n"); + return; + } + auto optimize_list = std::vector<Optimize>({baseline(), with_universal(), universal_only()}); + + benchmark("number number", "reduce(1.0*2.0,sum)", optimize_list); + benchmark("number vector", "reduce(5.0*x128,sum,x)", optimize_list); + benchmark("vector vector small", "reduce(x16*x16,sum,x)", optimize_list); + benchmark("vector vector large", "reduce(x768*x768,sum,x)", optimize_list); + benchmark("vector matrix full", "reduce(y64*x8y64,sum,x,y)", optimize_list); + benchmark("vector matrix inner", "reduce(y64*x8y64,sum,y)", optimize_list); + benchmark("vector matrix outer", "reduce(y64*x8y64,sum,x)", 
optimize_list); + benchmark("matrix matrix same", "reduce(a8y64*a8y64,sum,y)", optimize_list); + benchmark("matrix matrix different", "reduce(a8y64*b8y64,sum,y)", optimize_list); + benchmark("matmul", "reduce(a8b64*b64c8,sum,b)", optimize_list); + benchmark("sparse overlap", "reduce(x64_1*x64_1,sum,x)", optimize_list); + benchmark("sparse no overlap", "reduce(a64_1*b64_1,sum,b)", optimize_list); + benchmark("mixed dense", "reduce(a1_16x768*x768,sum,x)", optimize_list); + benchmark("mixed mixed complex", "reduce(a1_1x128*a2_1b64_1x128,sum,a,x)", optimize_list); + + size_t max_desc_size = 0; + for (const auto &[desc, cost_map]: benchmark_results) { + max_desc_size = std::max(max_desc_size, desc.size()); + } + for (const auto &[desc, cost_map]: benchmark_results) { + for (size_t i = 0; i < max_desc_size - desc.size(); ++i) { + fprintf(stderr, " "); + } + fprintf(stderr, "%s: ", desc.c_str()); + size_t cnt = 0; + double baseline_cost = 0.0; + double with_universal_cost = 0.0; + double universal_only_cost = 0.0; + for (const auto &[name, cost]: cost_map) { + if (++cnt > 1) { + fprintf(stderr, ", "); + } + if (name == "baseline") { + baseline_cost = cost; + } else if (name == "with_universal") { + with_universal_cost = cost; + } else if (name == "universal_only") { + universal_only_cost = cost; + } + fprintf(stderr, "%s: %8.3f us", name.c_str(), cost); + } + if (with_universal_cost > 1.1 * baseline_cost) { + fprintf(stderr, ", LOSS: %8.3f", with_universal_cost / baseline_cost); + } + if (baseline_cost > 1.1 * with_universal_cost) { + fprintf(stderr, ", GAIN: %8.3f", baseline_cost / with_universal_cost); + } + if (with_universal_cost > 1.1 * universal_only_cost) { + fprintf(stderr, ", MISSED: %8.3f", with_universal_cost / universal_only_cost); + } + fprintf(stderr, "\n"); + } + fprintf(stderr, "\n"); +} + +int main(int argc, char **argv) { + const std::string bench_option = "bench"; + const std::string fast_option = "fast"; + const std::string slow_option = "slow"; + if 
((argc > 1) && (bench_option == argv[1])) { + bench = true; + ++argv; + --argc; + } + if ((argc > 1) && (fast_option == argv[1])) { + budget = 0.1; + ++argv; + --argc; + } + if ((argc > 1) && (slow_option == argv[1])) { + budget = 5.0; + ++argv; + --argc; + } + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/eval/src/vespa/eval/eval/interpreted_function.cpp b/eval/src/vespa/eval/eval/interpreted_function.cpp index 48683291cfb..8a32b7d11ff 100644 --- a/eval/src/vespa/eval/eval/interpreted_function.cpp +++ b/eval/src/vespa/eval/eval/interpreted_function.cpp @@ -68,6 +68,8 @@ InterpretedFunction::ProfiledContext::ProfiledContext(const InterpretedFunction { } +InterpretedFunction::ProfiledContext::~ProfiledContext() = default; + vespalib::string InterpretedFunction::Instruction::resolve_symbol() const { diff --git a/eval/src/vespa/eval/eval/interpreted_function.h b/eval/src/vespa/eval/eval/interpreted_function.h index 4528ccb79aa..86ab22073da 100644 --- a/eval/src/vespa/eval/eval/interpreted_function.h +++ b/eval/src/vespa/eval/eval/interpreted_function.h @@ -74,6 +74,7 @@ public: Context context; std::vector<std::pair<size_t,duration>> cost; ProfiledContext(const InterpretedFunction &ifun); + ~ProfiledContext(); }; using op_function = void (*)(State &, uint64_t); class Instruction { diff --git a/eval/src/vespa/eval/eval/optimize_tensor_function.cpp b/eval/src/vespa/eval/eval/optimize_tensor_function.cpp index 3d9152d6b80..4013021aaa4 100644 --- a/eval/src/vespa/eval/eval/optimize_tensor_function.cpp +++ b/eval/src/vespa/eval/eval/optimize_tensor_function.cpp @@ -44,6 +44,13 @@ LOG_SETUP(".eval.eval.optimize_tensor_function"); namespace vespalib::eval { +OptimizeTensorFunctionOptions::OptimizeTensorFunctionOptions() noexcept + : allow_universal_dot_product(false) +{ +} + +OptimizeTensorFunctionOptions::~OptimizeTensorFunctionOptions() = default; + namespace { using Child = TensorFunction::Child; @@ -60,7 +67,9 @@ void 
run_optimize_pass(const Child &root, Func&& optimize_node) { } } -const TensorFunction &optimize_for_factory(const ValueBuilderFactory &, const TensorFunction &expr, Stash &stash) { +const TensorFunction &optimize_for_factory(const ValueBuilderFactory &, const TensorFunction &expr, Stash &stash, + const OptimizeTensorFunctionOptions &options) +{ Child root(expr); run_optimize_pass(root, [&stash](const Child &child) { @@ -78,7 +87,7 @@ const TensorFunction &optimize_for_factory(const ValueBuilderFactory &, const Te child.set(L2Distance::optimize(child.get(), stash)); child.set(MixedL2Distance::optimize(child.get(), stash)); }); - run_optimize_pass(root, [&stash](const Child &child) + run_optimize_pass(root, [&stash,&options](const Child &child) { child.set(DenseDotProductFunction::optimize(child.get(), stash)); child.set(SparseDotProductFunction::optimize(child.get(), stash)); @@ -89,7 +98,9 @@ const TensorFunction &optimize_for_factory(const ValueBuilderFactory &, const Te child.set(DenseHammingDistance::optimize(child.get(), stash)); child.set(SimpleJoinCount::optimize(child.get(), stash)); child.set(MappedLookup::optimize(child.get(), stash)); - // child.set(UniversalDotProduct::optimize(child.get(), stash)); + if (options.allow_universal_dot_product) { + child.set(UniversalDotProduct::optimize(child.get(), stash, false)); + } }); run_optimize_pass(root, [&stash](const Child &child) { @@ -116,11 +127,33 @@ const TensorFunction &optimize_for_factory(const ValueBuilderFactory &, const Te } // namespace vespalib::eval::<unnamed> -const TensorFunction &optimize_tensor_function(const ValueBuilderFactory &factory, const TensorFunction &function, Stash &stash) { +const TensorFunction &optimize_tensor_function(const ValueBuilderFactory &factory, const TensorFunction &function, Stash &stash, + const OptimizeTensorFunctionOptions &options) +{ LOG(debug, "tensor function before optimization:\n%s\n", function.as_string().c_str()); - const TensorFunction &optimized = 
optimize_for_factory(factory, function, stash); + const TensorFunction &optimized = optimize_for_factory(factory, function, stash, options); LOG(debug, "tensor function after optimization:\n%s\n", optimized.as_string().c_str()); return optimized; } +const TensorFunction &optimize_tensor_function(const ValueBuilderFactory &factory, const TensorFunction &function, Stash &stash) { + return optimize_tensor_function(factory, function, stash, OptimizeTensorFunctionOptions()); +} + +const TensorFunction &apply_tensor_function_optimizer(const TensorFunction &function, tensor_function_optimizer optimizer, Stash &stash, size_t *count) { + Child root(function); + run_optimize_pass(root, [&](const Child &child) + { + const TensorFunction &child_before = child.get(); + const TensorFunction &child_after = optimizer(child_before, stash); + if (std::addressof(child_after) != std::addressof(child_before)) { + child.set(child_after); + if (count != nullptr) { + ++(*count); + } + } + }); + return root.get(); +} + } // namespace vespalib::eval diff --git a/eval/src/vespa/eval/eval/optimize_tensor_function.h b/eval/src/vespa/eval/eval/optimize_tensor_function.h index d8ed104f3a6..4a5945860e7 100644 --- a/eval/src/vespa/eval/eval/optimize_tensor_function.h +++ b/eval/src/vespa/eval/eval/optimize_tensor_function.h @@ -2,13 +2,26 @@ #pragma once +#include <functional> + namespace vespalib { class Stash; } namespace vespalib::eval { +struct OptimizeTensorFunctionOptions { + bool allow_universal_dot_product; + OptimizeTensorFunctionOptions() noexcept; + ~OptimizeTensorFunctionOptions(); +}; + struct ValueBuilderFactory; struct TensorFunction; +const TensorFunction &optimize_tensor_function(const ValueBuilderFactory &factory, const TensorFunction &function, Stash &stash, + const OptimizeTensorFunctionOptions &options); const TensorFunction &optimize_tensor_function(const ValueBuilderFactory &factory, const TensorFunction &function, Stash &stash); +using tensor_function_optimizer = 
std::function<const TensorFunction &(const TensorFunction &expr, Stash &stash)>; +const TensorFunction &apply_tensor_function_optimizer(const TensorFunction &function, tensor_function_optimizer optimizer, Stash &stash, size_t *count = nullptr); + } // namespace vespalib::eval diff --git a/eval/src/vespa/eval/eval/test/eval_fixture.cpp b/eval/src/vespa/eval/eval/test/eval_fixture.cpp index 90761e43a01..ef81fb27def 100644 --- a/eval/src/vespa/eval/eval/test/eval_fixture.cpp +++ b/eval/src/vespa/eval/eval/test/eval_fixture.cpp @@ -63,25 +63,18 @@ struct MyMutableInject : public tensor_function::Inject { }; const TensorFunction &maybe_patch(bool allow_mutable, const TensorFunction &plain_fun, const std::set<size_t> &mutable_set, Stash &stash) { - using Child = TensorFunction::Child; if (!allow_mutable) { return plain_fun; } - Child root(plain_fun); - std::vector<Child::CREF> nodes({root}); - for (size_t i = 0; i < nodes.size(); ++i) { - nodes[i].get().get().push_children(nodes); - } - while (!nodes.empty()) { - const Child &child = nodes.back(); - if (auto inject = as<tensor_function::Inject>(child.get())) { - if (mutable_set.count(inject->param_idx()) > 0) { - child.set(stash.create<MyMutableInject>(inject->result_type(), inject->param_idx())); - } - } - nodes.pop_back(); - } - return root.get(); + auto optimizer = [&mutable_set](const TensorFunction &node, Stash &my_stash)->const TensorFunction &{ + if (auto inject = as<tensor_function::Inject>(node); + inject && mutable_set.count(inject->param_idx()) > 0) + { + return my_stash.create<MyMutableInject>(inject->result_type(), inject->param_idx()); + } + return node; + }; + return apply_tensor_function_optimizer(plain_fun, optimizer, stash); } std::vector<Value::UP> make_params(const ValueBuilderFactory &factory, const Function &function, diff --git a/eval/src/vespa/eval/instruction/universal_dot_product.cpp b/eval/src/vespa/eval/instruction/universal_dot_product.cpp index 79a94d862bf..86e6be52de4 100644 --- 
a/eval/src/vespa/eval/instruction/universal_dot_product.cpp +++ b/eval/src/vespa/eval/instruction/universal_dot_product.cpp @@ -84,6 +84,14 @@ struct SelectUniversalDotProduct { } }; +bool check_types(const ValueType &res, const ValueType &lhs, const ValueType &rhs) { + UniversalDotProductParam param(res, lhs, rhs); + if (param.vector_size < 8) { + return false; + } + return true; +} + } // namespace <unnamed> UniversalDotProduct::UniversalDotProduct(const ValueType &res_type_in, @@ -106,11 +114,13 @@ UniversalDotProduct::compile_self(const ValueBuilderFactory &, Stash &stash) con } const TensorFunction & -UniversalDotProduct::optimize(const TensorFunction &expr, Stash &stash) +UniversalDotProduct::optimize(const TensorFunction &expr, Stash &stash, bool force) { if (auto reduce = as<Reduce>(expr); reduce && (reduce->aggr() == Aggr::SUM)) { if (auto join = as<Join>(reduce->child()); join && (join->function() == Mul::f)) { - return stash.create<UniversalDotProduct>(expr.result_type(), join->lhs(), join->rhs()); + if (force || check_types(expr.result_type(), join->lhs().result_type(), join->rhs().result_type())) { + return stash.create<UniversalDotProduct>(expr.result_type(), join->lhs(), join->rhs()); + } } } return expr; diff --git a/eval/src/vespa/eval/instruction/universal_dot_product.h b/eval/src/vespa/eval/instruction/universal_dot_product.h index ac5aa157f17..40fd109cc73 100644 --- a/eval/src/vespa/eval/instruction/universal_dot_product.h +++ b/eval/src/vespa/eval/instruction/universal_dot_product.h @@ -9,6 +9,9 @@ namespace vespalib::eval { /** * Tensor function performing dot product compatible operations * (join:mul, reduce:sum) on values of arbitrary complexity. + * + * Note: can evaluate 'anything', but unless 'force' is given; will + * try to be a bit conservative about when to optimize. 
**/ class UniversalDotProduct : public tensor_function::Op2 { @@ -16,7 +19,7 @@ public: UniversalDotProduct(const ValueType &res_type, const TensorFunction &lhs, const TensorFunction &rhs); InterpretedFunction::Instruction compile_self(const ValueBuilderFactory &factory, Stash &stash) const override; bool result_is_mutable() const override { return true; } - static const TensorFunction &optimize(const TensorFunction &expr, Stash &stash); + static const TensorFunction &optimize(const TensorFunction &expr, Stash &stash, bool force); }; } // namespace diff --git a/eval/src/vespa/eval/onnx/onnx_wrapper.cpp b/eval/src/vespa/eval/onnx/onnx_wrapper.cpp index 8f9450c2660..89d88dcc32c 100644 --- a/eval/src/vespa/eval/onnx/onnx_wrapper.cpp +++ b/eval/src/vespa/eval/onnx/onnx_wrapper.cpp @@ -8,10 +8,6 @@ #include <vespa/vespalib/util/stringfmt.h> #include <vespa/vespalib/util/typify.h> #include <vespa/vespalib/util/classname.h> -#include <assert.h> -#include <cmath> -#include <stdlib.h> -#include <stdio.h> #include <type_traits> #include <vespa/log/log.h> @@ -171,15 +167,15 @@ private: public: OnnxString(const OnnxString &rhs) = delete; OnnxString &operator=(const OnnxString &rhs) = delete; - OnnxString(OnnxString &&rhs) = default; - OnnxString &operator=(OnnxString &&rhs) = default; + OnnxString(OnnxString &&rhs) noexcept = default; + OnnxString &operator=(OnnxString &&rhs) noexcept = default; const char *get() const { return _str.get(); } ~OnnxString() = default; static OnnxString get_input_name(const Ort::Session &session, size_t idx) { - return OnnxString(session.GetInputNameAllocated(idx, _alloc)); + return {session.GetInputNameAllocated(idx, _alloc)}; } static OnnxString get_output_name(const Ort::Session &session, size_t idx) { - return OnnxString(session.GetOutputNameAllocated(idx, _alloc)); + return {session.GetOutputNameAllocated(idx, _alloc)}; } }; Ort::AllocatorWithDefaultOptions OnnxString::_alloc; @@ -216,7 +212,7 @@ Onnx::TensorType get_type_of(const Ort::Value 
&value) { throw Ort::Exception("[onnx wrapper] actual value has unknown dimension size", ORT_FAIL); } } - return Onnx::TensorType(make_element_type(element_type), shape); + return {make_element_type(element_type), shape}; } std::vector<int64_t> extract_sizes(const ValueType &type) { @@ -306,7 +302,7 @@ Onnx::WirePlanner::do_model_probe(const Onnx &model) result_values.emplace_back(nullptr); } Ort::RunOptions run_opts(nullptr); - Ort::Session &session = const_cast<Ort::Session&>(model._session); + auto &session = const_cast<Ort::Session&>(model._session); session.Run(run_opts, model._input_name_refs.data(), param_values.data(), param_values.size(), model._output_name_refs.data(), result_values.data(), result_values.size()); @@ -554,7 +550,7 @@ Onnx::EvalContext::EvalContext(const Onnx &model, const WireInfo &wire_info) const auto &vespa = _wire_info.vespa_inputs[i]; const auto &onnx = _wire_info.onnx_inputs[i]; if (is_same_type(vespa.cell_type(), onnx.elements)) { - _param_values.push_back(Ort::Value(nullptr)); + _param_values.emplace_back(nullptr); _param_binders.push_back(SelectAdaptParam()(vespa.cell_type())); } else { _param_values.push_back(CreateOnnxTensor()(onnx, _alloc)); @@ -587,7 +583,7 @@ Onnx::EvalContext::bind_param(size_t i, const Value ¶m) void Onnx::EvalContext::eval() { - Ort::Session &session = const_cast<Ort::Session&>(_model._session); + auto &session = const_cast<Ort::Session&>(_model._session); Ort::RunOptions run_opts(nullptr); session.Run(run_opts, _model._input_name_refs.data(), _param_values.data(), _param_values.size(), diff --git a/flags/pom.xml b/flags/pom.xml index 0bfb02b1f32..816e5416cec 100644 --- a/flags/pom.xml +++ b/flags/pom.xml @@ -51,7 +51,7 @@ <dependency> <groupId>com.google.inject</groupId> <artifactId>guice</artifactId> - <classifier>no_aop</classifier> + <scope>provided</scope> </dependency> diff --git a/flags/src/main/java/com/yahoo/vespa/flags/Flags.java b/flags/src/main/java/com/yahoo/vespa/flags/Flags.java index 
ea716556210..f1d639432cc 100644 --- a/flags/src/main/java/com/yahoo/vespa/flags/Flags.java +++ b/flags/src/main/java/com/yahoo/vespa/flags/Flags.java @@ -191,13 +191,13 @@ public class Flags { // TODO: Move to a permanent flag public static final UnboundListFlag<String> ALLOWED_ATHENZ_PROXY_IDENTITIES = defineListFlag( "allowed-athenz-proxy-identities", List.of(), String.class, - List.of("bjorncs", "tokle"), "2021-02-10", "2023-09-01", + List.of("bjorncs", "tokle"), "2021-02-10", "2023-10-01", "Allowed Athenz proxy identities", "takes effect at redeployment"); public static final UnboundIntFlag MAX_ACTIVATION_INHIBITED_OUT_OF_SYNC_GROUPS = defineIntFlag( "max-activation-inhibited-out-of-sync-groups", 0, - List.of("vekterli"), "2021-02-19", "2023-09-01", + List.of("vekterli"), "2021-02-19", "2023-12-01", "Allows replicas in up to N content groups to not be activated " + "for query visibility if they are out of sync with a majority of other replicas", "Takes effect at redeployment", @@ -205,7 +205,7 @@ public class Flags { public static final UnboundDoubleFlag MIN_NODE_RATIO_PER_GROUP = defineDoubleFlag( "min-node-ratio-per-group", 0.0, - List.of("geirst", "vekterli"), "2021-07-16", "2023-09-01", + List.of("geirst", "vekterli"), "2021-07-16", "2023-12-01", "Minimum ratio of nodes that have to be available (i.e. 
not Down) in any hierarchic content cluster group for the group to be Up", "Takes effect at redeployment", APPLICATION_ID); @@ -230,7 +230,7 @@ public class Flags { public static final UnboundBooleanFlag ENABLED_HORIZON_DASHBOARD = defineFeatureFlag( "enabled-horizon-dashboard", false, - List.of("olaa"), "2021-09-13", "2023-09-01", + List.of("olaa"), "2021-09-13", "2023-12-01", "Enable Horizon dashboard", "Takes effect immediately", TENANT_ID, CONSOLE_USER_EMAIL @@ -252,7 +252,7 @@ public class Flags { public static final UnboundBooleanFlag ENABLE_PROXY_PROTOCOL_MIXED_MODE = defineFeatureFlag( "enable-proxy-protocol-mixed-mode", true, - List.of("tokle"), "2022-05-09", "2023-09-01", + List.of("tokle"), "2022-05-09", "2023-10-01", "Enable or disable proxy protocol mixed mode", "Takes effect on redeployment", APPLICATION_ID); @@ -266,7 +266,7 @@ public class Flags { public static final UnboundBooleanFlag SEPARATE_METRIC_CHECK_CONFIG = defineFeatureFlag( "separate-metric-check-config", false, - List.of("olaa"), "2022-07-04", "2023-09-01", + List.of("olaa"), "2022-07-04", "2023-12-01", "Determines whether one metrics config check should be written per Vespa node", "Takes effect on next tick", HOSTNAME); @@ -281,7 +281,7 @@ public class Flags { public static final UnboundBooleanFlag ENABLE_OTELCOL = defineFeatureFlag( "enable-otel-collector", false, - List.of("olaa"), "2022-09-23", "2023-09-01", + List.of("olaa"), "2022-09-23", "2023-12-01", "Whether an OpenTelemetry collector should be enabled", "Takes effect at next tick", APPLICATION_ID); @@ -317,14 +317,14 @@ public class Flags { "Takes effect at next host-admin tick"); public static final UnboundListFlag<String> WEIGHTED_ENDPOINT_RECORD_TTL = defineListFlag( - "weighted-endpoint-record-ttl", List.of(), String.class, List.of("jonmv"), "2023-05-16", "2023-09-01", + "weighted-endpoint-record-ttl", List.of(), String.class, List.of("jonmv"), "2023-05-16", "2024-01-01", "A list of endpoints and custom TTLs, on the form 
\"endpoint-fqdn:TTL-seconds\". " + "Where specified, CNAME records are used instead of the default ALIAS records, which have a default 60s TTL.", "Takes effect at redeployment from controller"); public static final UnboundBooleanFlag ENABLE_DATAPLANE_PROXY = defineFeatureFlag( "enable-dataplane-proxy", false, - List.of("mortent", "olaa"), "2023-05-15", "2023-09-01", + List.of("mortent", "olaa"), "2023-05-15", "2023-10-01", "Whether to enable dataplane proxy", "Takes effect at redeployment", APPLICATION_ID @@ -375,6 +375,13 @@ public class Flags { "Whether to write application data (active session id, last deployed session id etc. ) as json", "Takes effect immediately"); + public static final UnboundBooleanFlag ASSIGN_RANDOMIZED_ID = defineFeatureFlag( + "assign-randomized-id", false, + List.of("mortent"), "2023-08-31", "2024-02-01", + "Whether to assign randomized id to the application", + "Takes effect immediately", + APPLICATION_ID); + /** WARNING: public for testing: All flags should be defined in {@link Flags}. 
*/ public static UnboundBooleanFlag defineFeatureFlag(String flagId, boolean defaultValue, List<String> owners, String createdAt, String expiresAt, String description, diff --git a/http-client/pom.xml b/http-client/pom.xml index c8a58330f24..e5900d28009 100644 --- a/http-client/pom.xml +++ b/http-client/pom.xml @@ -48,8 +48,8 @@ <scope>test</scope> </dependency> <dependency> - <groupId>com.github.tomakehurst</groupId> - <artifactId>wiremock-jre8-standalone</artifactId> + <groupId>org.wiremock</groupId> + <artifactId>wiremock-standalone</artifactId> <scope>test</scope> </dependency> <dependency> diff --git a/jdisc-security-filters/pom.xml b/jdisc-security-filters/pom.xml index 3440f9089d7..8a456d06a40 100644 --- a/jdisc-security-filters/pom.xml +++ b/jdisc-security-filters/pom.xml @@ -69,6 +69,11 @@ <version>${project.version}</version> <scope>test</scope> </dependency> + <dependency> + <groupId>jakarta.inject</groupId> + <artifactId>jakarta.inject-api</artifactId> + <scope>test</scope> + </dependency> </dependencies> <build> diff --git a/jdisc_core/pom.xml b/jdisc_core/pom.xml index 4471269358a..9bc27cd4e77 100644 --- a/jdisc_core/pom.xml +++ b/jdisc_core/pom.xml @@ -36,11 +36,6 @@ <!-- jaxb end --> <dependency> - <!-- Newer version than the one in rt.jar, including the ElementTraversal class needed by Xerces (Aug 2015, still valid Sep 2017) --> - <groupId>xml-apis</groupId> - <artifactId>xml-apis</artifactId> - </dependency> - <dependency> <groupId>org.mockito</groupId> <artifactId>mockito-core</artifactId> <scope>test</scope> @@ -87,7 +82,6 @@ <dependency> <groupId>com.google.inject</groupId> <artifactId>guice</artifactId> - <classifier>no_aop</classifier> <!-- Non-AOP version required for Java 8 compatibility --> </dependency> <dependency> <groupId>org.apache.felix</groupId> @@ -105,11 +99,7 @@ <exclusions> <exclusion> <groupId>org.osgi</groupId> - <artifactId>org.osgi.compendium</artifactId> - </exclusion> - <exclusion> - <groupId>org.osgi</groupId> - 
<artifactId>org.osgi.core</artifactId> + <artifactId>*</artifactId> </exclusion> </exclusions> </dependency> @@ -217,7 +207,7 @@ <!-- WARNING: Removing jars from the list below usually requires a new major Vespa version. --> <!-- NOTE: This list must be kept in sync with ExportPackagesIT.java --> <argument>__REPLACE_VERSION__${project.build.directory}/dependency/guava.jar</argument> - <argument>${project.build.directory}/dependency/guice-no_aop.jar</argument> + <argument>${project.build.directory}/dependency/guice.jar</argument> <argument>${project.build.directory}/dependency/slf4j-api.jar</argument> <argument>${project.build.directory}/dependency/slf4j-jdk14.jar</argument> <argument>${project.build.directory}/dependency/jcl-over-slf4j.jar</argument> diff --git a/jdisc_core/src/main/java/com/yahoo/jdisc/core/ConsoleLogFormatter.java b/jdisc_core/src/main/java/com/yahoo/jdisc/core/ConsoleLogFormatter.java index 080f3d3f74b..f7f53d304df 100644 --- a/jdisc_core/src/main/java/com/yahoo/jdisc/core/ConsoleLogFormatter.java +++ b/jdisc_core/src/main/java/com/yahoo/jdisc/core/ConsoleLogFormatter.java @@ -4,7 +4,7 @@ package com.yahoo.jdisc.core; import org.osgi.framework.Bundle; import org.osgi.framework.ServiceReference; import org.osgi.service.log.LogEntry; -import org.osgi.service.log.LogService; +import org.osgi.service.log.LogLevel; import java.io.PrintWriter; import java.io.StringWriter; @@ -86,17 +86,17 @@ class ConsoleLogFormatter { } private StringBuilder formatLevel(LogEntry entry, StringBuilder out) { - switch (entry.getLevel()) { - case LogService.LOG_ERROR: + switch (entry.getLogLevel()) { + case ERROR: out.append("error"); break; - case LogService.LOG_WARNING: + case WARN: out.append("warning"); break; - case LogService.LOG_INFO: + case INFO: out.append("info"); break; - case LogService.LOG_DEBUG: + case DEBUG: out.append("debug"); break; default: @@ -117,7 +117,7 @@ class ConsoleLogFormatter { private StringBuilder formatException(LogEntry entry, 
StringBuilder out) { Throwable t = entry.getException(); if (t != null) { - if (entry.getLevel() == LogService.LOG_INFO) { + if (entry.getLogLevel() == LogLevel.INFO) { out.append(": "); String msg = t.getMessage(); if (msg != null) { diff --git a/jdisc_core/src/main/java/com/yahoo/jdisc/core/ConsoleLogListener.java b/jdisc_core/src/main/java/com/yahoo/jdisc/core/ConsoleLogListener.java index 2cfa604109b..1d872bbcb64 100644 --- a/jdisc_core/src/main/java/com/yahoo/jdisc/core/ConsoleLogListener.java +++ b/jdisc_core/src/main/java/com/yahoo/jdisc/core/ConsoleLogListener.java @@ -3,62 +3,64 @@ package com.yahoo.jdisc.core; import com.yahoo.net.HostName; import org.osgi.service.log.LogEntry; +import org.osgi.service.log.LogLevel; import org.osgi.service.log.LogListener; import java.io.PrintStream; +import java.util.Optional; /** * @author Vikas Panwar */ class ConsoleLogListener implements LogListener { - public static final int DEFAULT_LOG_LEVEL = Integer.MAX_VALUE; + public static final LogLevel DEFAULT_LOG_LEVEL = LogLevel.TRACE; private final ConsoleLogFormatter formatter; private final PrintStream out; - private final int maxLevel; + private final LogLevel maxLevel; ConsoleLogListener(PrintStream out, String serviceName, String logLevel) { this.out = out; this.formatter = new ConsoleLogFormatter(getHostname(), getProcessId(), serviceName); - this.maxLevel = parseLogLevel(logLevel); + this.maxLevel = parseLogLevel(logLevel).orElse(null); } @Override public void logged(LogEntry entry) { - if (entry.getLevel() > maxLevel) { + if (maxLevel == null || !maxLevel.implies(entry.getLogLevel())) { return; } out.println(formatter.formatEntry(entry)); } - public static int parseLogLevel(String logLevel) { + public static Optional<LogLevel> parseLogLevel(String logLevel) { if (logLevel == null || logLevel.isEmpty()) { - return DEFAULT_LOG_LEVEL; + return Optional.of(DEFAULT_LOG_LEVEL); } if (logLevel.equalsIgnoreCase("OFF")) { - return Integer.MIN_VALUE; + return 
Optional.empty(); } if (logLevel.equalsIgnoreCase("ERROR")) { - return 1; + return Optional.of(LogLevel.ERROR); } if (logLevel.equalsIgnoreCase("WARNING")) { - return 2; + return Optional.of(LogLevel.WARN); } if (logLevel.equalsIgnoreCase("INFO")) { - return 3; + return Optional.of(LogLevel.INFO); } if (logLevel.equalsIgnoreCase("DEBUG")) { - return 4; + return Optional.of(LogLevel.DEBUG); } if (logLevel.equalsIgnoreCase("ALL")) { - return Integer.MAX_VALUE; + return Optional.of(LogLevel.TRACE); } try { - return Integer.valueOf(logLevel); - } catch (NumberFormatException e) { + return Optional.of(LogLevel.values()[Integer.parseInt(logLevel)]); + } catch (NumberFormatException | IndexOutOfBoundsException e) { // fall through } - return DEFAULT_LOG_LEVEL; + return Optional.of(DEFAULT_LOG_LEVEL); } public static ConsoleLogListener newInstance() { diff --git a/jdisc_core/src/main/java/com/yahoo/jdisc/core/OsgiLogHandler.java b/jdisc_core/src/main/java/com/yahoo/jdisc/core/OsgiLogHandler.java index 48fdb2a0293..989bc26dd85 100644 --- a/jdisc_core/src/main/java/com/yahoo/jdisc/core/OsgiLogHandler.java +++ b/jdisc_core/src/main/java/com/yahoo/jdisc/core/OsgiLogHandler.java @@ -4,6 +4,7 @@ package com.yahoo.jdisc.core; import com.google.common.collect.ImmutableMap; import org.osgi.framework.Bundle; import org.osgi.framework.ServiceReference; +import org.osgi.service.log.LogLevel; import org.osgi.service.log.LogService; import java.util.Dictionary; @@ -45,8 +46,9 @@ class OsgiLogHandler extends Handler { } @Override + @SuppressWarnings("deprecation") public void publish(LogRecord record) { - logService.log(new LogRecordReference(record), toServiceLevel(record.getLevel()), record.getMessage(), + logService.log(new LogRecordReference(record), toServiceLevel(record.getLevel()).ordinal(), record.getMessage(), record.getThrown()); } @@ -60,22 +62,22 @@ class OsgiLogHandler extends Handler { // empty } - public static int toServiceLevel(Level level) { + public static LogLevel 
toServiceLevel(Level level) { int val = level.intValue(); if (val >= Level.SEVERE.intValue()) { - return LogService.LOG_ERROR; + return LogLevel.ERROR; } if (val >= Level.WARNING.intValue()) { - return LogService.LOG_WARNING; + return LogLevel.WARN; } if (val >= Level.INFO.intValue()) { - return LogService.LOG_INFO; + return LogLevel.INFO; } // Level.CONFIG // Level.FINE // Level.FINER // Level.FINEST - return LogService.LOG_DEBUG; + return LogLevel.DEBUG; } private static <T> Map<String, T> createDictionary(T[] in) { diff --git a/jdisc_core/src/main/java/com/yahoo/jdisc/core/OsgiLogManager.java b/jdisc_core/src/main/java/com/yahoo/jdisc/core/OsgiLogManager.java deleted file mode 100644 index 0530d63fe7a..00000000000 --- a/jdisc_core/src/main/java/com/yahoo/jdisc/core/OsgiLogManager.java +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -package com.yahoo.jdisc.core; - -import org.osgi.framework.BundleContext; -import org.osgi.framework.ServiceReference; -import org.osgi.service.log.LogService; -import org.osgi.util.tracker.ServiceTracker; -import org.osgi.util.tracker.ServiceTrackerCustomizer; - -import java.util.concurrent.CopyOnWriteArrayList; -import java.util.logging.Handler; -import java.util.logging.Level; -import java.util.logging.Logger; - -/** - * TODO: unused, remove (not public api) - * - * @author Simon Thoresen Hult - */ -class OsgiLogManager implements LogService { - - private static final Object globalLock = new Object(); - private final CopyOnWriteArrayList<LogService> services = new CopyOnWriteArrayList<>(); - private final boolean configureLogLevel; - private ServiceTracker<LogService,LogService> tracker; - - OsgiLogManager(boolean configureLogLevel) { - this.configureLogLevel = configureLogLevel; - } - - public void install(BundleContext osgiContext) { - if (tracker != null) { - throw new IllegalStateException("OsgiLogManager already installed."); - } - 
tracker = new ServiceTracker<>(osgiContext, LogService.class, new ServiceTrackerCustomizer<>() { - - @Override - public LogService addingService(ServiceReference<LogService> reference) { - LogService service = osgiContext.getService(reference); - services.add(service); - return service; - } - - @Override - public void modifiedService(ServiceReference<LogService> reference, LogService service) { - - } - - @Override - public void removedService(ServiceReference<LogService> reference, LogService service) { - services.remove(service); - } - }); - tracker.open(); - synchronized (globalLock) { - Logger root = Logger.getLogger(""); - if (configureLogLevel) { - root.setLevel(Level.ALL); - } - for (Handler handler : root.getHandlers()) { - root.removeHandler(handler); - } - root.addHandler(new OsgiLogHandler(this)); - } - } - - public boolean uninstall() { - if (tracker == null) { - return false; - } - tracker.close(); // implicitly clears the services array - tracker = null; - return true; - } - - @Override - public void log(int level, String message) { - log(null, level, message, null); - } - - @Override - public void log(int level, String message, Throwable throwable) { - log(null, level, message, throwable); - } - - @SuppressWarnings("rawtypes") - @Override - public void log(ServiceReference serviceRef, int level, String message) { - log(serviceRef, level, message, null); - } - - @SuppressWarnings("rawtypes") - @Override - public void log(ServiceReference serviceRef, int level, String message, Throwable throwable) { - for (LogService obj : services) { - obj.log(serviceRef, level, message, throwable); - } - } - - public static OsgiLogManager newInstance() { - return new OsgiLogManager(System.getProperty("java.util.logging.config.file") == null); - } -} diff --git a/jdisc_core/src/test/java/com/yahoo/jdisc/core/ConsoleLogFormatterTestCase.java b/jdisc_core/src/test/java/com/yahoo/jdisc/core/ConsoleLogFormatterTestCase.java index 64130ddc125..4376ccb6c7e 100644 --- 
a/jdisc_core/src/test/java/com/yahoo/jdisc/core/ConsoleLogFormatterTestCase.java +++ b/jdisc_core/src/test/java/com/yahoo/jdisc/core/ConsoleLogFormatterTestCase.java @@ -6,7 +6,7 @@ import org.mockito.Mockito; import org.osgi.framework.Bundle; import org.osgi.framework.ServiceReference; import org.osgi.service.log.LogEntry; -import org.osgi.service.log.LogService; +import org.osgi.service.log.LogLevel; import java.io.PrintWriter; import java.io.StringWriter; @@ -22,14 +22,14 @@ import static org.junit.jupiter.api.Assertions.assertEquals; public class ConsoleLogFormatterTestCase { private static final ConsoleLogFormatter SIMPLE_FORMATTER = new ConsoleLogFormatter(null, null, null); - private static final LogEntry SIMPLE_ENTRY = new MyEntry(0, 0, null); + private static final LogEntry SIMPLE_ENTRY = new MyEntry(0, LogLevel.AUDIT, null); // TODO: Should (at least) use ConsoleLogFormatter.ABSENCE_REPLACEMENT instead of literal '-'. See ticket 7128315. @Test void requireThatMillisecondsArePadded() { for (int i = 0; i < 10000; ++i) { - LogEntry entry = new MyEntry(i, 0, null); + LogEntry entry = new MyEntry(i, LogLevel.AUDIT, null); Instant instant = Instant.ofEpochMilli(i); assertEquals(String.format("%d.%06d\t-\t-\t-\t-\tunknown\t", instant.getEpochSecond(), instant.getNano() / 1000), SIMPLE_FORMATTER.formatEntry(entry)); @@ -70,7 +70,7 @@ public class ConsoleLogFormatterTestCase { @Test void requireThatProcessIdIncludesThreadIdWhenAvailable() { - LogEntry entry = new MyEntry(0, 0, null).putProperty("THREAD_ID", "threadId"); + LogEntry entry = new MyEntry(0, LogLevel.AUDIT, null).putProperty("THREAD_ID", "threadId"); assertEquals("0.000000\t-\tprocessId/threadId\t-\t-\tunknown\t", new ConsoleLogFormatter(null, "processId", null).formatEntry(entry)); } @@ -93,7 +93,7 @@ public class ConsoleLogFormatterTestCase { @Test void requireThatBundleNameIsIncluded() { - LogEntry entry = new MyEntry(0, 0, null).setBundleSymbolicName("bundleName"); + LogEntry entry = new MyEntry(0, 
LogLevel.AUDIT, null).setBundleSymbolicName("bundleName"); assertEquals("0.000000\t-\t-\t-\tbundleName\tunknown\t", SIMPLE_FORMATTER.formatEntry(entry)); } @@ -106,7 +106,7 @@ public class ConsoleLogFormatterTestCase { @Test void requireThatLoggerNameIsIncluded() { - LogEntry entry = new MyEntry(0, 0, null).putProperty("LOGGER_NAME", "loggerName"); + LogEntry entry = new MyEntry(0, LogLevel.AUDIT, null).putProperty("LOGGER_NAME", "loggerName"); assertEquals("0.000000\t-\t-\t-\t/loggerName\tunknown\t", SIMPLE_FORMATTER.formatEntry(entry)); } @@ -119,7 +119,7 @@ public class ConsoleLogFormatterTestCase { @Test void requireThatBundleAndLoggerNameIsCombined() { - LogEntry entry = new MyEntry(0, 0, null).setBundleSymbolicName("bundleName") + LogEntry entry = new MyEntry(0, LogLevel.AUDIT, null).setBundleSymbolicName("bundleName") .putProperty("LOGGER_NAME", "loggerName"); assertEquals("0.000000\t-\t-\t-\tbundleName/loggerName\tunknown\t", SIMPLE_FORMATTER.formatEntry(entry)); @@ -129,34 +129,32 @@ public class ConsoleLogFormatterTestCase { void requireThatLevelNameIsIncluded() { ConsoleLogFormatter formatter = SIMPLE_FORMATTER; assertEquals("0.000000\t-\t-\t-\t-\terror\t", - formatter.formatEntry(new MyEntry(0, LogService.LOG_ERROR, null))); + formatter.formatEntry(new MyEntry(0, LogLevel.ERROR, null))); assertEquals("0.000000\t-\t-\t-\t-\twarning\t", - formatter.formatEntry(new MyEntry(0, LogService.LOG_WARNING, null))); + formatter.formatEntry(new MyEntry(0, LogLevel.WARN, null))); assertEquals("0.000000\t-\t-\t-\t-\tinfo\t", - formatter.formatEntry(new MyEntry(0, LogService.LOG_INFO, null))); + formatter.formatEntry(new MyEntry(0, LogLevel.INFO, null))); assertEquals("0.000000\t-\t-\t-\t-\tdebug\t", - formatter.formatEntry(new MyEntry(0, LogService.LOG_DEBUG, null))); - assertEquals("0.000000\t-\t-\t-\t-\tunknown\t", - formatter.formatEntry(new MyEntry(0, 69, null))); + formatter.formatEntry(new MyEntry(0, LogLevel.DEBUG, null))); } @Test void 
requireThatMessageIsIncluded() { - LogEntry entry = new MyEntry(0, 0, "message"); + LogEntry entry = new MyEntry(0, LogLevel.AUDIT, "message"); assertEquals("0.000000\t-\t-\t-\t-\tunknown\tmessage", SIMPLE_FORMATTER.formatEntry(entry)); } @Test void requireThatMessageIsOptional() { - LogEntry entry = new MyEntry(0, 0, null); + LogEntry entry = new MyEntry(0, LogLevel.AUDIT, null); assertEquals("0.000000\t-\t-\t-\t-\tunknown\t", SIMPLE_FORMATTER.formatEntry(entry)); } @Test void requireThatMessageIsEscaped() { - LogEntry entry = new MyEntry(0, 0, "\\\n\r\t"); + LogEntry entry = new MyEntry(0, LogLevel.AUDIT, "\\\n\r\t"); assertEquals("0.000000\t-\t-\t-\t-\tunknown\t\\\\\\n\\r\\t", SIMPLE_FORMATTER.formatEntry(entry)); } @@ -164,7 +162,7 @@ public class ConsoleLogFormatterTestCase { @Test void requireThatExceptionIsIncluded() { Throwable t = new Throwable(); - LogEntry entry = new MyEntry(0, 0, null).setException(t); + LogEntry entry = new MyEntry(0, LogLevel.AUDIT, null).setException(t); assertEquals("0.000000\t-\t-\t-\t-\tunknown\t\\n" + formatThrowable(t), SIMPLE_FORMATTER.formatEntry(entry)); } @@ -172,7 +170,7 @@ public class ConsoleLogFormatterTestCase { @Test void requireThatExceptionIsEscaped() { Throwable t = new Throwable("\\\n\r\t"); - LogEntry entry = new MyEntry(0, 0, null).setException(t); + LogEntry entry = new MyEntry(0, LogLevel.AUDIT, null).setException(t); assertEquals("0.000000\t-\t-\t-\t-\tunknown\t\\n" + formatThrowable(t), SIMPLE_FORMATTER.formatEntry(entry)); } @@ -180,7 +178,7 @@ public class ConsoleLogFormatterTestCase { @Test void requireThatExceptionIsSimplifiedForInfoEntries() { Throwable t = new Throwable("exception"); - LogEntry entry = new MyEntry(0, LogService.LOG_INFO, "entry").setException(t); + LogEntry entry = new MyEntry(0, LogLevel.INFO, "entry").setException(t); assertEquals("0.000000\t-\t-\t-\t-\tinfo\tentry: exception", SIMPLE_FORMATTER.formatEntry(entry)); } @@ -188,7 +186,7 @@ public class ConsoleLogFormatterTestCase { 
@Test void requireThatSimplifiedExceptionIsEscaped() { Throwable t = new Throwable("\\\n\r\t"); - LogEntry entry = new MyEntry(0, LogService.LOG_INFO, "entry").setException(t); + LogEntry entry = new MyEntry(0, LogLevel.INFO, "entry").setException(t); assertEquals("0.000000\t-\t-\t-\t-\tinfo\tentry: \\\\\\n\\r\\t", SIMPLE_FORMATTER.formatEntry(entry)); } @@ -196,7 +194,7 @@ public class ConsoleLogFormatterTestCase { @Test void requireThatSimplifiedExceptionMessageIsOptional() { Throwable t = new Throwable(); - LogEntry entry = new MyEntry(0, LogService.LOG_INFO, "entry").setException(t); + LogEntry entry = new MyEntry(0, LogLevel.INFO, "entry").setException(t); assertEquals("0.000000\t-\t-\t-\t-\tinfo\tentry: java.lang.Throwable", SIMPLE_FORMATTER.formatEntry(entry)); } @@ -210,13 +208,13 @@ public class ConsoleLogFormatterTestCase { private static class MyEntry implements LogEntry { final String message; - final int level; + final LogLevel level; final long time; Bundle bundle = null; ServiceReference<?> serviceReference = null; Throwable exception; - MyEntry(long time, int level, String message) { + MyEntry(long time, LogLevel level, String message) { this.message = message; this.level = level; this.time = time; @@ -244,9 +242,15 @@ public class ConsoleLogFormatterTestCase { return time; } - @Override + @Override public LogLevel getLogLevel() { return level; } + @Override public String getLoggerName() { return null; } + @Override public long getSequence() { return 0; } + @Override public String getThreadInfo() { return null; } + @Override public StackTraceElement getLocation() { return null; } + + @Override @SuppressWarnings("deprecation") public int getLevel() { - return level; + return level.ordinal(); } @Override diff --git a/jdisc_core/src/test/java/com/yahoo/jdisc/core/ConsoleLogListenerTestCase.java b/jdisc_core/src/test/java/com/yahoo/jdisc/core/ConsoleLogListenerTestCase.java index 0efefc21a2f..88d73f32550 100644 --- 
a/jdisc_core/src/test/java/com/yahoo/jdisc/core/ConsoleLogListenerTestCase.java +++ b/jdisc_core/src/test/java/com/yahoo/jdisc/core/ConsoleLogListenerTestCase.java @@ -5,11 +5,12 @@ import org.junit.jupiter.api.Test; import org.osgi.framework.Bundle; import org.osgi.framework.ServiceReference; import org.osgi.service.log.LogEntry; +import org.osgi.service.log.LogLevel; import org.osgi.service.log.LogListener; -import org.osgi.service.log.LogService; import java.io.ByteArrayOutputStream; import java.io.PrintStream; +import java.util.Optional; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -23,42 +24,35 @@ public class ConsoleLogListenerTestCase { @Test void requireThatLogLevelParserKnowsOsgiLogLevels() { - assertEquals(LogService.LOG_ERROR, ConsoleLogListener.parseLogLevel("ERROR")); - assertEquals(LogService.LOG_WARNING, ConsoleLogListener.parseLogLevel("WARNING")); - assertEquals(LogService.LOG_INFO, ConsoleLogListener.parseLogLevel("INFO")); - assertEquals(LogService.LOG_DEBUG, ConsoleLogListener.parseLogLevel("DEBUG")); + assertEquals(LogLevel.ERROR, ConsoleLogListener.parseLogLevel("ERROR").orElseThrow()); + assertEquals(LogLevel.WARN, ConsoleLogListener.parseLogLevel("WARNING").orElseThrow()); + assertEquals(LogLevel.INFO, ConsoleLogListener.parseLogLevel("INFO").orElseThrow()); + assertEquals(LogLevel.DEBUG, ConsoleLogListener.parseLogLevel("DEBUG").orElseThrow()); } @Test void requireThatLogLevelParserKnowsOff() { - assertEquals(Integer.MIN_VALUE, ConsoleLogListener.parseLogLevel("OFF")); + assertEquals(Optional.empty(), ConsoleLogListener.parseLogLevel("OFF")); } @Test void requireThatLogLevelParserKnowsAll() { - assertEquals(Integer.MAX_VALUE, ConsoleLogListener.parseLogLevel("ALL")); - } - - @Test - void requireThatLogLevelParserKnowsIntegers() { - for (int i = -69; i < 69; ++i) { - assertEquals(i, ConsoleLogListener.parseLogLevel(String.valueOf(i))); - } + assertEquals(LogLevel.TRACE, 
ConsoleLogListener.parseLogLevel("ALL").orElseThrow()); } @Test void requireThatLogLevelParserErrorsReturnDefault() { - assertEquals(ConsoleLogListener.DEFAULT_LOG_LEVEL, ConsoleLogListener.parseLogLevel(null)); - assertEquals(ConsoleLogListener.DEFAULT_LOG_LEVEL, ConsoleLogListener.parseLogLevel("")); - assertEquals(ConsoleLogListener.DEFAULT_LOG_LEVEL, ConsoleLogListener.parseLogLevel("foo")); + assertEquals(ConsoleLogListener.DEFAULT_LOG_LEVEL, ConsoleLogListener.parseLogLevel(null).orElseThrow()); + assertEquals(ConsoleLogListener.DEFAULT_LOG_LEVEL, ConsoleLogListener.parseLogLevel("").orElseThrow()); + assertEquals(ConsoleLogListener.DEFAULT_LOG_LEVEL, ConsoleLogListener.parseLogLevel("foo").orElseThrow()); } @Test void requireThatLogEntryWithLevelAboveThresholdIsNotOutput() { ByteArrayOutputStream out = new ByteArrayOutputStream(); LogListener listener = new ConsoleLogListener(new PrintStream(out), null, "5"); - for (int i = 0; i < 10; ++i) { - listener.logged(new MyEntry(0, i, "message")); + for (LogLevel l : LogLevel.values()) { + listener.logged(new MyEntry(0, l, "message")); } // TODO: Should use ConsoleLogFormatter.ABSENCE_REPLACEMENT instead of literal '-'. See ticket 7128315. 
assertEquals("0.000000\t" + HOSTNAME + "\t" + PROCESS_ID + "\t-\t-\tunknown\tmessage\n" + @@ -73,10 +67,10 @@ public class ConsoleLogListenerTestCase { private static class MyEntry implements LogEntry { final String message; - final int level; + final LogLevel level; final long time; - MyEntry(long time, int level, String message) { + MyEntry(long time, LogLevel level, String message) { this.message = message; this.level = level; this.time = time; @@ -87,9 +81,15 @@ public class ConsoleLogListenerTestCase { return time; } - @Override + @Override public LogLevel getLogLevel() { return level; } + @Override public String getLoggerName() { return null; } + @Override public long getSequence() { return 0; } + @Override public String getThreadInfo() { return null; } + @Override public StackTraceElement getLocation() { return null; } + + @Override @SuppressWarnings("deprecation") public int getLevel() { - return level; + return level.ordinal(); } @Override diff --git a/jdisc_core/src/test/java/com/yahoo/jdisc/core/ExportPackagesIT.java b/jdisc_core/src/test/java/com/yahoo/jdisc/core/ExportPackagesIT.java index b5a9e19bb2a..2d0b9ba9651 100644 --- a/jdisc_core/src/test/java/com/yahoo/jdisc/core/ExportPackagesIT.java +++ b/jdisc_core/src/test/java/com/yahoo/jdisc/core/ExportPackagesIT.java @@ -36,7 +36,7 @@ public class ExportPackagesIT { // This list must be kept in sync with the list in the export-packages execution in pom.xml. 
private static final List<String> RE_EXPORTED_BUNDLES = Stream.of( "guava.jar", - "guice-no_aop.jar", + "guice.jar", "slf4j-api.jar", "slf4j-jdk14.jar", "jcl-over-slf4j.jar", diff --git a/jdisc_core/src/test/java/com/yahoo/jdisc/core/OsgiLogHandlerTestCase.java b/jdisc_core/src/test/java/com/yahoo/jdisc/core/OsgiLogHandlerTestCase.java index 626cae67c41..f5a86b63ae5 100644 --- a/jdisc_core/src/test/java/com/yahoo/jdisc/core/OsgiLogHandlerTestCase.java +++ b/jdisc_core/src/test/java/com/yahoo/jdisc/core/OsgiLogHandlerTestCase.java @@ -2,7 +2,9 @@ package com.yahoo.jdisc.core; import org.junit.jupiter.api.Test; +import org.osgi.framework.Bundle; import org.osgi.framework.ServiceReference; +import org.osgi.service.log.LogLevel; import org.osgi.service.log.LogService; import java.time.Instant; @@ -30,40 +32,40 @@ public class OsgiLogHandlerTestCase { Logger log = newLogger(logService); log.log(Level.INFO, "foo"); - assertEquals(OsgiLogHandler.toServiceLevel(Level.INFO), logService.lastLevel); + assertEquals(OsgiLogHandler.toServiceLevel(Level.INFO), logService.lastLevel()); assertEquals("foo", logService.lastMessage); assertNull(logService.lastThrowable); Throwable t = new Throwable(); log.log(Level.SEVERE, "bar", t); - assertEquals(OsgiLogHandler.toServiceLevel(Level.SEVERE), logService.lastLevel); + assertEquals(OsgiLogHandler.toServiceLevel(Level.SEVERE), logService.lastLevel()); assertEquals("bar", logService.lastMessage); assertEquals(t, logService.lastThrowable); } @Test void requireThatStadardLogLevelsAreConverted() { - assertLogLevel(LogService.LOG_ERROR, Level.SEVERE); - assertLogLevel(LogService.LOG_WARNING, Level.WARNING); - assertLogLevel(LogService.LOG_INFO, Level.INFO); - assertLogLevel(LogService.LOG_DEBUG, Level.CONFIG); - assertLogLevel(LogService.LOG_DEBUG, Level.FINE); - assertLogLevel(LogService.LOG_DEBUG, Level.FINER); - assertLogLevel(LogService.LOG_DEBUG, Level.FINEST); + assertLogLevel(LogLevel.ERROR, Level.SEVERE); + 
assertLogLevel(LogLevel.WARN, Level.WARNING); + assertLogLevel(LogLevel.INFO, Level.INFO); + assertLogLevel(LogLevel.DEBUG, Level.CONFIG); + assertLogLevel(LogLevel.DEBUG, Level.FINE); + assertLogLevel(LogLevel.DEBUG, Level.FINER); + assertLogLevel(LogLevel.DEBUG, Level.FINEST); } @Test void requireThatCustomLogLevelsAreConverted() { for (int i = Level.ALL.intValue() - 69; i < Level.OFF.intValue() + 69; ++i) { - int expectedLevel; + LogLevel expectedLevel; if (i >= Level.SEVERE.intValue()) { - expectedLevel = LogService.LOG_ERROR; + expectedLevel = LogLevel.ERROR; } else if (i >= Level.WARNING.intValue()) { - expectedLevel = LogService.LOG_WARNING; + expectedLevel = LogLevel.WARN; } else if (i >= Level.INFO.intValue()) { - expectedLevel = LogService.LOG_INFO; + expectedLevel = LogLevel.INFO; } else { - expectedLevel = LogService.LOG_DEBUG; + expectedLevel = LogLevel.DEBUG; } assertLogLevel(expectedLevel, new MyLogLevel(i)); } @@ -120,11 +122,11 @@ public class OsgiLogHandlerTestCase { assertNull(ref.getProperty("unknown")); } - private static void assertLogLevel(int expectedLevel, Level level) { + private static void assertLogLevel(LogLevel expectedLevel, Level level) { MyLogService logService = new MyLogService(); Logger log = newLogger(logService); log.log(level, "message"); - assertEquals(expectedLevel, logService.lastLevel); + assertEquals(expectedLevel, logService.lastLevel()); } @SuppressWarnings("unchecked") @@ -154,28 +156,36 @@ public class OsgiLogHandlerTestCase { String lastMessage; Throwable lastThrowable; - @Override + LogLevel lastLevel() { return LogLevel.values()[lastLevel]; } + + @Override @SuppressWarnings("deprecation") public void log(int level, String message) { log(null, level, message, null); } - @Override + @Override @SuppressWarnings("deprecation") public void log(int level, String message, Throwable throwable) { log(null, level, message, throwable); } - @Override + @Override @SuppressWarnings("deprecation") public void log(ServiceReference 
serviceReference, int level, String message) { log(serviceReference, level, message, null); } - @Override + @Override @SuppressWarnings("deprecation") public void log(ServiceReference serviceReference, int level, String message, Throwable throwable) { lastServiceReference = serviceReference; lastLevel = level; lastMessage = message; lastThrowable = throwable; } + + @Override public org.osgi.service.log.Logger getLogger(String s) { return null; } + @Override public org.osgi.service.log.Logger getLogger(Class<?> aClass) { return null; } + @Override public <L extends org.osgi.service.log.Logger> L getLogger(String s, Class<L> aClass) { return null; } + @Override public <L extends org.osgi.service.log.Logger> L getLogger(Class<?> aClass, Class<L> aClass1) { return null; } + @Override public <L extends org.osgi.service.log.Logger> L getLogger(Bundle bundle, String s, Class<L> aClass) { return null; } } private static class MyResourceBundle extends ResourceBundle { diff --git a/jdisc_core/src/test/java/com/yahoo/jdisc/core/OsgiLogManagerTestCase.java b/jdisc_core/src/test/java/com/yahoo/jdisc/core/OsgiLogManagerTestCase.java deleted file mode 100644 index 7b5af97ca13..00000000000 --- a/jdisc_core/src/test/java/com/yahoo/jdisc/core/OsgiLogManagerTestCase.java +++ /dev/null @@ -1,185 +0,0 @@ -// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
-package com.yahoo.jdisc.core; - -import com.yahoo.jdisc.test.TestDriver; -import org.junit.jupiter.api.Test; -import org.mockito.Mockito; -import org.osgi.framework.BundleContext; -import org.osgi.framework.BundleException; -import org.osgi.framework.ServiceReference; -import org.osgi.framework.ServiceRegistration; -import org.osgi.service.log.LogService; - -import java.util.logging.Level; -import java.util.logging.Logger; - -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertNull; -import static org.junit.jupiter.api.Assertions.assertSame; - - -/** - * @author Simon Thoresen Hult - */ -public class OsgiLogManagerTestCase { - - @Test - void requireThatAllLogMethodsAreImplemented() throws BundleException { - FelixFramework felix = TestDriver.newOsgiFramework(); - felix.start(); - - BundleContext ctx = felix.bundleContext(); - OsgiLogManager manager = new OsgiLogManager(true); - manager.install(ctx); - MyLogService service = new MyLogService(); - ctx.registerService(LogService.class.getName(), service, null); - - manager.log(2, "a"); - assertLast(service, null, 2, "a", null); - - Throwable t1 = new Throwable(); - manager.log(4, "b", t1); - assertLast(service, null, 4, "b", t1); - - ServiceReference<?> ref1 = Mockito.mock(ServiceReference.class); - manager.log(ref1, 8, "c"); - assertLast(service, ref1, 8, "c", null); - - ServiceReference<?> ref2 = Mockito.mock(ServiceReference.class); - Throwable t2 = new Throwable(); - manager.log(ref2, 16, "d", t2); - assertLast(service, ref2, 16, "d", t2); - - manager.uninstall(); - felix.stop(); - } - - @Test - void requireThatLogManagerWritesToAllRegisteredLogServices() throws BundleException { - FelixFramework felix = TestDriver.newOsgiFramework(); - felix.start(); - - BundleContext ctx = felix.bundleContext(); - MyLogService foo = new MyLogService(); - ServiceRegistration<LogService> fooReg = ctx.registerService(LogService.class, foo, null); - - OsgiLogManager 
manager = new OsgiLogManager(true); - manager.install(ctx); - - ServiceReference<?> ref1 = Mockito.mock(ServiceReference.class); - Throwable t1 = new Throwable(); - manager.log(ref1, 2, "a", t1); - assertLast(foo, ref1, 2, "a", t1); - - MyLogService bar = new MyLogService(); - ServiceRegistration<LogService> barReg = ctx.registerService(LogService.class, bar, null); - - ServiceReference<?> ref2 = Mockito.mock(ServiceReference.class); - Throwable t2 = new Throwable(); - manager.log(ref2, 4, "b", t2); - assertLast(foo, ref2, 4, "b", t2); - assertLast(bar, ref2, 4, "b", t2); - - MyLogService baz = new MyLogService(); - ServiceRegistration<LogService> bazReg = ctx.registerService(LogService.class, baz, null); - - ServiceReference<?> ref3 = Mockito.mock(ServiceReference.class); - Throwable t3 = new Throwable(); - manager.log(ref3, 8, "c", t3); - assertLast(foo, ref3, 8, "c", t3); - assertLast(bar, ref3, 8, "c", t3); - assertLast(baz, ref3, 8, "c", t3); - - fooReg.unregister(); - - ServiceReference<?> ref4 = Mockito.mock(ServiceReference.class); - Throwable t4 = new Throwable(); - manager.log(ref4, 16, "d", t4); - assertLast(foo, ref3, 8, "c", t3); - assertLast(bar, ref4, 16, "d", t4); - assertLast(baz, ref4, 16, "d", t4); - - barReg.unregister(); - - ServiceReference<?> ref5 = Mockito.mock(ServiceReference.class); - Throwable t5 = new Throwable(); - manager.log(ref5, 32, "e", t5); - assertLast(foo, ref3, 8, "c", t3); - assertLast(bar, ref4, 16, "d", t4); - assertLast(baz, ref5, 32, "e", t5); - - bazReg.unregister(); - - ServiceReference<?> ref6 = Mockito.mock(ServiceReference.class); - Throwable t6 = new Throwable(); - manager.log(ref6, 64, "f", t6); - assertLast(foo, ref3, 8, "c", t3); - assertLast(bar, ref4, 16, "d", t4); - assertLast(baz, ref5, 32, "e", t5); - - manager.uninstall(); - felix.stop(); - } - - @Test - void requireThatRootLoggerModificationCanBeDisabled() throws BundleException { - Logger logger = Logger.getLogger(""); - logger.setLevel(Level.WARNING); - 
- new OsgiLogManager(false).install(Mockito.mock(BundleContext.class)); - assertEquals(Level.WARNING, logger.getLevel()); - - new OsgiLogManager(true).install(Mockito.mock(BundleContext.class)); - assertEquals(Level.ALL, logger.getLevel()); - } - - @Test - void requireThatRootLoggerLevelIsModifiedIfNoLoggerConfigIsGiven() { - Logger logger = Logger.getLogger(""); - logger.setLevel(Level.WARNING); - - OsgiLogManager.newInstance().install(Mockito.mock(BundleContext.class)); - - assertNull(System.getProperty("java.util.logging.config.file")); - assertEquals(Level.ALL, logger.getLevel()); - } - - private static void assertLast(MyLogService service, ServiceReference<?> ref, int level, String message, Throwable t) { - assertSame(ref, service.lastServiceReference); - assertEquals(level, service.lastLevel); - assertEquals(message, service.lastMessage); - assertSame(t, service.lastThrowable); - } - - @SuppressWarnings("rawtypes") - private static class MyLogService implements LogService { - - ServiceReference lastServiceReference; - int lastLevel; - String lastMessage; - Throwable lastThrowable; - - @Override - public void log(int level, String message) { - log(null, level, message, null); - } - - @Override - public void log(int level, String message, Throwable throwable) { - log(null, level, message, throwable); - } - - @Override - public void log(ServiceReference serviceReference, int level, String message) { - log(serviceReference, level, message, null); - } - - @Override - public void log(ServiceReference serviceReference, int level, String message, Throwable throwable) { - lastServiceReference = serviceReference; - lastLevel = level; - lastMessage = message; - lastThrowable = throwable; - } - } -} diff --git a/jdisc_core_test/integration_test/src/test/java/com/yahoo/jdisc/core/OsgiLogManagerIntegrationTest.java b/jdisc_core_test/integration_test/src/test/java/com/yahoo/jdisc/core/OsgiLogManagerIntegrationTest.java deleted file mode 100644 index 629bef6ded3..00000000000 
--- a/jdisc_core_test/integration_test/src/test/java/com/yahoo/jdisc/core/OsgiLogManagerIntegrationTest.java +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -package com.yahoo.jdisc.core; - -import org.junit.Test; -import org.mockito.Mockito; -import org.osgi.framework.BundleContext; - -import java.util.logging.Level; -import java.util.logging.Logger; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; - - -/** - * @author Simon Thoresen Hult - */ -public class OsgiLogManagerIntegrationTest { - - @Test - public void requireThatRootLoggerLevelIsNotModifiedIfLoggerConfigIsGiven() { - Logger logger = Logger.getLogger(""); - logger.setLevel(Level.WARNING); - - OsgiLogManager.newInstance().install(Mockito.mock(BundleContext.class)); - - assertNotNull(System.getProperty("java.util.logging.config.file")); - assertEquals(Level.WARNING, logger.getLevel()); - } -} diff --git a/linguistics-components/pom.xml b/linguistics-components/pom.xml index 19bb244b5d9..68b0437ac3f 100644 --- a/linguistics-components/pom.xml +++ b/linguistics-components/pom.xml @@ -87,7 +87,7 @@ <groupId>com.google.inject</groupId> <artifactId>guice</artifactId> <scope>provided</scope> - <classifier>no_aop</classifier> + </dependency> <dependency> <groupId>com.yahoo.vespa</groupId> diff --git a/linguistics/pom.xml b/linguistics/pom.xml index 5db3302b597..8813af8b981 100644 --- a/linguistics/pom.xml +++ b/linguistics/pom.xml @@ -56,7 +56,7 @@ <groupId>com.google.inject</groupId> <artifactId>guice</artifactId> <scope>provided</scope> - <classifier>no_aop</classifier> + </dependency> </dependencies> <build> diff --git a/lucene-linguistics/pom.xml b/lucene-linguistics/pom.xml index 929d33a0736..18f2b1a8574 100644 --- a/lucene-linguistics/pom.xml +++ b/lucene-linguistics/pom.xml @@ -63,7 +63,7 @@ <dependency> <groupId>com.google.inject</groupId> <artifactId>guice</artifactId> - 
<classifier>no_aop</classifier> + <scope>provided</scope> </dependency> <dependency> diff --git a/maven-plugins/allowed-maven-dependencies.txt b/maven-plugins/allowed-maven-dependencies.txt index 04dd3680701..4b47810ea74 100644 --- a/maven-plugins/allowed-maven-dependencies.txt +++ b/maven-plugins/allowed-maven-dependencies.txt @@ -10,10 +10,11 @@ com.github.luben:zstd-jni:1.5.5-5 com.google.errorprone:error_prone_annotations:2.21.1 com.google.guava:failureaccess:1.0.1 com.google.guava:guava:32.1.2-jre -com.google.inject:guice:4.2.3 +com.google.inject:guice:6.0.0 com.google.j2objc:j2objc-annotations:2.8 commons-codec:commons-codec:1.16.0 commons-io:commons-io:2.13.0 +jakarta.inject:jakarta.inject-api:2.0.1 javax.annotation:javax.annotation-api:1.2 javax.inject:javax.inject:1 org.apache-extras.beanshell:bsh:2.0b6 @@ -67,8 +68,8 @@ org.vafer:jdependency:2.8.0 #[test-only] # Contains dependencies that are used exclusively in 'test' scope junit:junit:4.13.2 -net.bytebuddy:byte-buddy:1.14.6 -net.bytebuddy:byte-buddy-agent:1.14.6 +net.bytebuddy:byte-buddy:1.14.7 +net.bytebuddy:byte-buddy-agent:1.14.7 org.apiguardian:apiguardian-api:1.1.2 org.hamcrest:hamcrest:2.2 org.hamcrest:hamcrest-core:2.2 diff --git a/metrics-proxy/pom.xml b/metrics-proxy/pom.xml index ee1439fb5f6..af0dbaa83fb 100644 --- a/metrics-proxy/pom.xml +++ b/metrics-proxy/pom.xml @@ -111,8 +111,8 @@ <dependency> - <groupId>com.github.tomakehurst</groupId> - <artifactId>wiremock-jre8-standalone</artifactId> + <groupId>org.wiremock</groupId> + <artifactId>wiremock-standalone</artifactId> <scope>test</scope> </dependency> <dependency> diff --git a/metrics/src/main/java/ai/vespa/metrics/SearchNodeMetrics.java b/metrics/src/main/java/ai/vespa/metrics/SearchNodeMetrics.java index d6018dc0633..cae6b258e54 100644 --- a/metrics/src/main/java/ai/vespa/metrics/SearchNodeMetrics.java +++ b/metrics/src/main/java/ai/vespa/metrics/SearchNodeMetrics.java @@ -67,6 +67,7 @@ public enum SearchNodeMetrics implements 
VespaMetrics { CONTENT_PROTON_EXECUTOR_FIELD_WRITER_ACCEPTED("content.proton.executor.field_writer.accepted", Unit.TASK, "Number of accepted executor field writer tasks"), CONTENT_PROTON_EXECUTOR_FIELD_WRITER_WAKEUPS("content.proton.executor.field_writer.wakeups", Unit.WAKEUP, "Number of times a executor field writer worker thread has been woken up"), CONTENT_PROTON_EXECUTOR_FIELD_WRITER_UTILIZATION("content.proton.executor.field_writer.utilization", Unit.FRACTION, "Ratio of time the executor fieldwriter worker threads has been active"), + CONTENT_PROTON_EXECUTOR_FIELD_WRITER_SATURATION("content.proton.executor.field_writer.saturation", Unit.FRACTION, "Ratio indicating the max saturation of underlying worker threads. A higher saturation than utilization indicates a bottleneck in one of the worker threads."), CONTENT_PROTON_EXECUTOR_FIELD_WRITER_REJECTED("content.proton.executor.field_writer.rejected", Unit.TASK, "Number of rejected tasks"), // jobs diff --git a/metrics/src/main/java/ai/vespa/metrics/set/Vespa9VespaMetricSet.java b/metrics/src/main/java/ai/vespa/metrics/set/Vespa9VespaMetricSet.java index 2744f6e4ae0..2cfdcd85d9e 100644 --- a/metrics/src/main/java/ai/vespa/metrics/set/Vespa9VespaMetricSet.java +++ b/metrics/src/main/java/ai/vespa/metrics/set/Vespa9VespaMetricSet.java @@ -349,6 +349,7 @@ public class Vespa9VespaMetricSet { addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_FIELD_WRITER_QUEUESIZE, EnumSet.of(max, sum, count)); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_FIELD_WRITER_ACCEPTED.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_FIELD_WRITER_UTILIZATION, EnumSet.of(max, sum, count)); + addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_FIELD_WRITER_SATURATION, EnumSet.of(max, sum, count)); // jobs addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_JOB_TOTAL.average()); diff --git a/metrics/src/main/java/ai/vespa/metrics/set/VespaMetricSet.java 
b/metrics/src/main/java/ai/vespa/metrics/set/VespaMetricSet.java index 6c4626238eb..1b86819ddc0 100644 --- a/metrics/src/main/java/ai/vespa/metrics/set/VespaMetricSet.java +++ b/metrics/src/main/java/ai/vespa/metrics/set/VespaMetricSet.java @@ -389,6 +389,7 @@ public class VespaMetricSet { addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_FIELD_WRITER_ACCEPTED.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_FIELD_WRITER_WAKEUPS.rate()); addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_FIELD_WRITER_UTILIZATION, EnumSet.of(max, sum, count)); + addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_FIELD_WRITER_SATURATION, EnumSet.of(max, sum, count)); // jobs addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_JOB_TOTAL.average()); diff --git a/model-integration/src/main/java/ai/vespa/embedding/EmbedderRuntime.java b/model-integration/src/main/java/ai/vespa/embedding/EmbedderRuntime.java index 45068db67f4..4e1442e6383 100644 --- a/model-integration/src/main/java/ai/vespa/embedding/EmbedderRuntime.java +++ b/model-integration/src/main/java/ai/vespa/embedding/EmbedderRuntime.java @@ -10,8 +10,8 @@ import com.yahoo.metrics.simple.Gauge; import com.yahoo.metrics.simple.MetricReceiver; import com.yahoo.metrics.simple.Point; -import java.util.HashMap; import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; /** * @author bjorncs @@ -20,7 +20,7 @@ public class EmbedderRuntime implements Embedder.Runtime { private final Gauge embedLatency; private final Gauge sequenceLength; - private final Map<MetricDimensions, Point> metricPointCache = new HashMap<>(); + private final Map<MetricDimensions, Point> metricPointCache = new ConcurrentHashMap<>(); @Inject public EmbedderRuntime(MetricReceiver metrics) { diff --git a/node-admin/pom.xml b/node-admin/pom.xml index cf45e010d14..d24ff9a3655 100644 --- a/node-admin/pom.xml +++ b/node-admin/pom.xml @@ -86,11 +86,6 @@ <!-- Test --> <dependency> - 
<groupId>com.google.guava</groupId> - <artifactId>guava-testlib</artifactId> - <scope>test</scope> - </dependency> - <dependency> <groupId>org.mockito</groupId> <artifactId>mockito-core</artifactId> <scope>test</scope> diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/systemd/SystemCtl.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/systemd/SystemCtl.java index 9662d4184df..4acdb51ee16 100644 --- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/systemd/SystemCtl.java +++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/systemd/SystemCtl.java @@ -64,6 +64,7 @@ public class SystemCtl { public SystemCtlStart start(String unit) { return new SystemCtlStart(unit); } public SystemCtlStop stop(String unit) { return new SystemCtlStop(unit); } public SystemCtlRestart restart(String unit) { return new SystemCtlRestart(unit); } + public SystemCtlReload reload(String unit) { return new SystemCtlReload(unit); } public boolean serviceExists(TaskContext context, String unit) { return newCommandLine(context) @@ -155,6 +156,16 @@ public class SystemCtl { } } + public class SystemCtlReload extends SystemCtlCommand { + private SystemCtlReload(String unit) { + super("reload", unit); + } + + protected boolean isAlreadyConverged(TaskContext context) { + return false; + } + } + public abstract class SystemCtlCommand { private final String command; diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/systemd/SystemCtlTester.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/systemd/SystemCtlTester.java index 396a4f9f51f..21f060461af 100644 --- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/systemd/SystemCtlTester.java +++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/systemd/SystemCtlTester.java @@ -40,6 +40,11 @@ public class SystemCtlTester extends SystemCtl { return this; } + public 
SystemCtlTester expectReload(String unit) { + expectCommand("systemctl reload " + unit + " 2>&1", 0, ""); + return this; + } + public SystemCtlTester expectDaemonReload() { expectCommand("systemctl daemon-reload 2>&1", 0, ""); return this; diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/task/util/network/VersionedIpAddressTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/task/util/network/VersionedIpAddressTest.java index 32fbcf9f6a4..79664159b88 100644 --- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/task/util/network/VersionedIpAddressTest.java +++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/task/util/network/VersionedIpAddressTest.java @@ -1,12 +1,12 @@ package com.yahoo.vespa.hosted.node.admin.task.util.network; -import com.google.common.testing.EqualsTester; import org.junit.jupiter.api.Test; import java.util.List; import java.util.stream.Stream; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotEquals; /** * @author gjoranv @@ -51,12 +51,18 @@ public class VersionedIpAddressTest { @Test void equals_and_hashCode_are_implemented() { - new EqualsTester() - .addEqualityGroup(VersionedIpAddress.from("::1"), VersionedIpAddress.from("::1")) - .addEqualityGroup(VersionedIpAddress.from("::2")) - .addEqualityGroup(VersionedIpAddress.from("127.0.0.1"), VersionedIpAddress.from("127.0.0.1")) - .addEqualityGroup(VersionedIpAddress.from("10.0.0.1")) - .testEquals(); + var one = VersionedIpAddress.from("::1"); + var two = VersionedIpAddress.from("::2"); + var local = VersionedIpAddress.from("127.0.0.1"); + var ten = VersionedIpAddress.from("10.0.0.1"); + assertEquals(one, VersionedIpAddress.from("::1")); + assertNotEquals(one, two); + assertNotEquals(one, local); + assertNotEquals(one, ten); + + assertEquals(local, VersionedIpAddress.from("127.0.0.1")); + assertNotEquals(local, two); + assertNotEquals(local, 10); } } diff --git 
a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/QuestMetricsDb.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/QuestMetricsDb.java index e0199b5ddaf..48e50b935a4 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/QuestMetricsDb.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/QuestMetricsDb.java @@ -8,18 +8,19 @@ import com.yahoo.component.AbstractComponent; import com.yahoo.config.provision.ApplicationId; import com.yahoo.config.provision.ClusterSpec; import com.yahoo.io.IOUtils; -import com.yahoo.vespa.defaults.Defaults; import com.yahoo.yolean.concurrent.ConcurrentResourcePool; import io.questdb.cairo.CairoEngine; import io.questdb.cairo.CairoException; import io.questdb.cairo.DefaultCairoConfiguration; +import io.questdb.cairo.TableToken; import io.questdb.cairo.TableWriter; +import io.questdb.cairo.security.AllowAllSecurityContext; import io.questdb.cairo.sql.Record; import io.questdb.cairo.sql.RecordCursor; import io.questdb.cairo.sql.RecordCursorFactory; import io.questdb.griffin.CompiledQuery; -import io.questdb.griffin.QueryFuture; import io.questdb.griffin.SqlCompiler; +import io.questdb.griffin.SqlCompilerFactoryImpl; import io.questdb.griffin.SqlException; import io.questdb.griffin.SqlExecutionContext; import io.questdb.griffin.SqlExecutionContextImpl; @@ -40,7 +41,6 @@ import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; import java.util.logging.Level; import java.util.logging.Logger; -import java.util.stream.Collectors; import static com.yahoo.vespa.defaults.Defaults.getDefaults; @@ -85,9 +85,9 @@ public class QuestMetricsDb extends AbstractComponent implements MetricsDb { this.dataDir = dataDir; engine = new CairoEngine(new DefaultCairoConfiguration(dataDir)); - sqlCompilerPool = new ConcurrentResourcePool<>(() -> new SqlCompiler(engine())); - nodeTable = new Table(dataDir, "metrics", clock); - 
clusterTable = new Table(dataDir, "clusterMetrics", clock); + sqlCompilerPool = new ConcurrentResourcePool<>(() -> SqlCompilerFactoryImpl.INSTANCE.getInstance(engine())); + nodeTable = new Table(dataDir, "metrics"); + clusterTable = new Table(dataDir, "clusterMetrics"); ensureTablesExist(); } @@ -236,7 +236,7 @@ public class QuestMetricsDb extends AbstractComponent implements MetricsDb { private void ensureClusterTableIsUpdated() { try { - if (0 == engine().getStatus(newContext().getCairoSecurityContext(), new Path(), clusterTable.name)) { + if (0 == engine().getTableStatus(new Path(), clusterTable.token())) { // Example: clusterTable.ensureColumnExists("write_rate", "float"); } } catch (Exception e) { @@ -349,13 +349,15 @@ public class QuestMetricsDb extends AbstractComponent implements MetricsDb { * Needs to be done for some queries, e.g. 'alter table' queries, see https://github.com/questdb/questdb/issues/1846 */ private void issueAsync(String sql, SqlExecutionContext context) throws SqlException { - try (QueryFuture future = issue(sql, context).execute(null)) { + try (var future = issue(sql, context).execute(null)) { future.await(); } } private SqlExecutionContext newContext() { - return new SqlExecutionContextImpl(engine(), 1); + CairoEngine engine = engine(); + return new SqlExecutionContextImpl(engine, 1) + .with(AllowAllSecurityContext.INSTANCE, null); } /** A questDb table */ @@ -363,25 +365,26 @@ public class QuestMetricsDb extends AbstractComponent implements MetricsDb { private final Object writeLock = new Object(); private final String name; - private final Clock clock; private final File dir; private long highestTimestampAdded = 0; - Table(String dataDir, String name, Clock clock) { + Table(String dataDir, String name) { this.name = name; - this.clock = clock; this.dir = new File(dataDir, name); IOUtils.createDirectory(dir.getPath()); // 
https://stackoverflow.com/questions/67785629/what-does-max-txn-txn-inflight-limit-reached-in-questdb-and-how-to-i-avoid-it new File(dir + "/_txn_scoreboard").delete(); } + private TableToken token() { return engine().getTableTokenIfExists(name); } boolean exists() { - return 0 == engine().getStatus(newContext().getCairoSecurityContext(), new Path(), name); + TableToken token = engine().getTableTokenIfExists(name); + if (token == null) return false; + return 0 == engine().getTableStatus(new Path(), token); } TableWriter getWriter() { - return engine().getWriter(newContext().getCairoSecurityContext(), name, "getWriter"); + return engine().getWriter(token(), "getWriter"); } void gc() { @@ -390,6 +393,7 @@ public class QuestMetricsDb extends AbstractComponent implements MetricsDb { issueAsync("alter table " + name + " drop partition where at < dateadd('d', -4, now());", newContext()); } catch (SqlException e) { + if (e.getMessage().contains("no partitions matched WHERE clause")) return; log.log(Level.WARNING, "Failed to gc old metrics data in " + dir + " table " + name, e); } } diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ApplicationMaintainer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ApplicationMaintainer.java index 91a10a1d08e..6b084704474 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ApplicationMaintainer.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ApplicationMaintainer.java @@ -80,7 +80,7 @@ public abstract class ApplicationMaintainer extends NodeRepositoryMaintainer { if ( ! canDeployNow(application)) return; // redeployment is no longer needed log.log(Level.INFO, () -> application + " will be redeployed" + (reason == null || reason.isBlank() ? 
"" : " due to " + reason) + - ", last activated " + activationTime(application)); + ", last deployed " + deployTime(application) + " and last activated " + activationTime(application)); deployment.activate(); } finally { pendingDeployments.remove(application); @@ -92,6 +92,11 @@ public abstract class ApplicationMaintainer extends NodeRepositoryMaintainer { return deployer.activationTime(application).orElse(Instant.EPOCH); } + /** Returns the last time application was deployed. Epoch is returned if the application has never been deployed. */ + protected final Instant deployTime(ApplicationId application) { + return deployer.deployTime(application).orElse(Instant.EPOCH); + } + @Override public void shutdown() { super.shutdown(); diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/DiskReplacer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/DiskReplacer.java index 0c1d6291baa..ab310761140 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/DiskReplacer.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/DiskReplacer.java @@ -1,7 +1,6 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.vespa.hosted.provision.maintenance; -import com.yahoo.concurrent.DaemonThreadFactory; import com.yahoo.jdisc.Metric; import com.yahoo.vespa.hosted.provision.Node; import com.yahoo.vespa.hosted.provision.NodeList; @@ -14,8 +13,6 @@ import com.yahoo.yolean.Exceptions; import java.time.Duration; import java.util.List; import java.util.Optional; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; import java.util.logging.Level; import java.util.logging.Logger; @@ -27,10 +24,8 @@ import java.util.logging.Logger; public class DiskReplacer extends NodeRepositoryMaintainer { private static final Logger log = Logger.getLogger(DiskReplacer.class.getName()); - private static final int maxBatchSize = 100; private final HostProvisioner hostProvisioner; - private final ExecutorService executor = Executors.newCachedThreadPool(new DaemonThreadFactory("disk-replacer")); DiskReplacer(NodeRepository nodeRepository, Duration interval, Metric metric, HostProvisioner hostProvisioner) { super(nodeRepository, interval, metric); @@ -39,10 +34,13 @@ public class DiskReplacer extends NodeRepositoryMaintainer { @Override protected double maintain() { - NodeList nodes = nodeRepository().nodes().list().rebuilding(true); + NodeList candidates = nodeRepository().nodes().list().rebuilding(true); + if (candidates.isEmpty()) { + return 0; + } int failures = 0; List<Node> rebuilding; - try (var locked = nodeRepository().nodes().lockAndGetAll(nodes.asList(), Optional.of(Duration.ofSeconds(10)))) { + try (var locked = nodeRepository().nodes().lockAndGetAll(candidates.asList(), Optional.of(Duration.ofSeconds(10)))) { rebuilding = locked.nodes().stream().map(NodeMutex::node).toList(); RebuildResult result = hostProvisioner.replaceRootDisk(rebuilding); @@ -56,13 +54,7 @@ public class DiskReplacer extends NodeRepositoryMaintainer { interval() + ": " + Exceptions.toMessageString(entry.getValue())); } } - return 
this.asSuccessFactorDeviation(rebuilding.size(), failures); - } - - @Override - public void shutdown() { - super.shutdown(); - executor.shutdown(); + return asSuccessFactorDeviation(rebuilding.size(), failures); } } diff --git a/opennlp-linguistics/src/main/java/com/yahoo/language/opennlp/DefaultLanguageDetectorContextGenerator.java b/opennlp-linguistics/src/main/java/com/yahoo/language/opennlp/DefaultLanguageDetectorContextGenerator.java index 27c23d8d3e6..5893429f5d0 100644 --- a/opennlp-linguistics/src/main/java/com/yahoo/language/opennlp/DefaultLanguageDetectorContextGenerator.java +++ b/opennlp-linguistics/src/main/java/com/yahoo/language/opennlp/DefaultLanguageDetectorContextGenerator.java @@ -19,14 +19,15 @@ public class DefaultLanguageDetectorContextGenerator extends opennlp.tools.langd } @Override - public String[] getContext(CharSequence document) { + @SuppressWarnings("unchecked") + public <T extends CharSequence> T[] getContext(CharSequence document) { int[] normalized = normalizer.normalize(document).codePoints().map(Character::toLowerCase).toArray(); Set<String> grams = new HashSet<>(); for (int i = 0; i < normalized.length; i++) for (int j = minLength; j <= maxLength && i + j < normalized.length; j++) grams.add(new String(normalized, i, j)); - return grams.toArray(new String[grams.size()]); + return (T[])grams.toArray(new String[grams.size()]); } } diff --git a/opennlp-linguistics/src/main/java/com/yahoo/language/opennlp/OpenNlpDetector.java b/opennlp-linguistics/src/main/java/com/yahoo/language/opennlp/OpenNlpDetector.java index d7a7d3a4744..b89e93c9d24 100644 --- a/opennlp-linguistics/src/main/java/com/yahoo/language/opennlp/OpenNlpDetector.java +++ b/opennlp-linguistics/src/main/java/com/yahoo/language/opennlp/OpenNlpDetector.java @@ -83,6 +83,7 @@ class OpenNlpDetector implements Detector { return new Detection(detectLanguage(input), UTF_8.name(), false); } + @SuppressWarnings("removal") private Language detectLanguage(String input) { var 
prediction = detector.probingPredictLanguages(input, config).getLanguages()[0]; var result = prediction.getConfidence() > 0.02 ? languagesByISO3.get(prediction.getLang()) : null; diff --git a/parent/pom.xml b/parent/pom.xml index 0603ade1694..b1ea1e0dab9 100644 --- a/parent/pom.xml +++ b/parent/pom.xml @@ -307,7 +307,7 @@ --> <groupId>org.openrewrite.maven</groupId> <artifactId>rewrite-maven-plugin</artifactId> - <version>5.4.2</version> + <version>5.5.0</version> <configuration> <activeRecipes> <recipe>org.openrewrite.java.testing.junit5.JUnit5BestPractices</recipe> @@ -504,8 +504,8 @@ <version>3.1.9</version> </dependency> <dependency> - <groupId>com.github.tomakehurst</groupId> - <artifactId>wiremock-jre8-standalone</artifactId> + <groupId>org.wiremock</groupId> + <artifactId>wiremock-standalone</artifactId> <version>${wiremock.vespa.version}</version> </dependency> <dependency> @@ -541,11 +541,6 @@ </exclusions> </dependency> <dependency> - <groupId>com.google.guava</groupId> - <artifactId>guava-testlib</artifactId> - <version>${guava.vespa.version}</version> - </dependency> - <dependency> <groupId>com.google.jimfs</groupId> <artifactId>jimfs</artifactId> <version>${jimfs.vespa.version}</version> @@ -635,6 +630,16 @@ <version>${joda-time.vespa.version}</version> </dependency> <dependency> + <groupId>net.bytebuddy</groupId> + <artifactId>byte-buddy</artifactId> + <version>${byte-buddy.vespa.version}</version> + </dependency> + <dependency> + <groupId>net.bytebuddy</groupId> + <artifactId>byte-buddy-agent</artifactId> + <version>${byte-buddy.vespa.version}</version> + </dependency> + <dependency> <groupId>net.openhft</groupId> <artifactId>zero-allocation-hashing</artifactId> <version>${zero-allocation-hashing.vespa.version}</version> @@ -1120,6 +1125,12 @@ <groupId>xerces</groupId> <artifactId>xercesImpl</artifactId> <version>${xerces.vespa.version}</version> + <exclusions> + <exclusion> + <groupId>xml-apis</groupId> + <artifactId>xml-apis</artifactId> + 
</exclusion> + </exclusions> </dependency> <dependency> <!-- TODO: Remove on Vespa 9 --> <groupId>org.json</groupId> @@ -1132,11 +1143,6 @@ <version>${error-prone-annotations.vespa.version}</version> </dependency> <dependency> - <groupId>org.checkerframework</groupId> - <artifactId>checker-qual</artifactId> - <version>${checker-qual.vespa.version}</version> - </dependency> - <dependency> <groupId>com.google.http-client</groupId> <artifactId>google-http-client-apache-v2</artifactId> <version>1.43.3</version> @@ -1151,7 +1157,11 @@ <artifactId>google-auth-library-oauth2-http</artifactId> <version>1.19.0</version> </dependency> - + <dependency> + <groupId>jakarta.inject</groupId> + <artifactId>jakarta.inject-api</artifactId> + <version>${jakarta.inject.vespa.version}</version> + </dependency> </dependencies> </dependencyManagement> diff --git a/provided-dependencies/pom.xml b/provided-dependencies/pom.xml index 09d76265466..8bf84956a12 100755 --- a/provided-dependencies/pom.xml +++ b/provided-dependencies/pom.xml @@ -49,7 +49,7 @@ <dependency> <groupId>com.google.inject</groupId> <artifactId>guice</artifactId> - <classifier>no_aop</classifier> + </dependency> <!-- Dependencies used by container-core --> diff --git a/screwdriver.yaml b/screwdriver.yaml index 849f4a01328..a118f7ba622 100644 --- a/screwdriver.yaml +++ b/screwdriver.yaml @@ -152,14 +152,14 @@ jobs: time make -C client/go BIN=$WORKDIR/vespa-install/opt/vespa/bin SHARE=$WORKDIR/vespa-install/usr/share install-all time ./bootstrap.sh java - time mvn -T $NUM_THREADS $VESPA_MAVEN_EXTRA_OPTS install + time ./mvnw -T $NUM_THREADS $VESPA_MAVEN_EXTRA_OPTS install cmake3 -DVESPA_UNPRIVILEGED=no $VESPA_CMAKE_SANITIZERS_OPTION . 
time make -j ${NUM_THREADS} time ctest3 --output-on-failure -j ${NUM_THREADS} ccache --show-stats time make -j ${NUM_THREADS} install DESTDIR=$WORKDIR/vespa-install - build-sample-apps: | - (cd $WORKDIR/sample-apps && time mvn -T $NUM_THREADS $VESPA_MAVEN_EXTRA_OPTS package) + (cd $WORKDIR/sample-apps && time ../vespa/mvnw -T $NUM_THREADS $VESPA_MAVEN_EXTRA_OPTS package) - verify-rpm-build: | cd $WORKDIR ulimit -c 0 diff --git a/searchcore/src/apps/verify_ranksetup/verify_ranksetup.cpp b/searchcore/src/apps/verify_ranksetup/verify_ranksetup.cpp index d80604919de..28899b40408 100644 --- a/searchcore/src/apps/verify_ranksetup/verify_ranksetup.cpp +++ b/searchcore/src/apps/verify_ranksetup/verify_ranksetup.cpp @@ -164,8 +164,8 @@ VerifyRankSetup::~VerifyRankSetup() = default; bool VerifyRankSetup::verify(const search::index::Schema &schema, - const search::fef::Properties &props, - const IRankingAssetsRepo &repo) + const search::fef::Properties &props, + const IRankingAssetsRepo &repo) { proton::matching::IndexEnvironment indexEnv(0, schema, props, repo); search::fef::BlueprintFactory factory; @@ -195,12 +195,12 @@ VerifyRankSetup::verify(const search::index::Schema &schema, bool VerifyRankSetup::verifyConfig(const VerifyRanksetupConfig &myCfg, - const RankProfilesConfig &rankCfg, - const IndexschemaConfig &schemaCfg, - const AttributesConfig &attributeCfg, - const RankingConstantsConfig &constantsCfg, - const RankingExpressionsConfig &expressionsCfg, - const OnnxModelsConfig &modelsCfg) + const RankProfilesConfig &rankCfg, + const IndexschemaConfig &schemaCfg, + const AttributesConfig &attributeCfg, + const RankingConstantsConfig &constantsCfg, + const RankingExpressionsConfig &expressionsCfg, + const OnnxModelsConfig &modelsCfg) { bool ok = true; search::index::Schema schema; diff --git a/searchcore/src/tests/proton/documentdb/configurer/configurer_test.cpp b/searchcore/src/tests/proton/documentdb/configurer/configurer_test.cpp index 4af98801e92..28ec0482074 100644 
--- a/searchcore/src/tests/proton/documentdb/configurer/configurer_test.cpp +++ b/searchcore/src/tests/proton/documentdb/configurer/configurer_test.cpp @@ -218,7 +218,7 @@ Fixture::initViewSet(ViewSet &views) TuneFileIndexManager(), TuneFileAttributes(), views._fileHeaderContext); auto attrMgr = make_shared<AttributeManager>(BASE_DIR, "test.subdb", TuneFileAttributes(), views._fileHeaderContext, std::make_shared<search::attribute::Interlock>(), - views._service.write().attributeFieldWriter(), views._service.write().shared(), views._hwInfo); + views._service.write().field_writer(), views._service.write().shared(), views._hwInfo); auto summaryMgr = make_shared<SummaryManager> (_summaryExecutor, search::LogDocumentStore::Config(), search::GrowStrategy(), BASE_DIR, TuneFileSummary(), views._fileHeaderContext,views._noTlSyncer, search::IBucketizer::SP()); @@ -318,7 +318,7 @@ struct MyFastAccessFeedView StoreOnlyFeedView::PersistentParams params(1, 1, DocTypeName(DOC_TYPE), 0, SubDbType::NOTREADY); auto mgr = make_shared<AttributeManager>(BASE_DIR, "test.subdb", TuneFileAttributes(), _fileHeaderContext, std::make_shared<search::attribute::Interlock>(), - _writeService.attributeFieldWriter(), _writeService.shared(), _hwInfo); + _writeService.field_writer(), _writeService.shared(), _hwInfo); auto writer = std::make_shared<AttributeWriter>(mgr); FastAccessFeedView::Context fastUpdateCtx(writer, _docIdLimit); _feedView.set(std::make_shared<FastAccessFeedView>(std::move(storeOnlyCtx), params, fastUpdateCtx)); diff --git a/searchcore/src/tests/proton/documentdb/document_subdbs/document_subdbs_test.cpp b/searchcore/src/tests/proton/documentdb/document_subdbs/document_subdbs_test.cpp index 0b498a791b3..e558074f724 100644 --- a/searchcore/src/tests/proton/documentdb/document_subdbs/document_subdbs_test.cpp +++ b/searchcore/src/tests/proton/documentdb/document_subdbs/document_subdbs_test.cpp @@ -1033,20 +1033,23 @@ TEST_F("require that underlying components are explorable", 
StoreOnlyExplorerFix { assertExplorer({}, f._explorer); EXPECT_TRUE(f._explorer.get_child("attribute").get() == nullptr); + EXPECT_TRUE(f._explorer.get_child("attributewriter").get() == nullptr); EXPECT_TRUE(f._explorer.get_child("index").get() == nullptr); } TEST_F("require that underlying components are explorable", FastAccessExplorerFixture) { - assertExplorer({"attribute"}, f._explorer); + assertExplorer({"attribute", "attributewriter"}, f._explorer); EXPECT_TRUE(f._explorer.get_child("attribute").get() != nullptr); + EXPECT_TRUE(f._explorer.get_child("attributewriter").get() != nullptr); EXPECT_TRUE(f._explorer.get_child("index").get() == nullptr); } TEST_F("require that underlying components are explorable", SearchableExplorerFixture) { - assertExplorer({"attribute", "index"}, f._explorer); + assertExplorer({"attribute", "attributewriter", "index"}, f._explorer); EXPECT_TRUE(f._explorer.get_child("attribute").get() != nullptr); + EXPECT_TRUE(f._explorer.get_child("attributewriter").get() != nullptr); EXPECT_TRUE(f._explorer.get_child("index").get() != nullptr); } diff --git a/searchcore/src/tests/proton/documentdb/executor_threading_service/executor_threading_service_test.cpp b/searchcore/src/tests/proton/documentdb/executor_threading_service/executor_threading_service_test.cpp index bc02f460b4e..9bba8c12ee7 100644 --- a/searchcore/src/tests/proton/documentdb/executor_threading_service/executor_threading_service_test.cpp +++ b/searchcore/src/tests/proton/documentdb/executor_threading_service/executor_threading_service_test.cpp @@ -34,15 +34,6 @@ public: ThreadingServiceConfig::make())) { } - SequencedTaskExecutor* index_inverter() { - return to_concrete_type(service->indexFieldInverter()); - } - SequencedTaskExecutor* index_writer() { - return to_concrete_type(service->indexFieldWriter()); - } - SequencedTaskExecutor* attribute_writer() { - return to_concrete_type(service->attributeFieldWriter()); - } SequencedTaskExecutor* field_writer() { return 
to_concrete_type(*field_writer_executor); } @@ -57,9 +48,7 @@ assert_executor(SequencedTaskExecutor* exec, uint32_t exp_executors, uint32_t ex TEST_F(ExecutorThreadingServiceTest, shared_field_writer_specified_from_the_outside) { - EXPECT_EQ(field_writer(), index_inverter()); - EXPECT_EQ(field_writer(), index_writer()); - EXPECT_EQ(field_writer(), attribute_writer()); + EXPECT_EQ(field_writer(), &service->field_writer()); assert_executor(field_writer(), 3, 200); } @@ -69,9 +58,7 @@ TEST_F(ExecutorThreadingServiceTest, tasks_limits_can_be_updated) EXPECT_EQ(5, service->master_task_limit()); EXPECT_EQ(7, service->index().getTaskLimit()); EXPECT_EQ(11, service->summary().getTaskLimit()); - EXPECT_EQ(7, index_inverter()->first_executor()->getTaskLimit()); - EXPECT_EQ(7, index_writer()->first_executor()->getTaskLimit()); - EXPECT_EQ(7, attribute_writer()->first_executor()->getTaskLimit()); + EXPECT_EQ(7, field_writer()->first_executor()->getTaskLimit()); } GTEST_MAIN_RUN_ALL_TESTS() diff --git a/searchcore/src/tests/proton/index/fusionrunner_test.cpp b/searchcore/src/tests/proton/index/fusionrunner_test.cpp index 9052d024871..e57b1b6a61d 100644 --- a/searchcore/src/tests/proton/index/fusionrunner_test.cpp +++ b/searchcore/src/tests/proton/index/fusionrunner_test.cpp @@ -192,8 +192,8 @@ void Test::createIndex(const string &dir, uint32_t id, bool fusion) { DocBuilder doc_builder(add_fields); auto schema = SchemaBuilder(doc_builder).add_all_indexes().build(); MemoryIndex memory_index(schema, MockFieldLengthInspector(), - _service.write().indexFieldInverter(), - _service.write().indexFieldWriter()); + _service.write().field_writer(), + _service.write().field_writer()); addDocument(doc_builder, memory_index, *_selector, id, id + 0, term); addDocument(doc_builder, memory_index, *_selector, id, id + 1, "bar"); addDocument(doc_builder, memory_index, *_selector, id, id + 2, "baz"); diff --git a/searchcore/src/vespa/searchcore/proton/attribute/CMakeLists.txt 
b/searchcore/src/vespa/searchcore/proton/attribute/CMakeLists.txt index 70a91b418a9..7e5d3accbc3 100644 --- a/searchcore/src/vespa/searchcore/proton/attribute/CMakeLists.txt +++ b/searchcore/src/vespa/searchcore/proton/attribute/CMakeLists.txt @@ -3,8 +3,8 @@ vespa_add_library(searchcore_attribute STATIC SOURCES address_space_usage_stats.cpp attribute_aspect_delayer.cpp - attribute_collection_spec_factory.cpp attribute_collection_spec.cpp + attribute_collection_spec_factory.cpp attribute_config_inspector.cpp attribute_directory.cpp attribute_executor.cpp @@ -24,9 +24,10 @@ vespa_add_library(searchcore_attribute STATIC attribute_usage_stats.cpp attribute_vector_explorer.cpp attribute_writer.cpp - attributes_initializer_base.cpp + attribute_writer_explorer.cpp attributedisklayout.cpp attributemanager.cpp + attributes_initializer_base.cpp attributesconfigscout.cpp document_field_extractor.cpp document_field_populator.cpp diff --git a/searchcore/src/vespa/searchcore/proton/attribute/attribute_vector_explorer.cpp b/searchcore/src/vespa/searchcore/proton/attribute/attribute_vector_explorer.cpp index 6244bdbea33..6f34fc512aa 100644 --- a/searchcore/src/vespa/searchcore/proton/attribute/attribute_vector_explorer.cpp +++ b/searchcore/src/vespa/searchcore/proton/attribute/attribute_vector_explorer.cpp @@ -2,25 +2,31 @@ #include "attribute_vector_explorer.h" #include "attribute_executor.h" +#include <vespa/searchcommon/attribute/config.h> +#include <vespa/searchlib/attribute/attributevector.h> +#include <vespa/searchlib/attribute/distance_metric_utils.h> #include <vespa/searchlib/attribute/i_enum_store.h> #include <vespa/searchlib/attribute/i_enum_store_dictionary.h> -#include <vespa/searchlib/attribute/multi_value_mapping.h> -#include <vespa/searchlib/attribute/attributevector.h> #include <vespa/searchlib/attribute/ipostinglistattributebase.h> -#include <vespa/searchlib/util/state_explorer_utils.h> +#include <vespa/searchlib/attribute/multi_value_mapping.h> #include 
<vespa/searchlib/tensor/i_tensor_attribute.h> +#include <vespa/searchlib/util/state_explorer_utils.h> #include <vespa/vespalib/data/slime/cursor.h> -using search::attribute::Status; using search::AddressSpaceUsage; using search::AttributeVector; using search::IEnumStore; using search::StateExplorerUtils; -using vespalib::AddressSpace; -using vespalib::MemoryUsage; -using search::attribute::MultiValueMappingBase; +using search::attribute::BasicType; +using search::attribute::CollectionType; +using search::attribute::Config; +using search::attribute::DistanceMetricUtils; using search::attribute::IAttributeVector; using search::attribute::IPostingListAttributeBase; +using search::attribute::MultiValueMappingBase; +using search::attribute::Status; +using vespalib::AddressSpace; +using vespalib::MemoryUsage; using namespace vespalib::slime; namespace proton { @@ -97,6 +103,39 @@ convertPostingBaseToSlime(const IPostingListAttributeBase &postingBase, Cursor & convertMemoryUsageToSlime(postingBase.getMemoryUsage(), object.setObject("memoryUsage")); } +vespalib::string +type_to_string(const Config& cfg) +{ + if (cfg.basicType().type() == BasicType::TENSOR) { + return cfg.tensorType().to_spec(); + } + if (cfg.collectionType().type() == CollectionType::SINGLE) { + return cfg.basicType().asString(); + } + return vespalib::string(cfg.collectionType().asString()) + + "<" + vespalib::string(cfg.basicType().asString()) + ">"; +} + +void +convert_config_to_slime(const Config& cfg, bool full, Cursor& object) +{ + object.setString("type", type_to_string(cfg)); + object.setBool("fast_search", cfg.fastSearch()); + object.setBool("filter", cfg.getIsFilter()); + object.setBool("paged", cfg.paged()); + if (full) { + if (cfg.basicType().type() == BasicType::TENSOR) { + object.setString("distance_metric", DistanceMetricUtils::to_string(cfg.distance_metric())); + } + if (cfg.hnsw_index_params().has_value()) { + const auto& hnsw_cfg = cfg.hnsw_index_params().value(); + auto& hnsw = 
object.setObject("hnsw"); + hnsw.setLong("max_links_per_node", hnsw_cfg.max_links_per_node()); + hnsw.setLong("neighbors_to_explore_at_insert", hnsw_cfg.neighbors_to_explore_at_insert()); + } + } +} + } AttributeVectorExplorer::AttributeVectorExplorer(std::unique_ptr<AttributeExecutor> executor) @@ -117,6 +156,7 @@ AttributeVectorExplorer::get_state_helper(const AttributeVector& attr, const ves const Status &status = attr.getStatus(); Cursor &object = inserter.insertObject(); if (full) { + convert_config_to_slime(attr.getConfig(), full, object.setObject("config")); StateExplorerUtils::status_to_slime(status, object.setObject("status")); convertGenerationToSlime(attr, object.setObject("generation")); convertAddressSpaceUsageToSlime(attr.getAddressSpaceUsage(), object.setObject("addressSpaceUsage")); @@ -144,13 +184,9 @@ AttributeVectorExplorer::get_state_helper(const AttributeVector& attr, const ves object.setLong("committedDocIdLimit", attr.getCommittedDocIdLimit()); object.setLong("createSerialNum", attr.getCreateSerialNum()); } else { - object.setLong("numDocs", status.getNumDocs()); - object.setLong("lastSerialNum", status.getLastSyncToken()); - object.setLong("allocatedMemory", status.getAllocated()); - object.setLong("usedMemory", status.getUsed()); - object.setLong("onHoldMemory", status.getOnHold()); - object.setLong("committedDocIdLimit", attr.getCommittedDocIdLimit()); + convert_config_to_slime(attr.getConfig(), full, object); + object.setLong("allocated_bytes", status.getAllocated()); } } -} // namespace proton +} diff --git a/searchcore/src/vespa/searchcore/proton/attribute/attribute_writer_explorer.cpp b/searchcore/src/vespa/searchcore/proton/attribute/attribute_writer_explorer.cpp new file mode 100644 index 00000000000..eed5f62de6b --- /dev/null +++ b/searchcore/src/vespa/searchcore/proton/attribute/attribute_writer_explorer.cpp @@ -0,0 +1,49 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+ +#include "attribute_writer.h" +#include "attribute_writer_explorer.h" +#include <vespa/searchlib/attribute/attributevector.h> +#include <vespa/vespalib/data/slime/cursor.h> + +using vespalib::slime::Cursor; +using vespalib::slime::Inserter; + +namespace proton { + +AttributeWriterExplorer::AttributeWriterExplorer(std::shared_ptr<IAttributeWriter> writer) + : _writer(std::move(writer)) +{ +} + +AttributeWriterExplorer::~AttributeWriterExplorer() = default; + +namespace { + +void +convert_to_slime(const AttributeWriter::WriteContext& context, Cursor& object) +{ + object.setLong("executor_id", context.getExecutorId().getId()); + Cursor& fields = object.setArray("fields"); + for (const auto& field : context.getFields()) { + fields.addString(field.getAttribute().getName()); + } +} + +} + +void +AttributeWriterExplorer::get_state(const Inserter& inserter, bool full) const +{ + Cursor& object = inserter.insertObject(); + if (full) { + auto* writer = dynamic_cast<AttributeWriter*>(_writer.get()); + if (writer) { + Cursor& contexts = object.setArray("write_contexts"); + for (const auto& context : writer->get_write_contexts()) { + convert_to_slime(context, contexts.addObject()); + } + } + } +} + +} diff --git a/searchcore/src/vespa/searchcore/proton/attribute/attribute_writer_explorer.h b/searchcore/src/vespa/searchcore/proton/attribute/attribute_writer_explorer.h new file mode 100644 index 00000000000..5569c2ae132 --- /dev/null +++ b/searchcore/src/vespa/searchcore/proton/attribute/attribute_writer_explorer.h @@ -0,0 +1,25 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. + +#pragma once + +#include "i_attribute_writer.h" +#include <vespa/vespalib/net/http/state_explorer.h> + +namespace proton { + +/** + * Class used to explore the state of an attribute writer and its write contexts. 
+ */ +class AttributeWriterExplorer : public vespalib::StateExplorer { +private: + std::shared_ptr<IAttributeWriter> _writer; + +public: + AttributeWriterExplorer(std::shared_ptr<IAttributeWriter> writer); + ~AttributeWriterExplorer(); + + void get_state(const vespalib::slime::Inserter& inserter, bool full) const override; +}; + +} + diff --git a/searchcore/src/vespa/searchcore/proton/index/memoryindexwrapper.cpp b/searchcore/src/vespa/searchcore/proton/index/memoryindexwrapper.cpp index 219283dce04..e4c47d1deae 100644 --- a/searchcore/src/vespa/searchcore/proton/index/memoryindexwrapper.cpp +++ b/searchcore/src/vespa/searchcore/proton/index/memoryindexwrapper.cpp @@ -23,8 +23,8 @@ MemoryIndexWrapper::MemoryIndexWrapper(const search::index::Schema& schema, const TuneFileIndexing& tuneFileIndexing, searchcorespi::index::IThreadingService& threadingService, search::SerialNum serialNum) - : _index(schema, inspector, threadingService.indexFieldInverter(), - threadingService.indexFieldWriter()), + : _index(schema, inspector, threadingService.field_writer(), + threadingService.field_writer()), _serialNum(serialNum), _fileHeaderContext(fileHeaderContext), _tuneFileIndexing(tuneFileIndexing) diff --git a/searchcore/src/vespa/searchcore/proton/index/memoryindexwrapper.h b/searchcore/src/vespa/searchcore/proton/index/memoryindexwrapper.h index 3283bfb68eb..2157e6a49ec 100644 --- a/searchcore/src/vespa/searchcore/proton/index/memoryindexwrapper.h +++ b/searchcore/src/vespa/searchcore/proton/index/memoryindexwrapper.h @@ -94,6 +94,10 @@ public: _index.pruneRemovedFields(schema); } void flushToDisk(const vespalib::string &flushDir, uint32_t docIdLimit, SerialNum serialNum) override; + + void insert_write_context_state(vespalib::slime::Cursor& object) const override { + _index.insert_write_context_state(object); + } }; } // namespace proton diff --git a/searchcore/src/vespa/searchcore/proton/metrics/executor_threading_service_metrics.cpp 
b/searchcore/src/vespa/searchcore/proton/metrics/executor_threading_service_metrics.cpp index 030dba4d826..43b38c8d812 100644 --- a/searchcore/src/vespa/searchcore/proton/metrics/executor_threading_service_metrics.cpp +++ b/searchcore/src/vespa/searchcore/proton/metrics/executor_threading_service_metrics.cpp @@ -24,9 +24,10 @@ ExecutorThreadingServiceMetrics::update(const ExecutorThreadingServiceStats &sta master.update(stats.getMasterExecutorStats()); index.update(stats.getIndexExecutorStats()); summary.update(stats.getSummaryExecutorStats()); - indexFieldInverter.update(stats.getIndexFieldInverterExecutorStats()); - indexFieldWriter.update(stats.getIndexFieldWriterExecutorStats()); - attributeFieldWriter.update(stats.getAttributeFieldWriterExecutorStats()); + vespalib::ExecutorStats empty_stats; + indexFieldInverter.update(empty_stats); + indexFieldWriter.update(empty_stats); + attributeFieldWriter.update(empty_stats); } } diff --git a/searchcore/src/vespa/searchcore/proton/metrics/executor_threading_service_stats.cpp b/searchcore/src/vespa/searchcore/proton/metrics/executor_threading_service_stats.cpp index 63644e5c7ab..9d22344eb2b 100644 --- a/searchcore/src/vespa/searchcore/proton/metrics/executor_threading_service_stats.cpp +++ b/searchcore/src/vespa/searchcore/proton/metrics/executor_threading_service_stats.cpp @@ -6,16 +6,10 @@ namespace proton { ExecutorThreadingServiceStats::ExecutorThreadingServiceStats(Stats masterExecutorStats, Stats indexExecutorStats, - Stats summaryExecutorStats, - Stats indexFieldInverterExecutorStats, - Stats indexFieldWriterExecutorStats, - Stats attributeFieldWriterExecutorStats) + Stats summaryExecutorStats) : _masterExecutorStats(masterExecutorStats), _indexExecutorStats(indexExecutorStats), - _summaryExecutorStats(summaryExecutorStats), - _indexFieldInverterExecutorStats(indexFieldInverterExecutorStats), - _indexFieldWriterExecutorStats(indexFieldWriterExecutorStats), - 
_attributeFieldWriterExecutorStats(attributeFieldWriterExecutorStats) + _summaryExecutorStats(summaryExecutorStats) { } diff --git a/searchcore/src/vespa/searchcore/proton/metrics/executor_threading_service_stats.h b/searchcore/src/vespa/searchcore/proton/metrics/executor_threading_service_stats.h index 8015ec83ae9..121ca6038b6 100644 --- a/searchcore/src/vespa/searchcore/proton/metrics/executor_threading_service_stats.h +++ b/searchcore/src/vespa/searchcore/proton/metrics/executor_threading_service_stats.h @@ -16,24 +16,15 @@ private: Stats _masterExecutorStats; Stats _indexExecutorStats; Stats _summaryExecutorStats; - Stats _indexFieldInverterExecutorStats; - Stats _indexFieldWriterExecutorStats; - Stats _attributeFieldWriterExecutorStats; public: ExecutorThreadingServiceStats(Stats masterExecutorStats, Stats indexExecutorStats, - Stats summaryExecutorStats, - Stats indexFieldInverterExecutorStats, - Stats indexFieldWriterExecutorStats, - Stats attributeFieldWriterExecutorStats); + Stats summaryExecutorStats); ~ExecutorThreadingServiceStats(); const Stats &getMasterExecutorStats() const { return _masterExecutorStats; } const Stats &getIndexExecutorStats() const { return _indexExecutorStats; } const Stats &getSummaryExecutorStats() const { return _summaryExecutorStats; } - const Stats &getIndexFieldInverterExecutorStats() const { return _indexFieldInverterExecutorStats; } - const Stats &getIndexFieldWriterExecutorStats() const { return _indexFieldWriterExecutorStats; } - const Stats &getAttributeFieldWriterExecutorStats() const { return _attributeFieldWriterExecutorStats; } }; } diff --git a/searchcore/src/vespa/searchcore/proton/server/document_subdb_explorer.cpp b/searchcore/src/vespa/searchcore/proton/server/document_subdb_explorer.cpp index 8049e76d4b6..cc657b04459 100644 --- a/searchcore/src/vespa/searchcore/proton/server/document_subdb_explorer.cpp +++ b/searchcore/src/vespa/searchcore/proton/server/document_subdb_explorer.cpp @@ -1,10 +1,10 @@ // Copyright 
Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. #include "document_subdb_explorer.h" - #include <vespa/searchcore/proton/attribute/attribute_manager_explorer.h> -#include <vespa/searchcore/proton/documentmetastore/document_meta_store_explorer.h> +#include <vespa/searchcore/proton/attribute/attribute_writer_explorer.h> #include <vespa/searchcore/proton/docsummary/document_store_explorer.h> +#include <vespa/searchcore/proton/documentmetastore/document_meta_store_explorer.h> #include <vespa/searchcorespi/index/index_manager_explorer.h> using searchcorespi::IndexManagerExplorer; @@ -18,6 +18,7 @@ namespace { const vespalib::string DOCUMENT_META_STORE = "documentmetastore"; const vespalib::string DOCUMENT_STORE = "documentstore"; const vespalib::string ATTRIBUTE = "attribute"; +const vespalib::string ATTRIBUTE_WRITER = "attributewriter"; const vespalib::string INDEX = "index"; } @@ -38,10 +39,13 @@ std::vector<vespalib::string> DocumentSubDBExplorer::get_children_names() const { std::vector<vespalib::string> children = {DOCUMENT_META_STORE, DOCUMENT_STORE}; - if (_subDb.getAttributeManager().get() != nullptr) { + if (_subDb.getAttributeManager()) { children.push_back(ATTRIBUTE); } - if (_subDb.getIndexManager().get() != nullptr) { + if (_subDb.get_attribute_writer()) { + children.push_back(ATTRIBUTE_WRITER); + } + if (_subDb.getIndexManager()) { children.push_back(INDEX); } return children; @@ -53,22 +57,27 @@ DocumentSubDBExplorer::get_child(vespalib::stringref name) const if (name == DOCUMENT_META_STORE) { // TODO(geirst): Avoid const cast by adding const interface to // IDocumentMetaStoreContext as seen from IDocumentSubDB. 
- return std::unique_ptr<StateExplorer>(new DocumentMetaStoreExplorer( - (const_cast<IDocumentSubDB &>(_subDb)).getDocumentMetaStoreContext().getReadGuard())); + return std::make_unique<DocumentMetaStoreExplorer>( + (const_cast<IDocumentSubDB &>(_subDb)).getDocumentMetaStoreContext().getReadGuard()); } else if (name == DOCUMENT_STORE) { - return std::unique_ptr<StateExplorer>(new DocumentStoreExplorer(_subDb.getSummaryManager())); + return std::make_unique<DocumentStoreExplorer>(_subDb.getSummaryManager()); } else if (name == ATTRIBUTE) { - proton::IAttributeManager::SP attrMgr = _subDb.getAttributeManager(); - if (attrMgr.get() != nullptr) { - return std::unique_ptr<StateExplorer>(new AttributeManagerExplorer(attrMgr)); + auto attrMgr = _subDb.getAttributeManager(); + if (attrMgr) { + return std::make_unique<AttributeManagerExplorer>(attrMgr); + } + } else if (name == ATTRIBUTE_WRITER) { + auto writer = _subDb.get_attribute_writer(); + if (writer) { + return std::make_unique<AttributeWriterExplorer>(std::move(writer)); } } else if (name == INDEX) { - searchcorespi::IIndexManager::SP idxMgr = _subDb.getIndexManager(); - if (idxMgr.get() != nullptr) { - return std::unique_ptr<StateExplorer>(new IndexManagerExplorer(std::move(idxMgr))); + auto idxMgr = _subDb.getIndexManager(); + if (idxMgr) { + return std::make_unique<IndexManagerExplorer>(std::move(idxMgr)); } } - return std::unique_ptr<StateExplorer>(); + return {}; } -} // namespace proton +} diff --git a/searchcore/src/vespa/searchcore/proton/server/documentdb.cpp b/searchcore/src/vespa/searchcore/proton/server/documentdb.cpp index 3f28f75c521..b0d168fab03 100644 --- a/searchcore/src/vespa/searchcore/proton/server/documentdb.cpp +++ b/searchcore/src/vespa/searchcore/proton/server/documentdb.cpp @@ -413,7 +413,7 @@ DocumentDB::applySubDBConfig(const DocumentDBConfig &newConfigSnapshot, auto newDocType = newRepo->getDocumentType(_docTypeName.getName()); assert(newDocType != nullptr); DocumentDBReferenceResolver 
resolver(*registry, *newDocType, newConfigSnapshot.getImportedFieldsConfig(), *oldDocType, - _refCount, _writeService.attributeFieldWriter(), _state.getAllowReconfig()); + _refCount, _writeService.field_writer(), _state.getAllowReconfig()); _subDBs.applyConfig(newConfigSnapshot, *_activeConfigSnapshot, serialNum, params, resolver, prepared_reconfig); } @@ -535,7 +535,7 @@ DocumentDB::tearDownReferences() auto docType = repo->getDocumentType(_docTypeName.getName()); assert(docType != nullptr); DocumentDBReferenceResolver resolver(*registry, *docType, activeConfig->getImportedFieldsConfig(), *docType, - _refCount, _writeService.attributeFieldWriter(), false); + _refCount, _writeService.field_writer(), false); _subDBs.tearDownReferences(resolver); registry->remove(_docTypeName.getName()); } diff --git a/searchcore/src/vespa/searchcore/proton/server/executor_threading_service_explorer.cpp b/searchcore/src/vespa/searchcore/proton/server/executor_threading_service_explorer.cpp index 9dfebaa825d..9bd60af81ca 100644 --- a/searchcore/src/vespa/searchcore/proton/server/executor_threading_service_explorer.cpp +++ b/searchcore/src/vespa/searchcore/proton/server/executor_threading_service_explorer.cpp @@ -24,9 +24,7 @@ ExecutorThreadingServiceExplorer::get_state(const vespalib::slime::Inserter& ins convert_executor_to_slime(&_service.master(), object.setObject("master")); convert_executor_to_slime(&_service.index(), object.setObject("index")); convert_executor_to_slime(&_service.summary(), object.setObject("summary")); - convert_executor_to_slime(&_service.indexFieldInverter(), object.setObject("index_field_inverter")); - convert_executor_to_slime(&_service.indexFieldWriter(), object.setObject("index_field_writer")); - convert_executor_to_slime(&_service.attributeFieldWriter(), object.setObject("attribute_field_writer")); + convert_executor_to_slime(&_service.field_writer(), object.setObject("field_writer")); } } diff --git 
a/searchcore/src/vespa/searchcore/proton/server/executorthreadingservice.cpp b/searchcore/src/vespa/searchcore/proton/server/executorthreadingservice.cpp index 4798ebd8fbc..01fc0ff3600 100644 --- a/searchcore/src/vespa/searchcore/proton/server/executorthreadingservice.cpp +++ b/searchcore/src/vespa/searchcore/proton/server/executorthreadingservice.cpp @@ -65,9 +65,7 @@ ExecutorThreadingService::ExecutorThreadingService(vespalib::Executor & sharedEx _summaryExecutor(createExecutorWithOneThread(cfg, CpuUsage::wrap(summary_executor, CpuUsage::Category::WRITE))), _masterService(_masterExecutor), _indexService(*_indexExecutor), - _index_field_inverter(field_writer), - _index_field_writer(field_writer), - _attribute_field_writer(field_writer), + _field_writer(field_writer), _invokeRegistrations() { if (cfg.optimize() == vespalib::Executor::OptimizeFor::THROUGHPUT && invokerService) { @@ -92,11 +90,11 @@ void ExecutorThreadingService::shutdown() { _masterExecutor.shutdown().sync(); - _attribute_field_writer.sync_all(); + _field_writer.sync_all(); _summaryExecutor->shutdown().sync(); _indexExecutor->shutdown().sync(); - _index_field_inverter.sync_all(); - _index_field_writer.sync_all(); + _field_writer.sync_all(); + _field_writer.sync_all(); } void @@ -107,10 +105,7 @@ ExecutorThreadingService::set_task_limits(uint32_t master_task_limit, _master_task_limit.store(master_task_limit, std::memory_order_release); _indexExecutor->setTaskLimit(field_task_limit); _summaryExecutor->setTaskLimit(summary_task_limit); - // TODO: Move this to a common place when the field writer is always shared. 
- _index_field_inverter.setTaskLimit(field_task_limit); - _index_field_writer.setTaskLimit(field_task_limit); - _attribute_field_writer.setTaskLimit(field_task_limit); + _field_writer.setTaskLimit(field_task_limit); } ExecutorThreadingServiceStats @@ -119,26 +114,13 @@ ExecutorThreadingService::getStats() auto master_stats = _masterExecutor.getStats(); auto index_stats = _indexExecutor->getStats(); auto summary_stats = _summaryExecutor->getStats(); - vespalib::ExecutorStats empty_stats; - // In this case the field writer stats are reported at a higher level. - return ExecutorThreadingServiceStats(master_stats, index_stats, summary_stats, - empty_stats, empty_stats, empty_stats); + return ExecutorThreadingServiceStats(master_stats, index_stats, summary_stats); } vespalib::ISequencedTaskExecutor & -ExecutorThreadingService::indexFieldInverter() { - return _index_field_inverter; +ExecutorThreadingService::field_writer() { + return _field_writer; } -vespalib::ISequencedTaskExecutor & -ExecutorThreadingService::indexFieldWriter() { - return _index_field_writer; } -vespalib::ISequencedTaskExecutor & -ExecutorThreadingService::attributeFieldWriter() { - return _attribute_field_writer; -} - -} // namespace proton - diff --git a/searchcore/src/vespa/searchcore/proton/server/executorthreadingservice.h b/searchcore/src/vespa/searchcore/proton/server/executorthreadingservice.h index 9da4348c619..ad630c6b1e7 100644 --- a/searchcore/src/vespa/searchcore/proton/server/executorthreadingservice.h +++ b/searchcore/src/vespa/searchcore/proton/server/executorthreadingservice.h @@ -29,9 +29,7 @@ private: std::unique_ptr<vespalib::SyncableThreadExecutor> _summaryExecutor; SyncableExecutorThreadService _masterService; ExecutorThreadService _indexService; - vespalib::ISequencedTaskExecutor& _index_field_inverter; - vespalib::ISequencedTaskExecutor& _index_field_writer; - vespalib::ISequencedTaskExecutor& _attribute_field_writer; + vespalib::ISequencedTaskExecutor& _field_writer; 
std::vector<Registration> _invokeRegistrations; public: @@ -77,9 +75,7 @@ public: return _sharedExecutor; } - vespalib::ISequencedTaskExecutor &indexFieldInverter() override; - vespalib::ISequencedTaskExecutor &indexFieldWriter() override; - vespalib::ISequencedTaskExecutor &attributeFieldWriter() override; + vespalib::ISequencedTaskExecutor &field_writer() override; FNET_Transport &transport() override { return _transport; } const vespalib::Clock &clock() const override { return _clock; } ExecutorThreadingServiceStats getStats(); diff --git a/searchcore/src/vespa/searchcore/proton/server/fast_access_doc_subdb.cpp b/searchcore/src/vespa/searchcore/proton/server/fast_access_doc_subdb.cpp index 8a3de22656b..140bc4d170c 100644 --- a/searchcore/src/vespa/searchcore/proton/server/fast_access_doc_subdb.cpp +++ b/searchcore/src/vespa/searchcore/proton/server/fast_access_doc_subdb.cpp @@ -70,7 +70,7 @@ FastAccessDocSubDB::createAttributeManagerInitializer(const DocumentDBConfig &co configSnapshot.getTuneFileDocumentDBSP()->_attr, _fileHeaderContext, _attribute_interlock, - _writeService.attributeFieldWriter(), + _writeService.field_writer(), _writeService.shared(), attrFactory, _hwInfo); @@ -282,6 +282,12 @@ FastAccessDocSubDB::applyConfig(const DocumentDBConfig &newConfigSnapshot, const return tasks; } +std::shared_ptr<IAttributeWriter> +FastAccessDocSubDB::get_attribute_writer() const +{ + return _fastAccessFeedView.get()->getAttributeWriter(); +} + proton::IAttributeManager::SP FastAccessDocSubDB::getAttributeManager() const { diff --git a/searchcore/src/vespa/searchcore/proton/server/fast_access_doc_subdb.h b/searchcore/src/vespa/searchcore/proton/server/fast_access_doc_subdb.h index 942d42b5ad3..3a6eeef7dac 100644 --- a/searchcore/src/vespa/searchcore/proton/server/fast_access_doc_subdb.h +++ b/searchcore/src/vespa/searchcore/proton/server/fast_access_doc_subdb.h @@ -119,6 +119,7 @@ public: applyConfig(const DocumentDBConfig &newConfigSnapshot, const DocumentDBConfig 
&oldConfigSnapshot, SerialNum serialNum, const ReconfigParams ¶ms, IDocumentDBReferenceResolver &resolver, const DocumentSubDBReconfig& prepared_reconfig) override; + std::shared_ptr<IAttributeWriter> get_attribute_writer() const override; std::shared_ptr<IAttributeManager> getAttributeManager() const override; IDocumentRetriever::UP getDocumentRetriever() override; void onReplayDone() override; diff --git a/searchcore/src/vespa/searchcore/proton/server/idocumentsubdb.h b/searchcore/src/vespa/searchcore/proton/server/idocumentsubdb.h index 506af3f6355..92fa0dcad43 100644 --- a/searchcore/src/vespa/searchcore/proton/server/idocumentsubdb.h +++ b/searchcore/src/vespa/searchcore/proton/server/idocumentsubdb.h @@ -30,6 +30,7 @@ class DocumentSubDBReconfig; class DocumentSubDbInitializer; class DocumentSubDbInitializerResult; class FeedHandler; +class IAttributeWriter; class IDocumentDBReference; class IDocumentRetriever; class IFeedView; @@ -92,6 +93,7 @@ public: virtual std::shared_ptr<IFeedView> getFeedView() const = 0; virtual void clearViews() = 0; virtual const std::shared_ptr<ISummaryManager> &getSummaryManager() const = 0; + virtual std::shared_ptr<IAttributeWriter> get_attribute_writer() const = 0; virtual std::shared_ptr<IAttributeManager> getAttributeManager() const = 0; virtual const std::shared_ptr<searchcorespi::IIndexManager> &getIndexManager() const = 0; virtual const std::shared_ptr<ISummaryAdapter> &getSummaryAdapter() const = 0; diff --git a/searchcore/src/vespa/searchcore/proton/server/searchabledocsubdb.cpp b/searchcore/src/vespa/searchcore/proton/server/searchabledocsubdb.cpp index 4f694bcfd38..b846a3eceff 100644 --- a/searchcore/src/vespa/searchcore/proton/server/searchabledocsubdb.cpp +++ b/searchcore/src/vespa/searchcore/proton/server/searchabledocsubdb.cpp @@ -348,6 +348,12 @@ SearchableDocSubDB::clearViews() { Parent::clearViews(); } +std::shared_ptr<IAttributeWriter> +SearchableDocSubDB::get_attribute_writer() const +{ + return 
_rFeedView.get()->getAttributeWriter(); +} + TransientResourceUsage SearchableDocSubDB::get_transient_resource_usage() const { diff --git a/searchcore/src/vespa/searchcore/proton/server/searchabledocsubdb.h b/searchcore/src/vespa/searchcore/proton/server/searchabledocsubdb.h index 97172f23bcb..7cfe2b5f444 100644 --- a/searchcore/src/vespa/searchcore/proton/server/searchabledocsubdb.h +++ b/searchcore/src/vespa/searchcore/proton/server/searchabledocsubdb.h @@ -109,6 +109,8 @@ public: void clearViews() override; + std::shared_ptr<IAttributeWriter> get_attribute_writer() const override; + std::shared_ptr<IAttributeManager> getAttributeManager() const override { return _rSearchView.get()->getAttributeManager(); } diff --git a/searchcore/src/vespa/searchcore/proton/server/storeonlydocsubdb.h b/searchcore/src/vespa/searchcore/proton/server/storeonlydocsubdb.h index 1c956a4c45c..7a6bca2cd1f 100644 --- a/searchcore/src/vespa/searchcore/proton/server/storeonlydocsubdb.h +++ b/searchcore/src/vespa/searchcore/proton/server/storeonlydocsubdb.h @@ -214,6 +214,7 @@ public: void clearViews() override; const ISummaryManager::SP &getSummaryManager() const override { return _iSummaryMgr; } + std::shared_ptr<IAttributeWriter> get_attribute_writer() const override { return {}; } IAttributeManager::SP getAttributeManager() const override; const std::shared_ptr<searchcorespi::IIndexManager> & getIndexManager() const override; const ISummaryAdapter::SP & getSummaryAdapter() const override { return _summaryAdapter; } diff --git a/searchcore/src/vespa/searchcore/proton/test/dummy_document_sub_db.h b/searchcore/src/vespa/searchcore/proton/test/dummy_document_sub_db.h index d53016d7d8d..12603f1195a 100644 --- a/searchcore/src/vespa/searchcore/proton/test/dummy_document_sub_db.h +++ b/searchcore/src/vespa/searchcore/proton/test/dummy_document_sub_db.h @@ -66,14 +66,9 @@ struct DummyDocumentSubDb : public IDocumentSubDB IFeedView::SP getFeedView() const override { return IFeedView::SP(); } 
void clearViews() override {} const ISummaryManager::SP &getSummaryManager() const override { return _summaryManager; } - proton::IAttributeManager::SP getAttributeManager() const override { - return proton::IAttributeManager::SP(); - } - - void validateDocStore(FeedHandler &, SerialNum ) const override { - - } - + std::shared_ptr<IAttributeWriter> get_attribute_writer() const override { return {}; } + proton::IAttributeManager::SP getAttributeManager() const override { return {}; } + void validateDocStore(FeedHandler &, SerialNum ) const override {} const IIndexManager::SP &getIndexManager() const override { return _indexManager; } const ISummaryAdapter::SP &getSummaryAdapter() const override { return _summaryAdapter; } const IIndexWriter::SP &getIndexWriter() const override { return _indexWriter; } diff --git a/searchcore/src/vespa/searchcore/proton/test/threading_service_observer.cpp b/searchcore/src/vespa/searchcore/proton/test/threading_service_observer.cpp index 4112564632a..2790c0962b4 100644 --- a/searchcore/src/vespa/searchcore/proton/test/threading_service_observer.cpp +++ b/searchcore/src/vespa/searchcore/proton/test/threading_service_observer.cpp @@ -10,9 +10,7 @@ ThreadingServiceObserver::ThreadingServiceObserver(searchcorespi::index::IThread _index(service.index()), _summary(service.summary()), _shared(service.shared()), - _indexFieldInverter(_service.indexFieldInverter()), - _indexFieldWriter(_service.indexFieldWriter()), - _attributeFieldWriter(_service.attributeFieldWriter()) + _field_writer(_service.field_writer()) { } diff --git a/searchcore/src/vespa/searchcore/proton/test/threading_service_observer.h b/searchcore/src/vespa/searchcore/proton/test/threading_service_observer.h index 16b466a2275..56b5fea293c 100644 --- a/searchcore/src/vespa/searchcore/proton/test/threading_service_observer.h +++ b/searchcore/src/vespa/searchcore/proton/test/threading_service_observer.h @@ -16,9 +16,7 @@ private: ThreadServiceObserver _index; ThreadExecutorObserver 
_summary; vespalib::Executor & _shared; - vespalib::SequencedTaskExecutorObserver _indexFieldInverter; - vespalib::SequencedTaskExecutorObserver _indexFieldWriter; - vespalib::SequencedTaskExecutorObserver _attributeFieldWriter; + vespalib::SequencedTaskExecutorObserver _field_writer; public: ThreadingServiceObserver(searchcorespi::index::IThreadingService &service); @@ -51,17 +49,9 @@ public: } FNET_Transport & transport() override { return _service.transport(); } const vespalib::Clock & clock() const override { return _service.clock(); } - vespalib::ISequencedTaskExecutor &indexFieldInverter() override { - return _indexFieldInverter; + vespalib::ISequencedTaskExecutor &field_writer() override { + return _field_writer; } - vespalib::ISequencedTaskExecutor &indexFieldWriter() override { - return _indexFieldWriter; - } - - vespalib::ISequencedTaskExecutor &attributeFieldWriter() override { - return _attributeFieldWriter; - } - }; } diff --git a/searchcore/src/vespa/searchcorespi/index/imemoryindex.h b/searchcore/src/vespa/searchcorespi/index/imemoryindex.h index 1b370dff262..130042bc048 100644 --- a/searchcore/src/vespa/searchcorespi/index/imemoryindex.h +++ b/searchcore/src/vespa/searchcorespi/index/imemoryindex.h @@ -7,8 +7,9 @@ #include <vespa/vespalib/stllike/string.h> #include <vespa/vespalib/util/memoryusage.h> -namespace vespalib { class IDestructorCallback; } namespace document { class Document; } +namespace vespalib { class IDestructorCallback; } +namespace vespalib::slime { struct Cursor; } namespace searchcorespi::index { /** @@ -78,6 +79,8 @@ struct IMemoryIndex : public searchcorespi::IndexSearchable { virtual void pruneRemovedFields(const search::index::Schema &schema) = 0; virtual search::index::Schema::SP getPrunedSchema() const = 0; + + virtual void insert_write_context_state(vespalib::slime::Cursor& object) const = 0; }; } diff --git a/searchcore/src/vespa/searchcorespi/index/index_manager_explorer.cpp 
b/searchcore/src/vespa/searchcorespi/index/index_manager_explorer.cpp index 855f3c69bc9..1634937f094 100644 --- a/searchcore/src/vespa/searchcorespi/index/index_manager_explorer.cpp +++ b/searchcore/src/vespa/searchcorespi/index/index_manager_explorer.cpp @@ -2,7 +2,8 @@ #include "index_manager_explorer.h" #include "index_manager_stats.h" - +#include <vespa/searchcorespi/index/imemoryindex.h> +#include <vespa/searchcorespi/index/indexsearchablevisitor.h> #include <vespa/vespalib/data/slime/cursor.h> using vespalib::slime::Cursor; @@ -45,8 +46,23 @@ insertMemoryIndex(Cursor &arrayCursor, const MemoryIndexStats &memoryIndex) insertMemoryUsage(memoryIndexCursor, sstats.memoryUsage()); } -} +class WriteContextInserter : public IndexSearchableVisitor { +private: + Cursor& _object; + bool _has_inserted; + +public: + WriteContextInserter(Cursor& object) : _object(object), _has_inserted(false) {} + void visit(const index::IDiskIndex&) override {} + void visit(const index::IMemoryIndex& index) override { + if (!_has_inserted) { + index.insert_write_context_state(_object); + _has_inserted = true; + } + } +}; +} IndexManagerExplorer::IndexManagerExplorer(IIndexManager::SP mgr) : _mgr(std::move(mgr)) @@ -68,6 +84,9 @@ IndexManagerExplorer::get_state(const Inserter &inserter, bool full) const for (const auto &memoryIndex : stats.getMemoryIndexes()) { insertMemoryIndex(memoryIndexArrayCursor, memoryIndex); } + auto& write_contexts = object.setObject("write_contexts"); + WriteContextInserter visitor(write_contexts); + _mgr->getSearchable()->accept(visitor); } } diff --git a/searchcore/src/vespa/searchcorespi/index/ithreadingservice.h b/searchcore/src/vespa/searchcorespi/index/ithreadingservice.h index c325d5ded11..4fce6f85a2b 100644 --- a/searchcore/src/vespa/searchcorespi/index/ithreadingservice.h +++ b/searchcore/src/vespa/searchcorespi/index/ithreadingservice.h @@ -20,19 +20,19 @@ namespace searchcorespi::index { * * 2. 
The "index" write thread used for doing changes to the memory * index, either directly (for data not bound to a field) or via - * index field inverter executor or index field writer executor. + * field writer executor (index field inverter / index field writer). * * 3. The "summary" thread is used for doing changes to the document store. * - * 4. The "index field inverter" executor is used to populate field + * 4. The field writer executor ("index field inverter") is used to populate field * inverters with data from document fields. Scheduled tasks for * the same field are executed in sequence. * - * 5. The "index field writer" executor is used to sort data in field + * 5. The field writer executor ("index field writer") is used to sort data in field * inverters before pushing the data to the memory field indexes. * Scheduled tasks for the same field are executed in sequence. * - * 6. The "attribute field writer" executor is used to write data to attribute vectors. + * 6. The field writer executor ("attribute field writer") is used to write data to attribute vectors. * Each attribute is always handled by the same thread, * and scheduled tasks for the same attribute are executed in sequence. * @@ -47,19 +47,6 @@ namespace searchcorespi::index { * task to the index field inverter executor and the index field * writer executor. * - * The index field inverter executor and index field writer executor - * are separate to allow for double buffering, i.e. populate one set - * of field inverters using the index field inverter executor while - * another set of field inverters are handled by the index field - * writer executor. - * - * We might decide to allow index field inverter tasks to schedule - * tasks to the index field writer executor, so draining logic needs - * to sync index field inverter executor before syncing index field - * writer executor. - * - * TODO: * indexFieldInverter and indexFieldWriter can be collapsed to one. 
Both need sequencing, - * but they sequence on different things so efficiency will be the same and just depends on #threads */ struct IThreadingService { @@ -80,9 +67,7 @@ struct IThreadingService virtual vespalib::Executor &shared() = 0; virtual FNET_Transport &transport() = 0; virtual const vespalib::Clock &clock() const = 0; - virtual vespalib::ISequencedTaskExecutor &indexFieldInverter() = 0; - virtual vespalib::ISequencedTaskExecutor &indexFieldWriter() = 0; - virtual vespalib::ISequencedTaskExecutor &attributeFieldWriter() = 0; + virtual vespalib::ISequencedTaskExecutor &field_writer() = 0; }; } diff --git a/searchlib/src/tests/memoryindex/memory_index/memory_index_test.cpp b/searchlib/src/tests/memoryindex/memory_index/memory_index_test.cpp index e3b9cf9702d..3547bf6c9a8 100644 --- a/searchlib/src/tests/memoryindex/memory_index/memory_index_test.cpp +++ b/searchlib/src/tests/memoryindex/memory_index/memory_index_test.cpp @@ -8,24 +8,25 @@ #include <vespa/searchlib/fef/matchdatalayout.h> #include <vespa/searchlib/fef/termfieldmatchdata.h> #include <vespa/searchlib/index/i_field_length_inspector.h> -#include <vespa/searchlib/test/doc_builder.h> -#include <vespa/searchlib/test/schema_builder.h> -#include <vespa/searchlib/test/string_field_builder.h> #include <vespa/searchlib/memoryindex/memory_index.h> #include <vespa/searchlib/query/tree/simplequery.h> +#include <vespa/searchlib/queryeval/blueprint.h> #include <vespa/searchlib/queryeval/booleanmatchiteratorwrapper.h> #include <vespa/searchlib/queryeval/fake_requestcontext.h> #include <vespa/searchlib/queryeval/fake_searchable.h> #include <vespa/searchlib/queryeval/leaf_blueprints.h> #include <vespa/searchlib/queryeval/searchiterator.h> -#include <vespa/searchlib/queryeval/simpleresult.h> #include <vespa/searchlib/queryeval/simple_phrase_blueprint.h> -#include <vespa/searchlib/queryeval/blueprint.h> +#include <vespa/searchlib/queryeval/simpleresult.h> +#include <vespa/searchlib/test/doc_builder.h> +#include 
<vespa/searchlib/test/schema_builder.h> +#include <vespa/searchlib/test/string_field_builder.h> +#include <vespa/vespalib/data/slime/slime.h> +#include <vespa/vespalib/gtest/gtest.h> #include <vespa/vespalib/util/sequencedtaskexecutor.h> #include <vespa/vespalib/util/size_literals.h> #include <vespa/vespalib/util/stringfmt.h> #include <vespa/vespalib/util/threadstackexecutor.h> -#include <vespa/vespalib/gtest/gtest.h> #include <vespa/log/log.h> LOG_SETUP("memory_index_test"); @@ -36,7 +37,6 @@ using document::FieldValue; using search::ScheduleTaskCallback; using search::index::FieldLengthInfo; using search::index::IFieldLengthInspector; -using vespalib::makeLambdaTask; using search::query::Node; using search::query::SimplePhrase; using search::query::SimpleStringTerm; @@ -45,6 +45,11 @@ using search::test::SchemaBuilder; using search::test::StringFieldBuilder; using vespalib::ISequencedTaskExecutor; using vespalib::SequencedTaskExecutor; +using vespalib::Slime; +using vespalib::makeLambdaTask; +using vespalib::slime::JsonFormat; +using vespalib::slime::SlimeInserter; + using namespace search::fef; using namespace search::index; using namespace search::memoryindex; @@ -542,4 +547,18 @@ TEST(MemoryIndexTest, field_length_info_can_be_retrieved_per_field) EXPECT_EQ(0, index.index.get_field_length_info("na").get_num_samples()); } +TEST(MemoryIndexTest, write_context_state_as_slime) +{ + Index index(MySetup().field(title).field(body)); + Slime act; + SlimeInserter inserter(act); + index.index.insert_write_context_state(inserter.insertObject()); + Slime exp; + JsonFormat::decode("{\"invert\": [{\"executor_id\": 0, \"fields\": [\"body\"]}," + "{\"executor_id\": 1, \"fields\": [\"title\"]}]," + "\"push\": [{\"executor_id\": 0, \"fields\": [\"body\"]}," + "{\"executor_id\": 1, \"fields\": [\"title\"]}]}", exp); + EXPECT_EQ(exp, act); +} + GTEST_MAIN_RUN_ALL_TESTS() diff --git a/searchlib/src/vespa/searchlib/common/bitvectorcache.h 
b/searchlib/src/vespa/searchlib/common/bitvectorcache.h index bb8f019c128..f4cced0afa4 100644 --- a/searchlib/src/vespa/searchlib/common/bitvectorcache.h +++ b/searchlib/src/vespa/searchlib/common/bitvectorcache.h @@ -14,10 +14,10 @@ public: class Iterator { public: using UP = std::unique_ptr<Iterator>; - virtual ~Iterator() { } + virtual ~Iterator() = default; virtual int32_t getNext() = 0; }; - virtual ~PopulateInterface() { } + virtual ~PopulateInterface() = default; virtual Iterator::UP lookup(uint64_t key) const = 0; }; @@ -30,7 +30,7 @@ public: using CountVector = CondensedBitVector::CountVector; using GenerationHolder = vespalib::GenerationHolder; - BitVectorCache(GenerationHolder &genHolder); + explicit BitVectorCache(GenerationHolder &genHolder); ~BitVectorCache(); void computeCountVector(KeySet & keys, CountVector & v) const; KeySet lookupCachedSet(const KeyAndCountSet & keys); @@ -44,27 +44,38 @@ public: private: class KeyMeta { public: - KeyMeta() : - _lookupCount(0), - _bitCount(0), - _chunkId(-1), - _chunkIndex(0) + KeyMeta() noexcept + : _lookupCount(0), + _bitCount(0), + _chunkId(-1), + _chunkIndex(0) { } - double cost() const { return _bitCount * _lookupCount; } + KeyMeta(const KeyMeta & rhs) noexcept + : _lookupCount(rhs.lookupCount()), + _bitCount(rhs._bitCount), + _chunkId(rhs._chunkId), + _chunkIndex(rhs._chunkIndex) + {} + KeyMeta & operator = (const KeyMeta & rhs) { + _lookupCount.store(rhs.lookupCount(), std::memory_order_release); + _bitCount = rhs._bitCount; + _chunkId = rhs._chunkId; + _chunkIndex = rhs._chunkIndex; + return *this; + } + double cost() const { return _bitCount * lookupCount(); } bool isCached() const { return _chunkId >= 0; } size_t bitCount() const { return _bitCount; } size_t chunkIndex() const { return _chunkIndex; } size_t chunkId() const { return _chunkId; } - size_t lookupCount() const { return _lookupCount; } - KeyMeta & incBits() { _bitCount++; return *this; } - KeyMeta & decBits() { _bitCount--; return *this; } + 
size_t lookupCount() const { return _lookupCount.load(std::memory_order_relaxed); } KeyMeta & lookup() { _lookupCount++; return *this; } KeyMeta & bitCount(uint32_t v) { _bitCount = v; return *this; } KeyMeta & chunkId(uint32_t v) { _chunkId = v; return *this; } KeyMeta & chunkIndex(uint32_t v) { _chunkIndex = v; return *this; } KeyMeta & unCache() { _chunkId = -1; return *this; } private: - size_t _lookupCount; + std::atomic<size_t> _lookupCount; uint32_t _bitCount; int32_t _chunkId; uint32_t _chunkIndex; diff --git a/searchlib/src/vespa/searchlib/features/onnx_feature.cpp b/searchlib/src/vespa/searchlib/features/onnx_feature.cpp index cdeb0515659..a330a4ff325 100644 --- a/searchlib/src/vespa/searchlib/features/onnx_feature.cpp +++ b/searchlib/src/vespa/searchlib/features/onnx_feature.cpp @@ -132,8 +132,7 @@ OnnxBlueprint::setup(const IIndexEnvironment &env, return fail("model setup failed: %s", ex.what()); } Onnx::WirePlanner planner; - for (size_t i = 0; i < _model->inputs().size(); ++i) { - const auto &model_input = _model->inputs()[i]; + for (const auto & model_input : _model->inputs()) { auto input_feature = model_cfg->input_feature(model_input.name); if (!input_feature.has_value()) { input_feature = fmt("rankingExpression(\"%s\")", normalize_name(model_input.name, "input").c_str()); @@ -151,8 +150,7 @@ OnnxBlueprint::setup(const IIndexEnvironment &env, } } planner.prepare_output_types(*_model); - for (size_t i = 0; i < _model->outputs().size(); ++i) { - const auto &model_output = _model->outputs()[i]; + for (const auto & model_output : _model->outputs()) { auto output_name = model_cfg->output_name(model_output.name); if (!output_name.has_value()) { output_name = normalize_name(model_output.name, "output"); diff --git a/searchlib/src/vespa/searchlib/memoryindex/bundled_fields_context.cpp b/searchlib/src/vespa/searchlib/memoryindex/bundled_fields_context.cpp index af7e19ee20d..4f9e88b323e 100644 --- 
a/searchlib/src/vespa/searchlib/memoryindex/bundled_fields_context.cpp +++ b/searchlib/src/vespa/searchlib/memoryindex/bundled_fields_context.cpp @@ -20,9 +20,10 @@ BundledFieldsContext::add_field(uint32_t field_id) } void -BundledFieldsContext::add_uri_field(uint32_t uri_field_id) +BundledFieldsContext::add_uri_field(uint32_t uri_field_id, uint32_t uri_all_field_id) { _uri_fields.emplace_back(uri_field_id); + _uri_all_field_ids.emplace_back(uri_all_field_id); } } diff --git a/searchlib/src/vespa/searchlib/memoryindex/bundled_fields_context.h b/searchlib/src/vespa/searchlib/memoryindex/bundled_fields_context.h index fb1a68d7273..c058c14832d 100644 --- a/searchlib/src/vespa/searchlib/memoryindex/bundled_fields_context.h +++ b/searchlib/src/vespa/searchlib/memoryindex/bundled_fields_context.h @@ -16,16 +16,18 @@ class BundledFieldsContext vespalib::ISequencedTaskExecutor::ExecutorId _id; std::vector<uint32_t> _fields; std::vector<uint32_t> _uri_fields; + std::vector<uint32_t> _uri_all_field_ids; protected: BundledFieldsContext(vespalib::ISequencedTaskExecutor::ExecutorId id); ~BundledFieldsContext(); public: void add_field(uint32_t field_id); - void add_uri_field(uint32_t uri_field_id); + void add_uri_field(uint32_t uri_field_id, uint32_t uri_all_field_id); void set_id(vespalib::ISequencedTaskExecutor::ExecutorId id) { _id = id; } vespalib::ISequencedTaskExecutor::ExecutorId get_id() const noexcept { return _id; } const std::vector<uint32_t>& get_fields() const noexcept { return _fields; } const std::vector<uint32_t>& get_uri_fields() const noexcept { return _uri_fields; } + const std::vector<uint32_t>& get_uri_all_field_ids() const noexcept { return _uri_all_field_ids; } }; } diff --git a/searchlib/src/vespa/searchlib/memoryindex/document_inverter_context.cpp b/searchlib/src/vespa/searchlib/memoryindex/document_inverter_context.cpp index 8183cb005fe..93a12c24257 100644 --- a/searchlib/src/vespa/searchlib/memoryindex/document_inverter_context.cpp +++ 
b/searchlib/src/vespa/searchlib/memoryindex/document_inverter_context.cpp @@ -15,20 +15,20 @@ template <typename Context> void make_contexts(const index::Schema& schema, const SchemaIndexFields& schema_index_fields, ISequencedTaskExecutor& executor, std::vector<Context>& contexts) { using ExecutorId = ISequencedTaskExecutor::ExecutorId; - using IdMapping = std::vector<std::tuple<ExecutorId, bool, uint32_t>>; + using IdMapping = std::vector<std::tuple<ExecutorId, bool, uint32_t, uint32_t>>; IdMapping map; for (uint32_t field_id : schema_index_fields._textFields) { // TODO: Add bias when sharing sequenced task executor between document types auto& name = schema.getIndexField(field_id).getName(); auto id = executor.getExecutorIdFromName(name); - map.emplace_back(id, false, field_id); + map.emplace_back(id, false, field_id, 0); } uint32_t uri_field_id = 0; for (auto& uri_field : schema_index_fields._uriFields) { // TODO: Add bias when sharing sequenced task executor between document types auto& name = schema.getIndexField(uri_field._all).getName(); auto id = executor.getExecutorIdFromName(name); - map.emplace_back(id, true, uri_field_id); + map.emplace_back(id, true, uri_field_id, uri_field._all); ++uri_field_id; } std::sort(map.begin(), map.end()); @@ -39,7 +39,7 @@ void make_contexts(const index::Schema& schema, const SchemaIndexFields& schema_ prev_id = std::get<0>(entry); } if (std::get<1>(entry)) { - contexts.back().add_uri_field(std::get<2>(entry)); + contexts.back().add_uri_field(std::get<2>(entry), std::get<3>(entry)); } else { contexts.back().add_field(std::get<2>(entry)); } diff --git a/searchlib/src/vespa/searchlib/memoryindex/memory_index.cpp b/searchlib/src/vespa/searchlib/memoryindex/memory_index.cpp index 643bcbb325e..86421711e32 100644 --- a/searchlib/src/vespa/searchlib/memoryindex/memory_index.cpp +++ b/searchlib/src/vespa/searchlib/memoryindex/memory_index.cpp @@ -7,13 +7,14 @@ #include "field_index_collection.h" #include 
<vespa/document/fieldvalue/arrayfieldvalue.h> #include <vespa/document/fieldvalue/document.h> -#include <vespa/vespalib/util/isequencedtaskexecutor.h> #include <vespa/searchlib/index/field_length_calculator.h> #include <vespa/searchlib/index/schemautil.h> #include <vespa/searchlib/queryeval/create_blueprint_visitor_helper.h> #include <vespa/searchlib/queryeval/emptysearch.h> #include <vespa/searchlib/queryeval/leaf_blueprints.h> #include <vespa/vespalib/btree/btreenodeallocator.hpp> +#include <vespa/vespalib/data/slime/cursor.h> +#include <vespa/vespalib/util/isequencedtaskexecutor.h> #include <vespa/log/log.h> LOG_SETUP(".searchlib.memoryindex.memory_index"); @@ -47,6 +48,7 @@ using queryeval::FieldSpec; using queryeval::IRequestContext; using queryeval::Searchable; using vespalib::ISequencedTaskExecutor; +using vespalib::slime::Cursor; } @@ -251,4 +253,42 @@ MemoryIndex::get_field_length_info(const vespalib::string& field_name) const return FieldLengthInfo(); } +namespace { + +void +fields_to_slime(const std::vector<uint32_t>& field_ids, const Schema& schema, Cursor& array) +{ + for (uint32_t field_id : field_ids) { + assert(field_id < schema.getIndexFields().size()); + const auto& field = schema.getIndexField(field_id); + array.addString(field.getName()); + } +} + +void +write_context_to_slime(const BundledFieldsContext& ctx, const Schema& schema, Cursor& object) +{ + object.setLong("executor_id", ctx.get_id().getId()); + auto& fields = object.setArray("fields"); + fields_to_slime(ctx.get_fields(), schema, fields); + fields_to_slime(ctx.get_uri_all_field_ids(), schema, fields); +} + +} + +void +MemoryIndex::insert_write_context_state(Cursor& object) const +{ + auto& invert = object.setArray("invert"); + for (const auto& ctx : _inverter_context->get_invert_contexts()) { + auto& ctx_obj = invert.addObject(); + write_context_to_slime(ctx, _schema, ctx_obj); + } + auto& push = object.setArray("push"); + for (const auto& ctx : _inverter_context->get_push_contexts()) 
{ + auto& ctx_obj = push.addObject(); + write_context_to_slime(ctx, _schema, ctx_obj); + } +} + } diff --git a/searchlib/src/vespa/searchlib/memoryindex/memory_index.h b/searchlib/src/vespa/searchlib/memoryindex/memory_index.h index af76ed172ba..320c6fba277 100644 --- a/searchlib/src/vespa/searchlib/memoryindex/memory_index.h +++ b/searchlib/src/vespa/searchlib/memoryindex/memory_index.h @@ -17,7 +17,7 @@ namespace search::index { } namespace vespalib { class ISequencedTaskExecutor; } - +namespace vespalib::slime { struct Cursor; } namespace document { class Document; } namespace search::memoryindex { @@ -175,6 +175,8 @@ public: uint64_t getStaticMemoryFootprint() const { return _staticMemoryFootprint; } index::FieldLengthInfo get_field_length_info(const vespalib::string& field_name) const; + + void insert_write_context_state(vespalib::slime::Cursor& object) const; }; } diff --git a/searchlib/src/vespa/searchlib/util/fileutil.cpp b/searchlib/src/vespa/searchlib/util/fileutil.cpp index c2f86224312..f602c66b544 100644 --- a/searchlib/src/vespa/searchlib/util/fileutil.cpp +++ b/searchlib/src/vespa/searchlib/util/fileutil.cpp @@ -36,7 +36,9 @@ LoadedMmap::LoadedMmap(const vespalib::string &fileName) if (sz) { void *tmpBuffer = mmap(nullptr, sz, PROT_READ, MAP_PRIVATE, fd.fd(), 0); if (tmpBuffer != MAP_FAILED) { +#ifdef __linux__ madvise(tmpBuffer, sz, MADV_DONTDUMP); +#endif _mapSize = sz; _mapBuffer = tmpBuffer; uint32_t hl = GenericHeader::getMinSize(); diff --git a/storage/src/tests/distributor/top_level_bucket_db_updater_test.cpp b/storage/src/tests/distributor/top_level_bucket_db_updater_test.cpp index 7eb9dfe6269..074b1492a6e 100644 --- a/storage/src/tests/distributor/top_level_bucket_db_updater_test.cpp +++ b/storage/src/tests/distributor/top_level_bucket_db_updater_test.cpp @@ -829,6 +829,36 @@ TEST_F(TopLevelBucketDBUpdaterTest, storage_node_in_maintenance_clears_buckets_f EXPECT_FALSE(bucket_exists_that_has_node(100, 1)); } 
+TEST_F(TopLevelBucketDBUpdaterTest, node_removed_from_distribution_config_clears_buckets_for_node) { + ASSERT_NO_FATAL_FAILURE(set_storage_nodes(3)); + enable_distributor_cluster_state("distributor:1 storage:3"); + + for (int i = 1; i < 100; ++i) { + add_ideal_nodes(document::BucketId(16, i)); + } + + EXPECT_TRUE(bucket_exists_that_has_node(100, 1)); + + // Node 1 is removed, 0 and 2 remain + auto distribution_config = "redundancy 2\n" + "group[2]\n" + "group[0].name \"invalid\"\n" + "group[0].index \"invalid\"\n" + "group[0].partitions 1|*\n" + "group[0].nodes[0]\n" + "group[1].name coolnodes\n" + "group[1].index 0\n" + "group[1].nodes[2]\n" + "group[1].nodes[0].index 0\n" + "group[1].nodes[2].index 2\n"; + + set_distribution(distribution_config); + + EXPECT_TRUE( bucket_exists_that_has_node(100, 0)); + EXPECT_FALSE(bucket_exists_that_has_node(100, 1)); + EXPECT_TRUE( bucket_exists_that_has_node(100, 2)); +} + TEST_F(TopLevelBucketDBUpdaterTest, node_down_copies_get_in_sync) { ASSERT_NO_FATAL_FAILURE(set_storage_nodes(3)); document::BucketId bid(16, 1); diff --git a/storage/src/vespa/storage/distributor/activecopy.cpp b/storage/src/vespa/storage/distributor/activecopy.cpp index 4c35d42a0e7..e9d6d8cca30 100644 --- a/storage/src/vespa/storage/distributor/activecopy.cpp +++ b/storage/src/vespa/storage/distributor/activecopy.cpp @@ -132,7 +132,7 @@ ActiveCopy::calculate(const Node2Index & idealState, const lib::Distribution& di { IndexList validNodesWithCopy = buildValidNodeIndexList(e); if (validNodesWithCopy.empty()) { - return ActiveList(); + return {}; } std::vector<IndexList> groups; if (distribution.activePerGroup()) { @@ -162,7 +162,7 @@ ActiveCopy::calculate(const Node2Index & idealState, const lib::Distribution& di } result.emplace_back(*best); } - return ActiveList(std::move(result)); + return {std::move(result)}; } void @@ -170,8 +170,8 @@ ActiveList::print(std::ostream& out, bool verbose, const std::string& indent) co { out << "["; if (verbose) { - for 
(size_t i=0; i<_v.size(); ++i) { - out << "\n" << indent << " " << _v[i].nodeIndex() << " " << _v[i].getReason(); + for (const auto & copy : _v) { + out << "\n" << indent << " " << copy.nodeIndex() << " " << copy.getReason(); } if (!_v.empty()) { out << "\n" << indent; diff --git a/storage/src/vespa/storage/distributor/stripe_bucket_db_updater.cpp b/storage/src/vespa/storage/distributor/stripe_bucket_db_updater.cpp index 4c8e51908b0..fd747484ccf 100644 --- a/storage/src/vespa/storage/distributor/stripe_bucket_db_updater.cpp +++ b/storage/src/vespa/storage/distributor/stripe_bucket_db_updater.cpp @@ -659,11 +659,14 @@ StripeBucketDBUpdater::MergingNodeRemover::MergingNodeRemover( _cachedDecisionSuperbucket(UINT64_MAX), _cachedOwned(false) { - // TODO intersection of cluster state and distribution config const uint16_t storage_count = s.getNodeCount(lib::NodeType::STORAGE); _available_nodes.resize(storage_count); for (uint16_t i = 0; i < storage_count; ++i) { - if (s.getNodeState(lib::Node(lib::NodeType::STORAGE, i)).getState().oneOf(_upStates)) { + // To be considered available, a given node index must both be marked as available in the + // cluster state AND be present (have a valid node -> group mapping) in the distribution config. 
+ if (s.getNodeState(lib::Node(lib::NodeType::STORAGE, i)).getState().oneOf(_upStates) + && node_is_present_in_config(i)) + { _available_nodes[i] = true; } } @@ -791,6 +794,12 @@ StripeBucketDBUpdater::MergingNodeRemover::storage_node_is_available(uint16_t in return ((index < _available_nodes.size()) && _available_nodes[index]); } +bool +StripeBucketDBUpdater::MergingNodeRemover::node_is_present_in_config(uint16_t node_index) const noexcept +{ + return (_distribution.getNodeGraph().getGroupForNode(node_index) != nullptr); +} + StripeBucketDBUpdater::MergingNodeRemover::~MergingNodeRemover() = default; namespace { diff --git a/storage/src/vespa/storage/distributor/stripe_bucket_db_updater.h b/storage/src/vespa/storage/distributor/stripe_bucket_db_updater.h index b8b729edbeb..2e4ef2a7543 100644 --- a/storage/src/vespa/storage/distributor/stripe_bucket_db_updater.h +++ b/storage/src/vespa/storage/distributor/stripe_bucket_db_updater.h @@ -199,18 +199,19 @@ private: Result merge(BucketDatabase::Merger&) override; static void logRemove(const document::BucketId& bucketId, const char* msg) ; - bool distributorOwnsBucket(const document::BucketId&) const; + [[nodiscard]] bool distributorOwnsBucket(const document::BucketId&) const; const std::vector<BucketDatabase::Entry>& getNonOwnedEntries() const noexcept { return _nonOwnedBuckets; } - size_t removed_buckets() const noexcept { return _removed_buckets; } - size_t removed_documents() const noexcept { return _removed_documents; } + [[nodiscard]] size_t removed_buckets() const noexcept { return _removed_buckets; } + [[nodiscard]] size_t removed_documents() const noexcept { return _removed_documents; } private: void setCopiesInEntry(BucketDatabase::Entry& e, const std::vector<BucketCopy>& copies) const; - bool has_unavailable_nodes(const BucketDatabase::Entry&) const; - bool storage_node_is_available(uint16_t index) const noexcept; + [[nodiscard]] bool has_unavailable_nodes(const BucketDatabase::Entry&) const; + [[nodiscard]] 
bool storage_node_is_available(uint16_t index) const noexcept; + [[nodiscard]] bool node_is_present_in_config(uint16_t node_index) const noexcept; const lib::ClusterState _state; std::vector<bool> _available_nodes; diff --git a/testutil/pom.xml b/testutil/pom.xml index 7f57b6defee..a26459c0b8a 100644 --- a/testutil/pom.xml +++ b/testutil/pom.xml @@ -24,7 +24,7 @@ <groupId>com.google.inject</groupId> <artifactId>guice</artifactId> <scope>provided</scope> - <classifier>no_aop</classifier> + </dependency> <dependency> <groupId>com.yahoo.vespa</groupId> diff --git a/vespa-dependencies-enforcer/allowed-maven-dependencies.txt b/vespa-dependencies-enforcer/allowed-maven-dependencies.txt index e8ecee89350..f3d7489c960 100644 --- a/vespa-dependencies-enforcer/allowed-maven-dependencies.txt +++ b/vespa-dependencies-enforcer/allowed-maven-dependencies.txt @@ -20,9 +20,6 @@ com.fasterxml.jackson.core:jackson-databind:2.15.2 com.fasterxml.jackson.dataformat:jackson-dataformat-cbor:2.15.2 com.fasterxml.jackson.datatype:jackson-datatype-jdk8:2.15.2 com.fasterxml.jackson.datatype:jackson-datatype-jsr310:2.15.2 -com.fasterxml.jackson.jaxrs:jackson-jaxrs-base:2.15.2 -com.fasterxml.jackson.jaxrs:jackson-jaxrs-json-provider:2.15.2 -com.fasterxml.jackson.module:jackson-module-jaxb-annotations:2.15.2 com.github.luben:zstd-jni:1.5.5-5 com.github.spotbugs:spotbugs-annotations:3.1.9 com.google.auth:google-auth-library-credentials:1.19.0 @@ -36,21 +33,21 @@ com.google.guava:guava:32.1.2-jre com.google.http-client:google-http-client:1.43.3 com.google.http-client:google-http-client-apache-v2:1.43.3 com.google.http-client:google-http-client-gson:1.42.3 -com.google.inject:guice:4.2.3:no_aop +com.google.inject:guice:6.0.0 com.google.j2objc:j2objc-annotations:2.8 com.google.protobuf:protobuf-java:3.24.2 com.ibm.icu:icu4j:73.2 com.microsoft.onnxruntime:onnxruntime:1.15.1 com.sun.activation:javax.activation:1.2.0 -com.sun.istack:istack-commons-runtime:3.0.12 -com.sun.xml.bind:jaxb-core:2.3.0 
+com.sun.istack:istack-commons-runtime:4.1.2 +com.sun.xml.bind:jaxb-core:2.3.0.1 com.sun.xml.bind:jaxb-impl:2.3.0 com.thaiopensource:jing:20091111 -com.yahoo.athenz:athenz-auth-core:1.11.40 -com.yahoo.athenz:athenz-client-common:1.11.40 -com.yahoo.athenz:athenz-zms-core:1.11.40 -com.yahoo.athenz:athenz-zpe-java-client:1.11.40 -com.yahoo.athenz:athenz-zts-core:1.11.40 +com.yahoo.athenz:athenz-auth-core:1.11.41 +com.yahoo.athenz:athenz-client-common:1.11.41 +com.yahoo.athenz:athenz-zms-core:1.11.41 +com.yahoo.athenz:athenz-zpe-java-client:1.11.41 +com.yahoo.athenz:athenz-zts-core:1.11.41 com.yahoo.rdl:rdl-java:1.5.4 commons-cli:commons-cli:1.5.0 commons-codec:commons-codec:1.16.0 @@ -58,7 +55,7 @@ commons-fileupload:commons-fileupload:1.5 commons-io:commons-io:2.13.0 commons-logging:commons-logging:1.2 io.airlift:airline:0.9 -io.dropwizard.metrics:metrics-core:3.2.6 +io.dropwizard.metrics:metrics-core:4.2.19 io.grpc:grpc-context:1.27.2 io.jsonwebtoken:jjwt-api:0.11.5 io.jsonwebtoken:jjwt-impl:0.11.5 @@ -81,20 +78,19 @@ io.prometheus:simpleclient_common:0.16.0 io.prometheus:simpleclient_tracer_common:0.16.0 io.prometheus:simpleclient_tracer_otel:0.16.0 io.prometheus:simpleclient_tracer_otel_agent:0.16.0 -jakarta.annotation:jakarta.annotation-api:1.3.5 -jakarta.validation:jakarta.validation-api:2.0.2 -jakarta.ws.rs:jakarta.ws.rs-api:2.1.6 +jakarta.inject:jakarta.inject-api:2.0.1 +javax.activation:javax.activation-api:1.2.0 javax.annotation:javax.annotation-api:1.2 javax.inject:javax.inject:1 javax.servlet:javax.servlet-api:3.1.0 javax.ws.rs:javax.ws.rs-api:2.1.1 -javax.xml.bind:jaxb-api:2.3.0 +javax.xml.bind:jaxb-api:2.3.1 joda-time:joda-time:2.12.5 junit:junit:4.13.2 net.java.dev.jna:jna:5.13.0 net.openhft:zero-allocation-hashing:0.16 org.antlr:antlr-runtime:3.5.3 -org.antlr:antlr4-runtime:4.13.0 +org.antlr:antlr4-runtime:4.13.1 org.apache.aries.spifly:org.apache.aries.spifly.dynamic.bundle:1.3.6 org.apache.commons:commons-compress:1.23.0 
org.apache.commons:commons-csv:1.10.0 @@ -105,7 +101,7 @@ org.apache.curator:curator-client:5.5.0 org.apache.curator:curator-framework:5.5.0 org.apache.curator:curator-recipes:5.5.0 org.apache.felix:org.apache.felix.framework:7.0.5 -org.apache.felix:org.apache.felix.log:1.0.1 +org.apache.felix:org.apache.felix.log:1.3.0 org.apache.httpcomponents:httpclient:4.5.14 org.apache.httpcomponents:httpcore:4.4.16 org.apache.httpcomponents:httpmime:4.5.14 @@ -128,7 +124,7 @@ org.apache.maven.plugin-tools:maven-plugin-annotations:3.9.0 org.apache.maven.plugins:maven-jar-plugin:3.3.0 org.apache.maven.shared:file-management:3.1.0 org.apache.maven.wagon:wagon-provider-api:3.5.3 -org.apache.opennlp:opennlp-tools:1.9.4 +org.apache.opennlp:opennlp-tools:2.3.0 org.apache.velocity:velocity-engine-core:2.3 org.apache.yetus:audience-annotations:0.12.0 org.apache.zookeeper:zookeeper:3.8.0 @@ -146,6 +142,7 @@ org.codehaus.plexus:plexus-container-default:1.0-alpha-9-stable-1 org.codehaus.plexus:plexus-interpolation:1.26 org.codehaus.plexus:plexus-io:3.4.1 org.codehaus.plexus:plexus-utils:3.5.1 +org.eclipse.angus:angus-activation:2.0.1 org.eclipse.collections:eclipse-collections:11.1.0 org.eclipse.collections:eclipse-collections-api:11.1.0 org.eclipse.jetty:jetty-alpn-client:11.0.16 @@ -169,17 +166,9 @@ org.eclipse.jetty.toolchain:jetty-jakarta-servlet-api:5.0.2 org.eclipse.sisu:org.eclipse.sisu.inject:0.3.5 org.eclipse.sisu:org.eclipse.sisu.plexus:0.3.5 org.fusesource.jansi:jansi:1.18 -org.glassfish.hk2:osgi-resource-locator:1.0.3 -org.glassfish.hk2.external:jakarta.inject:2.6.1 -org.glassfish.jaxb:jaxb-runtime:2.3.8 -org.glassfish.jaxb:txw2:2.3.8 -org.glassfish.jersey.core:jersey-client:2.40 -org.glassfish.jersey.core:jersey-common:2.40 -org.glassfish.jersey.core:jersey-server:2.40 -org.glassfish.jersey.ext:jersey-entity-filtering:2.40 -org.glassfish.jersey.ext:jersey-proxy-client:2.40 -org.glassfish.jersey.media:jersey-media-json-jackson:2.40 
-org.glassfish.jersey.media:jersey-media-multipart:2.40 +org.glassfish.jaxb:jaxb-core:4.0.3 +org.glassfish.jaxb:jaxb-runtime:4.0.3 +org.glassfish.jaxb:txw2:4.0.3 org.hamcrest:hamcrest:2.2 org.hamcrest:hamcrest-core:2.2 org.hdrhistogram:HdrHistogram:2.1.12 @@ -190,18 +179,15 @@ org.junit.jupiter:junit-jupiter-engine:5.10.0 org.junit.platform:junit-platform-commons:1.10.0 org.junit.platform:junit-platform-engine:1.10.0 org.junit.platform:junit-platform-launcher:1.10.0 -org.jvnet.mimepull:mimepull:1.10.0 org.kohsuke:libpam4j:1.11 org.lz4:lz4-java:1.8.0 org.opentest4j:opentest4j:1.3.0 -org.osgi:org.osgi.compendium:4.1.0 -org.osgi:org.osgi.core:4.1.0 org.ow2.asm:asm:9.5 org.ow2.asm:asm-analysis:9.5 org.ow2.asm:asm-commons:9.5 org.ow2.asm:asm-tree:9.5 org.ow2.asm:asm-util:9.5 -org.questdb:questdb:6.3.1 +org.questdb:questdb:7.3.1 org.slf4j:jcl-over-slf4j:1.7.36 org.slf4j:log4j-over-slf4j:1.7.36 org.slf4j:slf4j-api:1.7.36 @@ -211,23 +197,18 @@ org.tukaani:xz:1.9 org.xerial.snappy:snappy-java:1.1.10.3 software.amazon.ion:ion-java:1.0.2 xerces:xercesImpl:2.12.2 -xml-apis:xml-apis:1.4.01 #[test-only] # Contains dependencies that are used exclusively in 'test' scope -com.github.tomakehurst:wiremock-jre8-standalone:2.35.0 -com.google.guava:guava-testlib:32.1.2-jre -com.google.inject:guice:4.2.3 com.google.jimfs:jimfs:1.3.0 -net.bytebuddy:byte-buddy:1.12.21 -net.bytebuddy:byte-buddy:1.14.6 -net.bytebuddy:byte-buddy-agent:1.14.6 +net.bytebuddy:byte-buddy:1.14.7 +net.bytebuddy:byte-buddy-agent:1.14.7 org.apache.curator:curator-test:5.5.0 org.assertj:assertj-core:3.24.2 -org.checkerframework:checker-qual:3.37.0 org.junit.jupiter:junit-jupiter:5.10.0 org.junit.jupiter:junit-jupiter-params:5.10.0 org.junit.vintage:junit-vintage-engine:5.10.0 org.mockito:mockito-core:5.5.0 org.mockito:mockito-junit-jupiter:5.5.0 org.objenesis:objenesis:3.3 +org.wiremock:wiremock-standalone:3.0.1 diff --git a/vespalib/src/tests/fastos/file_test.cpp b/vespalib/src/tests/fastos/file_test.cpp index 
99c7b858723..a9cec81d084 100644 --- a/vespalib/src/tests/fastos/file_test.cpp +++ b/vespalib/src/tests/fastos/file_test.cpp @@ -181,16 +181,6 @@ TEST(FileTest, ReadWriteTest) { EXPECT_TRUE(std::filesystem::remove(std::filesystem::path(rwFilename))); } -TEST(FileTest, ScanDirectoryTest) { - auto scanDir = std::make_unique<FastOS_DirectoryScan>("."); - while (scanDir->ReadNext()) { - const char *name = scanDir->GetName(); - bool isDirectory = scanDir->IsDirectory(); - bool isRegular = scanDir->IsRegular(); - fprintf(stderr, "%-30s %s\n", name, isDirectory ? "DIR" : (isRegular ? "FILE" : "UNKN")); - } -} - TEST(FileTest, ReadBufTest) { FastOS_File file(roFilename.c_str()); char buffer[20]; diff --git a/vespalib/src/vespa/fastos/file.cpp b/vespalib/src/vespa/fastos/file.cpp index 19a000296de..0c05c1ad894 100644 --- a/vespalib/src/vespa/fastos/file.cpp +++ b/vespalib/src/vespa/fastos/file.cpp @@ -306,10 +306,3 @@ FastOS_FileInterface::getLastErrorString() void FastOS_FileInterface::dropFromCache() const { } - -FastOS_DirectoryScanInterface::FastOS_DirectoryScanInterface(const char *path) - : _searchPath(path) -{ -} - -FastOS_DirectoryScanInterface::~FastOS_DirectoryScanInterface() = default; diff --git a/vespalib/src/vespa/fastos/file.h b/vespalib/src/vespa/fastos/file.h index 079a2b610f1..cd7a22a02b2 100644 --- a/vespalib/src/vespa/fastos/file.h +++ b/vespalib/src/vespa/fastos/file.h @@ -2,8 +2,7 @@ //************************************************************************ /** * @file - * Class definitions for FastOS_File, FastOS_DirectoryScan and - * FastOS_StatInfo. + * Class definitions for FastOS_File and FastOS_StatInfo. * * @author Div, Oivind H. Danielsen */ @@ -544,104 +543,6 @@ public: vespalib::system_time _modifiedTime; }; - -/** - * This class enumerates the contents of a given directory. 
- * - * Example: - * @code - * void Foo::Bar() - * { - * // Scan and print the contents of the directory '/usr/src/include' - * - * FastOS_DirectoryScan dirScan("/usr/src/include"); - * int numEntries = 0; - * - * while(dirScan.ReadNext()) - * { - * const char *name = dirScan.GetName(); - * bool isDirectory = dirScan.IsDirectory(); - * bool isRegular = dirScan.IsRegular(); - * - * printf("%-30s %s\n", name, - * isDirectory ? "DIR" : (isRegular ? "FILE" : "UNKN")); - * - * numEntries++; - * } - * - * printf("The directory contained %d entries.\n", numEntries); - * } - * @endcode - */ -class FastOS_DirectoryScanInterface -{ -protected: - std::string _searchPath; - -public: - FastOS_DirectoryScanInterface(const FastOS_DirectoryScanInterface&) = delete; - FastOS_DirectoryScanInterface& operator= (const FastOS_DirectoryScanInterface&) = delete; - - /** - * Constructor. - * - * @param path Path of the directory to be scanned. The path string - * is copied internally. - */ - FastOS_DirectoryScanInterface(const char *path); - - /** - * Destructor. - * - * Frees operating system resources related to the directory scan. - */ - virtual ~FastOS_DirectoryScanInterface(); - - /** - * Read the next entry in the directory scan. Failure indicates - * that there are no more entries. If the call is successful, - * attributes for the entry can be read with @ref IsDirectory(), - * @ref IsRegular() and @ref GetName(). - * - * @return Boolean success/failure - */ - virtual bool ReadNext() = 0; - - /** - * After a successful @ref ReadNext() this method is used to - * determine if the entry is a directory entry or not. Calling this - * method after an unsuccessful @ref ReadNext() or before - * @ref ReadNext() is called for the first time, yields undefined - * results. - * - * @return True if the entry is a directory, else false. 
- */ - virtual bool IsDirectory() = 0; - - - /** - * After a successful @ref ReadNext() this method is used to - * determine if the entry is a regular file entry or not. Calling - * this method after an unsuccessful @ref ReadNext() or before - * @ref ReadNext() is called for the first time, yields undefined - * results. - * - * @return True if the entry is a regular file, else false. - */ - virtual bool IsRegular() = 0; - - /** - * After a successful @ref ReadNext() this method is used to - * determine the name of the recently read directory entry. Calling - * this method after an unsuccessful @ref ReadNext() or before - * @ref ReadNext() is called for the first time, yields undefined - * results. - * - * @return A pointer to the recently read directory entry. - */ - virtual const char *GetName() = 0; -}; - #ifdef __linux__ #include <vespa/fastos/linux_file.h> typedef FastOS_Linux_File FASTOS_PREFIX(File); @@ -649,4 +550,3 @@ typedef FastOS_Linux_File FASTOS_PREFIX(File); #include <vespa/fastos/unix_file.h> typedef FastOS_UNIX_File FASTOS_PREFIX(File); #endif -typedef FastOS_UNIX_DirectoryScan FASTOS_PREFIX(DirectoryScan); diff --git a/vespalib/src/vespa/fastos/linux_file.cpp b/vespalib/src/vespa/fastos/linux_file.cpp index b8ee005517a..3344250838c 100644 --- a/vespalib/src/vespa/fastos/linux_file.cpp +++ b/vespalib/src/vespa/fastos/linux_file.cpp @@ -11,6 +11,7 @@ #include "file.h" #include "file_rw_ops.h" #include <sstream> +#include <dirent.h> #include <unistd.h> #include <fcntl.h> #include <cstdio> diff --git a/vespalib/src/vespa/fastos/unix_file.cpp b/vespalib/src/vespa/fastos/unix_file.cpp index 6d10338aec1..b9fe46e920d 100644 --- a/vespalib/src/vespa/fastos/unix_file.cpp +++ b/vespalib/src/vespa/fastos/unix_file.cpp @@ -368,114 +368,3 @@ FastOS_UNIX_File::count_open_files() return 0; #endif } - -FastOS_UNIX_DirectoryScan::FastOS_UNIX_DirectoryScan(const char *searchPath) - : FastOS_DirectoryScanInterface(searchPath), - _statRun(false), - _isDirectory(false), 
- _isRegular(false), - _statName(nullptr), - _statFilenameP(nullptr), - _dir(nullptr), - _dp(nullptr) -{ - _dir = opendir(searchPath); - - const int minimumLength = 512 + 1; - const int defaultLength = 16384; - - int maxNameLength = FastOS_File::GetMaximumFilenameLength(searchPath); - int maxPathLength = FastOS_File::GetMaximumPathLength(searchPath); - int nameLength = maxNameLength + 1 + maxPathLength; - - if ((maxNameLength == -1) || - (maxPathLength == -1) || - (nameLength < minimumLength)) - { - nameLength = defaultLength; - } - - _statName = new char [nameLength + 1]; // Include null - - strcpy(_statName, searchPath); - strcat(_statName, "/"); - - _statFilenameP = &_statName[strlen(_statName)]; -} - - -FastOS_UNIX_DirectoryScan::~FastOS_UNIX_DirectoryScan() -{ - if (_dir != nullptr) { - closedir(_dir); - _dir = nullptr; - } - delete [] _statName; -} - - -bool -FastOS_UNIX_DirectoryScan::ReadNext() -{ - _statRun = false; - - if (_dir != nullptr) { - _dp = readdir(_dir); - return (_dp != nullptr); - } - - return false; -} - - -void -FastOS_UNIX_DirectoryScan::DoStat() -{ - struct stat stbuf{}; - - assert(_dp != nullptr); - - strcpy(_statFilenameP, _dp->d_name); - - if (lstat(_statName, &stbuf) == 0) { - _isRegular = S_ISREG(stbuf.st_mode); - _isDirectory = S_ISDIR(stbuf.st_mode); - } else { - printf("lstat failed for [%s]\n", _dp->d_name); - _isRegular = false; - _isDirectory = false; - } - - _statRun = true; -} - - -bool -FastOS_UNIX_DirectoryScan::IsDirectory() -{ - if (!_statRun) { - DoStat(); - } - - return _isDirectory; -} - - -bool -FastOS_UNIX_DirectoryScan::IsRegular() -{ - if (!_statRun) { - DoStat(); - } - - return _isRegular; -} - - -const char * -FastOS_UNIX_DirectoryScan::GetName() -{ - assert(_dp != nullptr); - - return static_cast<const char *>(_dp->d_name); -} diff --git a/vespalib/src/vespa/fastos/unix_file.h b/vespalib/src/vespa/fastos/unix_file.h index dad75dc561f..81e5de901a3 100644 --- a/vespalib/src/vespa/fastos/unix_file.h +++ 
b/vespalib/src/vespa/fastos/unix_file.h @@ -4,7 +4,7 @@ * @author Oivind H. Danielsen * @date Creation date: 2000-01-18 * @file -* Class definitions for FastOS_UNIX_File and FastOS_UNIX_DirectoryScan. +* Class definitions for FastOS_UNIX_File *****************************************************************************/ #pragma once @@ -83,32 +83,3 @@ public: static int64_t GetFreeDiskSpace (const char *path); static int count_open_files(); }; - -#include <dirent.h> -/** - * This is the generic UNIX implementation of @ref FastOS_DirectoryScan. - */ -class FastOS_UNIX_DirectoryScan : public FastOS_DirectoryScanInterface -{ -private: - bool _statRun; - bool _isDirectory; - bool _isRegular; - char *_statName; - char *_statFilenameP; - - void DoStat(); - -protected: - DIR *_dir; - struct dirent *_dp; - -public: - FastOS_UNIX_DirectoryScan(const char *searchPath); - ~FastOS_UNIX_DirectoryScan(); - - bool ReadNext() override; - bool IsDirectory() override; - bool IsRegular() override; - const char *GetName() override; -}; diff --git a/vespalib/src/vespa/vespalib/io/mapped_file_input.cpp b/vespalib/src/vespa/vespalib/io/mapped_file_input.cpp index 7f1f0d003b7..95e4a1b496f 100644 --- a/vespalib/src/vespa/vespalib/io/mapped_file_input.cpp +++ b/vespalib/src/vespa/vespalib/io/mapped_file_input.cpp @@ -20,7 +20,9 @@ MappedFileInput::MappedFileInput(const vespalib::string &file_name) if (_data != MAP_FAILED) { _size = info.st_size; madvise(_data, _size, MADV_SEQUENTIAL); +#ifdef __linux__ madvise(_data, _size, MADV_DONTDUMP); +#endif } } } |